summaryrefslogtreecommitdiff
path: root/numpy/core/tests
diff options
context:
space:
mode:
authorEric Wieser <wieser.eric@gmail.com>2019-06-08 16:17:17 -0700
committerEric Wieser <wieser.eric@gmail.com>2019-06-08 19:57:59 -0700
commitb12a8690b6383e03573237b65fddd859afa1f282 (patch)
treef639320bd30b8b7bff5400592ab595c6fb5d6fb6 /numpy/core/tests
parent24b2a2d36a7e8356310cd16dbe60abd9d0e682dc (diff)
parentf07a38da97a6a36eb12b203f6c1ffa4bf2b2cb87 (diff)
downloadnumpy-b12a8690b6383e03573237b65fddd859afa1f282.tar.gz
Merge tag 'branch-points/1.16.x' into bit_shifts
Diffstat (limited to 'numpy/core/tests')
-rw-r--r--numpy/core/tests/__init__.py0
-rw-r--r--numpy/core/tests/_locales.py76
-rw-r--r--numpy/core/tests/test_abc.py33
-rw-r--r--numpy/core/tests/test_api.py59
-rw-r--r--numpy/core/tests/test_arrayprint.py836
-rw-r--r--numpy/core/tests/test_datetime.py334
-rw-r--r--numpy/core/tests/test_defchararray.py339
-rw-r--r--numpy/core/tests/test_deprecations.py475
-rw-r--r--numpy/core/tests/test_dtype.py520
-rw-r--r--numpy/core/tests/test_einsum.py1010
-rw-r--r--numpy/core/tests/test_errstate.py23
-rw-r--r--numpy/core/tests/test_extint128.py12
-rw-r--r--numpy/core/tests/test_function_base.py307
-rw-r--r--numpy/core/tests/test_getlimits.py78
-rw-r--r--numpy/core/tests/test_half.py23
-rw-r--r--numpy/core/tests/test_indexerrors.py7
-rw-r--r--numpy/core/tests/test_indexing.py347
-rw-r--r--numpy/core/tests/test_item_selection.py23
-rw-r--r--numpy/core/tests/test_longdouble.py162
-rw-r--r--numpy/core/tests/test_machar.py17
-rw-r--r--numpy/core/tests/test_mem_overlap.py484
-rw-r--r--numpy/core/tests/test_memmap.py108
-rw-r--r--numpy/core/tests/test_multiarray.py3574
-rw-r--r--numpy/core/tests/test_nditer.py1157
-rw-r--r--numpy/core/tests/test_numeric.py598
-rw-r--r--numpy/core/tests/test_numerictypes.py243
-rw-r--r--numpy/core/tests/test_overrides.py388
-rw-r--r--numpy/core/tests/test_print.py191
-rw-r--r--numpy/core/tests/test_records.py204
-rw-r--r--numpy/core/tests/test_regression.py979
-rw-r--r--numpy/core/tests/test_scalar_ctors.py65
-rw-r--r--numpy/core/tests/test_scalarbuffer.py105
-rw-r--r--numpy/core/tests/test_scalarinherit.py41
-rw-r--r--numpy/core/tests/test_scalarmath.py373
-rw-r--r--numpy/core/tests/test_scalarprint.py326
-rw-r--r--numpy/core/tests/test_shape_base.py420
-rw-r--r--numpy/core/tests/test_ufunc.py769
-rw-r--r--numpy/core/tests/test_umath.py1385
-rw-r--r--numpy/core/tests/test_umath_complex.py297
-rw-r--r--numpy/core/tests/test_unicode.py186
40 files changed, 12103 insertions, 4471 deletions
diff --git a/numpy/core/tests/__init__.py b/numpy/core/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/core/tests/__init__.py
diff --git a/numpy/core/tests/_locales.py b/numpy/core/tests/_locales.py
new file mode 100644
index 000000000..52e4ff36d
--- /dev/null
+++ b/numpy/core/tests/_locales.py
@@ -0,0 +1,76 @@
+"""Provide class for testing in French locale
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import locale
+
+import pytest
+
+__ALL__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+ """See if platform has a decimal point as comma locale.
+
+ Find a locale that uses a comma instead of a period as the
+ decimal point.
+
+ Returns
+ -------
+ old_locale: str
+ Locale when the function was called.
+    new_locale: {str, None}
+ First French locale found, None if none found.
+
+ """
+ if sys.platform == 'win32':
+ locales = ['FRENCH']
+ else:
+ locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+ old_locale = locale.getlocale(locale.LC_NUMERIC)
+ new_locale = None
+ try:
+ for loc in locales:
+ try:
+ locale.setlocale(locale.LC_NUMERIC, loc)
+ new_locale = loc
+ break
+ except locale.Error:
+ pass
+ finally:
+ locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+ return old_locale, new_locale
+
+
+class CommaDecimalPointLocale(object):
+ """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+ Classes derived from this class have setup and teardown methods that run
+ tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+ the decimal point instead of periods ('.'). On exit the locale is restored
+ to the initial locale. It also serves as context manager with the same
+ effect. If no such locale is available, the test is skipped.
+
+ .. versionadded:: 1.15.0
+
+ """
+ (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+ def setup(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def teardown(self):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+ def __enter__(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def __exit__(self, type, value, traceback):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
index 2430866fd..d9c61b0c6 100644
--- a/numpy/core/tests/test_abc.py
+++ b/numpy/core/tests/test_abc.py
@@ -1,47 +1,56 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_
import numbers
+
+import numpy as np
from numpy.core.numerictypes import sctypes
-class ABC(TestCase):
+class TestABC(object):
+ def test_abstract(self):
+ assert_(issubclass(np.number, numbers.Number))
+
+ assert_(issubclass(np.inexact, numbers.Complex))
+ assert_(issubclass(np.complexfloating, numbers.Complex))
+ assert_(issubclass(np.floating, numbers.Real))
+
+ assert_(issubclass(np.integer, numbers.Integral))
+ assert_(issubclass(np.signedinteger, numbers.Integral))
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
def test_floats(self):
for t in sctypes['float']:
- assert_(isinstance(t(), numbers.Real),
+ assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
- assert_(not isinstance(t(), numbers.Rational),
+ assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
- assert_(isinstance(t(), numbers.Complex),
+ assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
- assert_(not isinstance(t(), numbers.Real),
+ assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 876cab4d7..9755e7b36 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -3,11 +3,9 @@ from __future__ import division, absolute_import, print_function
import sys
import numpy as np
-from numpy.compat import sixu
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_raises
-)
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT
+ )
# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
@@ -19,23 +17,26 @@ def test_array_array():
tndarray = type(ones11)
# Test is_ndarray
assert_equal(np.array(ones11, dtype=np.float64), ones11)
- old_refcount = sys.getrefcount(tndarray)
- np.array(ones11)
- assert_equal(old_refcount, sys.getrefcount(tndarray))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tndarray)
+ np.array(ones11)
+ assert_equal(old_refcount, sys.getrefcount(tndarray))
# test None
assert_equal(np.array(None, dtype=np.float64),
np.array(np.nan, dtype=np.float64))
- old_refcount = sys.getrefcount(tobj)
- np.array(None, dtype=np.float64)
- assert_equal(old_refcount, sys.getrefcount(tobj))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tobj)
+ np.array(None, dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(tobj))
# test scalar
assert_equal(np.array(1.0, dtype=np.float64),
np.ones((), dtype=np.float64))
- old_refcount = sys.getrefcount(np.float64)
- np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
- assert_equal(old_refcount, sys.getrefcount(np.float64))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(np.float64)
+ np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(np.float64))
# test string
S2 = np.dtype((str, 2))
@@ -64,7 +65,7 @@ def test_array_array():
np.ones((), dtype=U5))
builtins = getattr(__builtins__, '__dict__', __builtins__)
- assert_(isinstance(builtins, dict))
+ assert_(hasattr(builtins, 'get'))
# test buffer
_buffer = builtins.get("buffer")
@@ -103,7 +104,7 @@ def test_array_array():
dict(__array_struct__=a.__array_struct__))
## wasn't what I expected... is np.array(o) supposed to equal a ?
## instead we get a array([...], dtype=">V18")
- assert_equal(str(np.array(o).data), str(a.data))
+ assert_equal(bytes(np.array(o).data), bytes(a.data))
# test array
o = type("o", (object,),
@@ -222,22 +223,25 @@ def test_array_astype():
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
- a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ class MyNDArray(np.ndarray):
+ pass
- # subok=True passes through a matrix
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
- assert_equal(type(b), np.matrix)
+ assert_equal(type(b), MyNDArray)
- # subok=False never returns a matrix
+ # subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
# Make sure converting from string object to fixed length string
# does not truncate.
@@ -245,7 +249,7 @@ def test_array_astype():
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S100'))
- a = np.array([sixu('a')*100], dtype='O')
+ a = np.array([u'a'*100], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U100'))
@@ -255,7 +259,7 @@ def test_array_astype():
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S10'))
- a = np.array([sixu('a')*10], dtype='O')
+ a = np.array([u'a'*10], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U10'))
@@ -263,19 +267,19 @@ def test_array_astype():
a = np.array(123456789012345678901234567890, dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='O').astype('U')
- assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('U')
- assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array(123456789012345678901234567890, dtype='S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='U')
- assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
- a = np.array(sixu('a\u0140'), dtype='U')
+ a = np.array(u'a\u0140', dtype='U')
b = np.ndarray(buffer=a, dtype='uint32', shape=2)
assert_(b.size == 2)
@@ -510,6 +514,3 @@ def test_broadcast_arrays():
result = np.broadcast_arrays(a, b)
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 5759a0984..7a858d2e2 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -1,79 +1,224 @@
-#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
+import gc
+import pytest
import numpy as np
-from numpy.compat import sixu
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
-)
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+ assert_raises_regex,
+ )
+import textwrap
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
- assert_equal(repr(x), 'array([ nan, inf])')
+ assert_equal(repr(x), 'array([nan, inf])')
-class TestComplexArray(TestCase):
+ def test_subclass(self):
+ class sub(np.ndarray): pass
+
+ # one dimensional
+ x1d = np.array([1, 2]).view(sub)
+ assert_equal(repr(x1d), 'sub([1, 2])')
+
+ # two dimensional
+ x2d = np.array([[1, 2], [3, 4]]).view(sub)
+ assert_equal(repr(x2d),
+ 'sub([[1, 2],\n'
+ ' [3, 4]])')
+
+ # two dimensional with flexible dtype
+ xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
+ assert_equal(repr(xstruct),
+ "sub([[(1,), (1,)],\n"
+ " [(1,), (1,)]], dtype=[('a', '<i4')])"
+ )
+
+ @pytest.mark.xfail(reason="See gh-10544")
+ def test_object_subclass(self):
+ class sub(np.ndarray):
+ def __new__(cls, inp):
+ obj = np.asarray(inp).view(cls)
+ return obj
+
+ def __getitem__(self, ind):
+ ret = super(sub, self).__getitem__(ind)
+ return sub(ret)
+
+ # test that object + subclass is OK:
+ x = sub([None, None])
+ assert_equal(repr(x), 'sub([None, None], dtype=object)')
+ assert_equal(str(x), '[None None]')
+
+ x = sub([None, sub([None, None])])
+ assert_equal(repr(x),
+ 'sub([None, sub([None, None], dtype=object)], dtype=object)')
+ assert_equal(str(x), '[None sub([None, None], dtype=object)]')
+
+ def test_0d_object_subclass(self):
+ # make sure that subclasses which return 0ds instead
+ # of scalars don't cause infinite recursion in str
+ class sub(np.ndarray):
+ def __new__(cls, inp):
+ obj = np.asarray(inp).view(cls)
+ return obj
+
+ def __getitem__(self, ind):
+ ret = super(sub, self).__getitem__(ind)
+ return sub(ret)
+
+ x = sub(1)
+ assert_equal(repr(x), 'sub(1)')
+ assert_equal(str(x), '1')
+
+ x = sub([1, 1])
+ assert_equal(repr(x), 'sub([1, 1])')
+ assert_equal(str(x), '[1 1]')
+
+ # check it works properly with object arrays too
+ x = sub(None)
+ assert_equal(repr(x), 'sub(None, dtype=object)')
+ assert_equal(str(x), 'None')
+
+ # plus recursive object arrays (even depth > 1)
+ y = sub(None)
+ x[()] = y
+ y[()] = x
+ assert_equal(repr(x),
+ 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+ assert_equal(str(x), '...')
+
+ # nested 0d-subclass-object
+ x = sub(None)
+ x[()] = sub(None)
+ assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+ assert_equal(str(x), 'None')
+
+ # gh-10663
+ class DuckCounter(np.ndarray):
+ def __getitem__(self, item):
+ result = super(DuckCounter, self).__getitem__(item)
+ if not isinstance(result, DuckCounter):
+ result = result[...].view(DuckCounter)
+ return result
+
+ def to_string(self):
+ return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+ def __str__(self):
+ if self.shape == ():
+ return self.to_string()
+ else:
+ fmt = {'all': lambda x: x.to_string()}
+ return np.array2string(self, formatter=fmt)
+
+ dc = np.arange(5).view(DuckCounter)
+ assert_equal(str(dc), "[zero one two many many]")
+ assert_equal(str(dc[0]), "zero")
+
+ def test_self_containing(self):
+ arr0d = np.array(None)
+ arr0d[()] = arr0d
+ assert_equal(repr(arr0d),
+ 'array(array(..., dtype=object), dtype=object)')
+
+ arr1d = np.array([None, None])
+ arr1d[1] = arr1d
+ assert_equal(repr(arr1d),
+ 'array([None, array(..., dtype=object)], dtype=object)')
+
+ first = np.array(None)
+ second = np.array(None)
+ first[()] = second
+ second[()] = first
+ assert_equal(repr(first),
+ 'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+
+ def test_containing_list(self):
+        # printing square brackets directly would be ambiguous
+ arr1d = np.array([None, None])
+ arr1d[0] = [1, 2]
+ arr1d[1] = [3]
+ assert_equal(repr(arr1d),
+ 'array([list([1, 2]), list([3])], dtype=object)')
+
+ def test_void_scalar_recursion(self):
+ # gh-9345
+ repr(np.void(b'test')) # RecursionError ?
+
+ def test_fieldless_structured(self):
+ # gh-10366
+ no_fields = np.dtype([])
+ arr_no_fields = np.empty(4, dtype=no_fields)
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
+
+
+class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
- '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
- '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
- '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
- '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
- '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
- '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
- '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
- '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
- '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
- '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
- '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
- '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
- '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
- '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
- '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
- '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
- '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
- '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
- '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
- '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
- '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
- '[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
- '[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
- '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
- '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
- '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
- '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
- '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
- '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
- '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
- '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
- '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
- '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
- '[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
- '[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
- '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
+ '[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
+ '[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
+ '[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
+ '[0.+infj]', '[0.+infj]', '[0.+infj]',
+ '[0.-infj]', '[0.-infj]', '[0.-infj]',
+ '[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
+ '[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
+ '[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
+ '[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
+ '[1.+infj]', '[1.+infj]', '[1.+infj]',
+ '[1.-infj]', '[1.-infj]', '[1.-infj]',
+ '[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
+ '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
+ '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
+ '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
+ '[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
+ '[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
+ '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
+ '[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
+ '[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
+ '[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
+ '[inf+infj]', '[inf+infj]', '[inf+infj]',
+ '[inf-infj]', '[inf-infj]', '[inf-infj]',
+ '[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
+ '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
+ '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
+ '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
+ '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
+ '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
+ '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
+ '[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
+ '[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
+ '[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
+ '[nan+infj]', '[nan+infj]', '[nan+infj]',
+ '[nan-infj]', '[nan-infj]', '[nan-infj]',
+ '[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
- assert_(res == val)
+ assert_equal(res, val)
-class TestArray2String(TestCase):
+class TestArray2String(object):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
- assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
+ assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
+ assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
- def test_style_keyword(self):
- """This should only apply to 0-D arrays. See #1218."""
- stylestr = np.array2string(np.array(1.5),
- style=lambda x: "Value in 0-D array: " + str(x))
- assert_(stylestr == 'Value in 0-D array: 1.5')
+ def test_unexpected_kwarg(self):
+ # ensure than an appropriate TypeError
+ # is raised when array2string receives
+ # an unexpected kwarg
+
+ with assert_raises_regex(TypeError, 'nonsense'):
+ np.array2string(np.array([1, 2, 3]),
+ nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
@@ -113,21 +258,186 @@ class TestArray2String(TestCase):
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
+ # check for backcompat that using FloatFormat works and emits warning
+ with assert_warns(DeprecationWarning):
+ fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
+ assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
+ '[0. 1. 2.]')
+
+ def test_structure_format(self):
+ dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_equal(np.array2string(x),
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
+
+ np.set_printoptions(legacy='1.13')
+ try:
+ # for issue #5692
+ A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
+ ('NaT',) ('NaT',) ('NaT',)]""")
+ )
+ finally:
+ np.set_printoptions(legacy=False)
-class TestPrintOptions:
+ # same again, but with non-legacy behavior
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # and again, with timedeltas
+ A = np.full(10, 123456, dtype=[("A", "m8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # See #8160
+ struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
+ assert_equal(np.array2string(struct_int),
+ "[([ 1, -1],) ([123, 1],)]")
+ struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
+ dtype=[('B', 'i4', (2, 2))])
+ assert_equal(np.array2string(struct_2dint),
+ "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
+
+ # See #8172
+ array_scalar = np.array(
+ (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
+
+ def test_unstructured_void_repr(self):
+ a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
+ 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
+ assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
+ assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
+ assert_equal(repr(a),
+ r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
+ r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
+
+ assert_equal(eval(repr(a), vars(np)), a)
+ assert_equal(eval(repr(a[0]), vars(np)), a[0])
+
+ def test_edgeitems_kwarg(self):
+ # previously the global print options would be taken over the kwarg
+ arr = np.zeros(3, int)
+ assert_equal(
+ np.array2string(arr, edgeitems=1, threshold=0),
+ "[0 ... 0]"
+ )
+
+ def test_summarize_1d(self):
+ A = np.arange(1001)
+ strA = '[ 0 1 2 ... 998 999 1000]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
+ assert_equal(repr(A), reprA)
+
+ def test_summarize_2d(self):
+ A = np.arange(1002).reshape(2, 501)
+ strA = '[[ 0 1 2 ... 498 499 500]\n' \
+ ' [ 501 502 503 ... 999 1000 1001]]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
+ ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
+ assert_equal(repr(A), reprA)
+
+ def test_linewidth(self):
+ a = np.full(6, 1)
+
+ def make_str(a, width, **kw):
+ return np.array2string(a, separator="", max_line_width=width, **kw)
+
+ assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
+ ' 11]')
+
+ assert_equal(make_str(a, 8), '[111111]')
+ assert_equal(make_str(a, 7), '[11111\n'
+ ' 1]')
+ assert_equal(make_str(a, 5), '[111\n'
+ ' 111]')
+
+ b = a[None,None,:]
+
+ assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
+ ' 1]]]')
+
+ assert_equal(make_str(b, 12), '[[[111111]]]')
+ assert_equal(make_str(b, 9), '[[[111\n'
+ ' 111]]]')
+ assert_equal(make_str(b, 8), '[[[11\n'
+ ' 11\n'
+ ' 11]]]')
+
+ def test_wide_element(self):
+ a = np.array(['xxxxx'])
+ assert_equal(
+ np.array2string(a, max_line_width=5),
+ "['xxxxx']"
+ )
+ assert_equal(
+ np.array2string(a, max_line_width=5, legacy='1.13'),
+ "[ 'xxxxx']"
+ )
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # make sure we do not hold references to the array due to a recursive
+ # closure (gh-10620)
+ gc.disable()
+ a = np.arange(2)
+ r1 = sys.getrefcount(a)
+ np.array2string(a)
+ np.array2string(a)
+ r2 = sys.getrefcount(a)
+ gc.collect()
+ gc.enable()
+ assert_(r1 == r2)
+
+class TestPrintOptions(object):
"""Test getting and setting global print options."""
- def setUp(self):
+ def setup(self):
self.oldopts = np.get_printoptions()
- def tearDown(self):
+ def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
- assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
- assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
+
+ def test_precision_zero(self):
+ np.set_printoptions(precision=0)
+ for values, string in (
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
+ x = np.array(values)
+ assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
@@ -155,7 +465,387 @@ class TestPrintOptions:
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
- assert_equal(repr(x), "array([ 0., 1., 2.])")
+ assert_equal(repr(x), "array([0., 1., 2.])")
+
+ def test_0d_arrays(self):
+ unicode = type(u'')
+
+ assert_equal(unicode(np.array(u'café', '<U4')), u'café')
+
+ if sys.version_info[0] >= 3:
+ assert_equal(repr(np.array('café', '<U4')),
+ "array('café', dtype='<U4')")
+ else:
+ assert_equal(repr(np.array(u'café', '<U4')),
+ "array(u'caf\\xe9', dtype='<U4')")
+ assert_equal(str(np.array('test', np.str_)), 'test')
+
+ a = np.zeros(1, dtype=[('a', '<i4', (3,))])
+ assert_equal(str(a[0]), '([0, 0, 0],)')
+
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
+ "array('2005-02-25', dtype='datetime64[D]')")
+
+ assert_equal(repr(np.timedelta64('10', 'Y')[...]),
+ "array(10, dtype='timedelta64[Y]')")
+
+ # repr of 0d arrays is affected by printoptions
+ x = np.array(1)
+ np.set_printoptions(formatter={'all':lambda x: "test"})
+ assert_equal(repr(x), "array(test)")
+ # str is unaffected
+ assert_equal(str(x), "1")
+
+ # check `style` arg raises
+ assert_warns(DeprecationWarning, np.array2string,
+ np.array(1.), style=repr)
+ # but not in legacy mode
+ np.array2string(np.array(1.), style=repr, legacy='1.13')
+ # gh-10934 style was broken in legacy mode, check it works
+ np.array2string(np.array(1.), legacy='1.13')
+
+ def test_float_spacing(self):
+ x = np.array([1., 2., 3.])
+ y = np.array([1., 2., -10.])
+ z = np.array([100., 2., -1.])
+ w = np.array([-100., 2., 1.])
+
+ assert_equal(repr(x), 'array([1., 2., 3.])')
+ assert_equal(repr(y), 'array([ 1., 2., -10.])')
+ assert_equal(repr(np.array(y[0])), 'array(1.)')
+ assert_equal(repr(np.array(y[-1])), 'array(-10.)')
+ assert_equal(repr(z), 'array([100., 2., -1.])')
+ assert_equal(repr(w), 'array([-100., 2., 1.])')
+
+ assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
+ assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
+
+ x = np.array([np.inf, 100000, 1.1234])
+ y = np.array([np.inf, 100000, -1.1234])
+ z = np.array([np.inf, 1.1234, -1e120])
+ np.set_printoptions(precision=2)
+ assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
+ assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
+ assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
+
+ def test_bool_spacing(self):
+ assert_equal(repr(np.array([True, True])),
+ 'array([ True, True])')
+ assert_equal(repr(np.array([True, False])),
+ 'array([ True, False])')
+ assert_equal(repr(np.array([True])),
+ 'array([ True])')
+ assert_equal(repr(np.array(True)),
+ 'array(True)')
+ assert_equal(repr(np.array(False)),
+ 'array(False)')
+
+ def test_sign_spacing(self):
+ a = np.arange(4.)
+ b = np.array([1.234e9])
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
+
+ assert_equal(repr(a), 'array([0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array(1.)')
+ assert_equal(repr(b), 'array([1.234e+09])')
+ assert_equal(repr(np.array([0.])), 'array([0.])')
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
+
+ np.set_printoptions(sign=' ')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array( 1.)')
+ assert_equal(repr(b), 'array([ 1.234e+09])')
+ assert_equal(repr(c),
+ "array([ 1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
+
+ np.set_printoptions(sign='+')
+ assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
+ assert_equal(repr(np.array(1.)), 'array(+1.)')
+ assert_equal(repr(b), 'array([+1.234e+09])')
+ assert_equal(repr(c),
+ "array([+1. +1.j , +1.12345679+1.12345679j])")
+
+ np.set_printoptions(legacy='1.13')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
+ assert_equal(repr(-b), 'array([ -1.23400000e+09])')
+ assert_equal(repr(np.array(1.)), 'array(1.0)')
+ assert_equal(repr(np.array([0.])), 'array([ 0.])')
+ assert_equal(repr(c),
+ "array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
+ # gh-10383
+ assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
+
+ assert_raises(TypeError, np.set_printoptions, wrongarg=True)
+
+ def test_float_overflow_nowarn(self):
+ # make sure internal computations in FloatingFormat don't
+ # warn about overflow
+ repr(np.array([1e4, 0.1], dtype='f2'))
+
+ def test_sign_spacing_structured(self):
+ a = np.ones(2, dtype='<f,<f')
+ assert_equal(repr(a),
+ "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
+ assert_equal(repr(a[0]), "(1., 1.)")
+
+ def test_floatmode(self):
+ x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
+ 0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
+ y = np.array([0.2918820979355541, 0.5064172631089138,
+ 0.2848750619642916, 0.4342965294660567,
+ 0.7326538397312751, 0.3459503329096204,
+ 0.0862072768214508, 0.39112753029631175],
+ dtype=np.float64)
+ z = np.arange(6, dtype=np.float16)/10
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
+
+ # also make sure 1e23 is right (is between two fp numbers)
+ w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
+ # note: we construct w from the strings `1eXX` instead of doing
+ # `10.**arange(24)` because it turns out the two are not equivalent in
+ # python. On some architectures `1e23 != 10.**23`.
+ wp = np.array([1.234e1, 1e2, 1e123])
+
+ # unique mode
+ np.set_printoptions(floatmode='unique')
+ assert_equal(repr(x),
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
+ " 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
+ " 0.0862072768214508 , 0.39112753029631175])")
+ assert_equal(repr(z),
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w),
+ "array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
+ " 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
+ " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
+ " 1.e+24])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.123456789+1.123456789j])")
+
+ # maxprec mode, precision=8
+ np.set_printoptions(floatmode='maxprec', precision=8)
+ assert_equal(repr(x),
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
+ " 0.34595033, 0.08620728, 0.39112753])")
+ assert_equal(repr(z),
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
+
+ # fixed mode, precision=4
+ np.set_printoptions(floatmode='fixed', precision=4)
+ assert_equal(repr(x),
+ "array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
+ " 0.2383, 0.4226], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
+ assert_equal(repr(z),
+ "array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
+ assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
+ assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
+ assert_equal(repr(c),
+ "array([1.0000+1.0000j, 1.1235+1.1235j])")
+ # for larger precision, representation error becomes more apparent:
+ np.set_printoptions(floatmode='fixed', precision=8)
+ assert_equal(repr(z),
+ "array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
+ " 0.50000000], dtype=float16)")
+
+ # maxprec_equal mode, precision=8
+ np.set_printoptions(floatmode='maxprec_equal', precision=8)
+ assert_equal(repr(x),
+ "array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
+ " 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
+ " 0.34595033, 0.08620728, 0.39112753])")
+ assert_equal(repr(z),
+ "array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
+
+ def test_legacy_mode_scalars(self):
+ # in legacy mode, str of floats get truncated, and complex scalars
+ # use * for non-finite imaginary part
+ np.set_printoptions(legacy='1.13')
+ assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
+
+ np.set_printoptions(legacy=False)
+ assert_equal(str(np.float64(1.123456789123456789)),
+ '1.1234567891234568')
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
+
+ def test_legacy_stray_comma(self):
+ np.set_printoptions(legacy='1.13')
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
+
+ np.set_printoptions(legacy=False)
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
+
+ def test_dtype_linewidth_wrapping(self):
+ np.set_printoptions(linewidth=75)
+ assert_equal(repr(np.arange(10,20., dtype='f4')),
+ "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
+ assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
+ array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
+ dtype=float32)"""))
+
+ styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
+ assert_equal(repr(np.ones(3, dtype=styp)),
+ "array(['1', '1', '1'], dtype='{}')".format(styp))
+ assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
+ array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
+ dtype='{}')""".format(styp)))
+
+ def test_linewidth_repr(self):
+ a = np.full(7, fill_value=2)
+ np.set_printoptions(linewidth=17)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2])""")
+ )
+ np.set_printoptions(linewidth=17, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ a = np.full(8, fill_value=2)
+
+ np.set_printoptions(linewidth=18, legacy=False)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2, 2])""")
+ )
+
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ def test_linewidth_str(self):
+ a = np.full(18, fill_value=2)
+ np.set_printoptions(linewidth=18)
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2
+ 2 2]""")
+ )
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2 2]""")
+ )
+
+ def test_edgeitems(self):
+ np.set_printoptions(edgeitems=1, threshold=1)
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ b = np.zeros((3, 3, 1, 1))
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[0.]],
+
+ ...,
+
+ [[0.]]],
+
+
+ ...,
+
+
+ [[[0.]],
+
+ ...,
+
+ [[0.]]]])""")
+ )
+
+ # 1.13 had extra trailing spaces, and was missing newlines
+ np.set_printoptions(legacy='1.13')
+
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[ 0.]],
+
+ ...,
+ [[ 0.]]],
+
+
+ ...,
+ [[[ 0.]],
+
+ ...,
+ [[ 0.]]]])""")
+ )
+
+ def test_bad_args(self):
+ assert_raises(ValueError, np.set_printoptions, threshold='nan')
+ assert_raises(ValueError, np.set_printoptions, threshold=u'1')
+ assert_raises(ValueError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
import sys
@@ -163,9 +853,37 @@ def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
else:
expected = "array([u'\\xe9'], dtype=object)"
- x = np.array([sixu('\xe9')], dtype=object)
+ x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
-if __name__ == "__main__":
- run_module_suite()
+class TestContextManager(object):
+ def test_ctx_mgr(self):
+ # test that context manager actually works
+ with np.printoptions(precision=2):
+ s = str(np.array([2.0]) / 3)
+ assert_equal(s, '[0.67]')
+
+ def test_ctx_mgr_restores(self):
+ # test that print options are actually restored
+ opts = np.get_printoptions()
+ with np.printoptions(precision=opts['precision'] - 1,
+ linewidth=opts['linewidth'] - 4):
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_exceptions(self):
+ # test that print options are restored even if an exception is raised
+ opts = np.get_printoptions()
+ try:
+ with np.printoptions(precision=2, linewidth=11):
+ raise ValueError
+ except ValueError:
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_as_smth(self):
+ opts = {"precision": 2}
+ with np.printoptions(**opts) as ctx:
+ saved_opts = ctx.copy()
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 601f09c09..b2ce0402a 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1,16 +1,15 @@
from __future__ import division, absolute_import, print_function
-import pickle
-import warnings
import numpy
import numpy as np
import datetime
-from numpy.compat import asbytes
+import pytest
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_warns, dec
-)
+ assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+ assert_raises_regex,
+ )
+from numpy.core.numeric import pickle
# Use pytz to test out various time zones if available
try:
@@ -19,8 +18,13 @@ try:
except ImportError:
_has_pytz = False
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError # python < 3.5
+
-class TestDateTime(TestCase):
+class TestDateTime(object):
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
@@ -126,13 +130,11 @@ class TestDateTime(TestCase):
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
- # regression tests for GH6452
- assert_equal(np.datetime64('NaT'),
- np.datetime64('2000') + np.timedelta64('NaT'))
- # nb. we may want to make NaT != NaT true in the future; this test
- # verifies the existing behavior (and that it should not warn)
- assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us'))
- assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT'))
+ # regression tests for gh-6452
+ assert_(np.datetime64('NaT') !=
+ np.datetime64('2000') + np.timedelta64('NaT'))
+ assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
+ assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
def test_datetime_scalar_construction(self):
# Construct with different units
@@ -237,18 +239,40 @@ class TestDateTime(TestCase):
# find "supertype" for non-dates and dates
b = np.bool_(True)
- dt = np.datetime64('1970-01-01', 'M')
- arr = np.array([b, dt])
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.date(1970, 1, 1)
- arr = np.array([b, dt])
+ arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
+ @pytest.mark.parametrize("unit", [
+ # test all date / time units and use
+ # "generic" to select generic unit
+ ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
+ ("s"), ("ms"), ("us"), ("ns"), ("ps"),
+ ("fs"), ("as"), ("generic") ])
+ def test_timedelta_np_int_construction(self, unit):
+ # regression test for gh-7617
+ if unit != "generic":
+ assert_equal(np.timedelta64(np.int64(123), unit),
+ np.timedelta64(123, unit))
+ else:
+ assert_equal(np.timedelta64(np.int64(123)),
+ np.timedelta64(123))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -325,6 +349,34 @@ class TestDateTime(TestCase):
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
+
+ def test_timedelta_0_dim_object_array_conversion(self):
+ # Regression test for gh-11151
+ test = np.array(datetime.timedelta(seconds=20))
+ actual = test.astype(np.timedelta64)
+ # expected value from the array constructor workaround
+ # described in above issue
+ expected = np.array(datetime.timedelta(seconds=20),
+ np.timedelta64)
+ assert_equal(actual, expected)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
@@ -519,31 +571,38 @@ class TestDateTime(TestCase):
def test_datetime_string_conversion(self):
a = ['2011-03-16', '1920-01-01', '2013-05-19']
str_a = np.array(a, dtype='S')
+ uni_a = np.array(a, dtype='U')
dt_a = np.array(a, dtype='M')
- str_b = np.empty_like(str_a)
- dt_b = np.empty_like(dt_a)
# String to datetime
assert_equal(dt_a, str_a.astype('M'))
assert_equal(dt_a.dtype, str_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
dt_b[...] = str_a
assert_equal(dt_a, dt_b)
+
# Datetime to string
assert_equal(str_a, dt_a.astype('S0'))
+ str_b = np.empty_like(str_a)
str_b[...] = dt_a
assert_equal(str_a, str_b)
- # Convert the 'S' to 'U'
- str_a = str_a.astype('U')
- str_b = str_b.astype('U')
-
# Unicode to datetime
- assert_equal(dt_a, str_a.astype('M'))
- assert_equal(dt_a.dtype, str_a.astype('M').dtype)
- dt_b[...] = str_a
+ assert_equal(dt_a, uni_a.astype('M'))
+ assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
+ dt_b[...] = uni_a
assert_equal(dt_a, dt_b)
+
# Datetime to unicode
- assert_equal(str_a, dt_a.astype('U'))
+ assert_equal(uni_a, dt_a.astype('U'))
+ uni_b = np.empty_like(uni_a)
+ uni_b[...] = dt_a
+ assert_equal(uni_a, uni_b)
+
+ # Datetime to long string - gh-9712
+ assert_equal(str_a, dt_a.astype((np.string_, 128)))
+ str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
str_b[...] = dt_a
assert_equal(str_a, str_b)
@@ -559,7 +618,7 @@ class TestDateTime(TestCase):
# Check that one NaT doesn't corrupt subsequent entries
a = np.array(['2010', 'NaT', '2030']).astype('M')
- assert_equal(str(a), "['2010' 'NaT' '2030']")
+ assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
a = np.array([-1, 0, 100], dtype='m')
@@ -572,26 +631,39 @@ class TestDateTime(TestCase):
a = np.array([-1, 'NaT', 1234567], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
+ # Test with other byteorder:
+ a = np.array([-1, 'NaT', 1234567], dtype='>m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+ a = np.array([-1, 'NaT', 1234567], dtype='<m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
def test_pickle(self):
# Check that pickle roundtripping works
- dt = np.dtype('M8[7D]')
- assert_equal(pickle.loads(pickle.dumps(dt)), dt)
- dt = np.dtype('M8[W]')
- assert_equal(pickle.loads(pickle.dumps(dt)), dt)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dt = np.dtype('M8[7D]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ dt = np.dtype('M8[W]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ scalar = np.datetime64('2016-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
+ scalar)
+ delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
+ delta)
# Check that loading pickles from 1.6 works
- pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
- "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
- "I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
- assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[7D]'))
- pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
- "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
- "I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
- assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[W]'))
- pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
- "(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
- "I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
- assert_equal(pickle.loads(asbytes(pkl)), np.dtype('>M8[us]'))
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
+ b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
+ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
"Verify that datetime dtype __setstate__ can handle bad arguments"
@@ -789,6 +861,12 @@ class TestDateTime(TestCase):
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
+ # positive ufunc
+ assert_equal(np.positive(tda), tda)
+ assert_equal(np.positive(tda).dtype, tda.dtype)
+ assert_equal(np.positive(tdb), tdb)
+ assert_equal(np.positive(tdb).dtype, tdb.dtype)
+
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
@@ -989,8 +1067,8 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.multiply, 1.5, dta)
# NaTs
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', category=RuntimeWarning)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in multiply")
nat = np.timedelta64('NaT')
def check(a, b, res):
assert_equal(a * b, res)
@@ -1053,8 +1131,8 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.divide, 1.5, dta)
# NaTs
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', category=RuntimeWarning)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, r".*encountered in true\_divide")
nat = np.timedelta64('NaT')
for tp in (int, float):
assert_equal(np.timedelta64(1) / tp(0), nat)
@@ -1094,23 +1172,19 @@ class TestDateTime(TestCase):
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
- if op(dt_nat, dt_nat):
- assert_warns(FutureWarning, op, dt_nat, dt_nat)
- if op(dt_nat, dt_other):
- assert_warns(FutureWarning, op, dt_nat, dt_other)
- if op(dt_other, dt_nat):
- assert_warns(FutureWarning, op, dt_other, dt_nat)
- if op(td_nat, td_nat):
- assert_warns(FutureWarning, op, td_nat, td_nat)
- if op(td_nat, td_other):
- assert_warns(FutureWarning, op, td_nat, td_other)
- if op(td_other, td_nat):
- assert_warns(FutureWarning, op, td_other, td_nat)
-
- assert_warns(FutureWarning, np.not_equal, dt_nat, dt_nat)
+ assert_(not op(dt_nat, dt_nat))
+ assert_(not op(dt_nat, dt_other))
+ assert_(not op(dt_other, dt_nat))
+
+ assert_(not op(td_nat, td_nat))
+ assert_(not op(td_nat, td_other))
+ assert_(not op(td_other, td_nat))
+
+ assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
- assert_warns(FutureWarning, np.not_equal, td_nat, td_nat)
+
+ assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
@@ -1208,21 +1282,28 @@ class TestDateTime(TestCase):
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
- self.assertRaises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
- self.assertRaises(ValueError, lambda: np.dtype('M8[as/10]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
+ # Allow positive years
+ assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow negative years
assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
# UTC specifier
with assert_warns(DeprecationWarning):
assert_equal(
+ np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
+ with assert_warns(DeprecationWarning):
+ assert_equal(
np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
# Time zone offset
@@ -1442,7 +1523,7 @@ class TestDateTime(TestCase):
np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
'2032-01-01')
- @dec.skipif(not _has_pytz, "The pytz module is not available.")
+ @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
# timezone='local' vs 'UTC'
a = np.datetime64('2010-03-15T06:30', 'm')
@@ -1546,6 +1627,76 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
+ @pytest.mark.parametrize("val1, val2, expected", [
+ # case from gh-12092
+ (np.timedelta64(7, 's'),
+ np.timedelta64(3, 's'),
+ np.timedelta64(1, 's')),
+ # negative value cases
+ (np.timedelta64(3, 's'),
+ np.timedelta64(-2, 's'),
+ np.timedelta64(-1, 's')),
+ (np.timedelta64(-3, 's'),
+ np.timedelta64(2, 's'),
+ np.timedelta64(1, 's')),
+ # larger value cases
+ (np.timedelta64(17, 's'),
+ np.timedelta64(22, 's'),
+ np.timedelta64(17, 's')),
+ (np.timedelta64(22, 's'),
+ np.timedelta64(17, 's'),
+ np.timedelta64(5, 's')),
+ # different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(57, 's'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(1, 'us'),
+ np.timedelta64(727, 'ns'),
+ np.timedelta64(273, 'ns')),
+ # NaT is propagated
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'ns'),
+ np.timedelta64('NaT')),
+ # Y % M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64(22, 'M'),
+ np.timedelta64(2, 'M')),
+ ])
+ def test_timedelta_modulus(self, val1, val2, expected):
+ assert_equal(val1 % val2, expected)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for modulus operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 % val2
+
+ def test_timedelta_modulus_div_by_zero(self):
+ with assert_warns(RuntimeWarning):
+ actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
+ assert_equal(actual, np.timedelta64(0, 's'))
+
+ @pytest.mark.parametrize("val1, val2", [
+ # cases where one operand is not
+ # timedelta64
+ (np.timedelta64(7, 'Y'),
+ 15,),
+ (7.5,
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_type_resolution(self, val1, val2):
+ # NOTE: some of the operations may be supported
+ # in the future
+ with assert_raises_regex(TypeError,
+ "remainder cannot use operands with types"):
+ val1 % val2
+
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
@@ -1623,7 +1774,6 @@ class TestDateTime(TestCase):
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
-
def test_datetime_busdaycalendar(self):
# Check that it removes NaT, duplicates, and weekends
# and sorts the result.
@@ -1899,11 +2049,49 @@ class TestDateTime(TestCase):
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
-class TestDateTimeData(TestCase):
+ def test_isnat(self):
+ assert_(np.isnat(np.datetime64('NaT', 'ms')))
+ assert_(np.isnat(np.datetime64('NaT', 'ns')))
+ assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
+
+ assert_(np.isnat(np.timedelta64('NaT', "ms")))
+ assert_(not np.isnat(np.timedelta64(34, "ms")))
+
+ res = np.array([False, False, True])
+ for unit in ['Y', 'M', 'W', 'D',
+ 'h', 'm', 's', 'ms', 'us',
+ 'ns', 'ps', 'fs', 'as']:
+ arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+
+ def test_isnat_error(self):
+ # Test that only datetime dtype arrays are accepted
+ for t in np.typecodes["All"]:
+ if t in np.typecodes["Datetime"]:
+ continue
+ assert_raises(TypeError, np.isnat, np.zeros(10, t))
+
+ def test_corecursive_input(self):
+ # construct a co-recursive list
+ a, b = [], []
+ a.append(b)
+ b.append(a)
+ obj_arr = np.array([None])
+ obj_arr[0] = a
+
+ # gh-11154: This shouldn't cause a C stack overflow
+ assert_raises(RecursionError, obj_arr.astype, 'M8')
+ assert_raises(RecursionError, obj_arr.astype, 'm8')
+
+
+class TestDateTimeData(object):
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index e828b879f..7b0e6f8a4 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -4,35 +4,35 @@ import sys
import numpy as np
from numpy.core.multiarray import _vec_string
-from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
-)
+ assert_, assert_equal, assert_array_equal, assert_raises,
+ assert_raises_regex, suppress_warnings,
+ )
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
-class TestBasic(TestCase):
+class TestBasic(object):
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
B = np.char.array(A)
assert_equal(B.dtype.itemsize, 10)
- assert_array_equal(B, asbytes_nested([['abc', '2'],
- ['long', '0123456789']]))
+ assert_array_equal(B, [[b'abc', b'2'],
+ [b'long', b'0123456789']])
def test_from_object_array_unicode(self):
- A = np.array([['abc', sixu('Sigma \u03a3')],
+ A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
- self.assertRaises(ValueError, np.char.array, (A,))
+ assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
- assert_array_equal(B, [['abc', sixu('Sigma \u03a3')],
+ assert_array_equal(B, [['abc', u'Sigma \u03a3'],
['long', '0123456789']])
def test_from_string_array(self):
- A = np.array(asbytes_nested([['abc', 'foo'],
- ['long ', '0123456789']]))
+ A = np.array([[b'abc', b'foo'],
+ [b'long ', b'0123456789']])
assert_equal(A.dtype.type, np.string_)
B = np.char.array(A)
assert_array_equal(B, A)
@@ -48,7 +48,7 @@ class TestBasic(TestCase):
assert_(C[0, 0] == A[0, 0])
def test_from_unicode_array(self):
- A = np.array([['abc', sixu('Sigma \u03a3')],
+ A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.unicode_)
B = np.char.array(A)
@@ -63,79 +63,79 @@ class TestBasic(TestCase):
def fail():
np.char.array(A, **kw_unicode_false)
- self.assertRaises(UnicodeEncodeError, fail)
+ assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
- B = np.char.array([sixu('\u03a3')])
+ B = np.char.array([u'\u03a3'])
assert_(issubclass((A + B).dtype.type, np.unicode_))
def test_from_string(self):
- A = np.char.array(asbytes('abc'))
+ A = np.char.array(b'abc')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 3)
assert_(issubclass(A.dtype.type, np.string_))
def test_from_unicode(self):
- A = np.char.array(sixu('\u03a3'))
+ A = np.char.array(u'\u03a3')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
-class TestVecString(TestCase):
+class TestVecString(object):
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
- self.assertRaises(AttributeError, fail)
+ assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.integer, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
-class TestWhitespace(TestCase):
- def setUp(self):
+class TestWhitespace(object):
+ def setup(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
@@ -149,16 +149,16 @@ class TestWhitespace(TestCase):
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
-class TestChar(TestCase):
- def setUp(self):
+class TestChar(object):
+ def setup(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
- assert_equal(self.A.upper()[:2].tobytes(), asbytes('AB'))
+ assert_equal(self.A.upper()[:2].tobytes(), b'AB')
-class TestComparisons(TestCase):
- def setUp(self):
+class TestComparisons(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
@@ -185,27 +185,27 @@ class TestComparisons(TestCase):
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
-class TestInformation(TestCase):
- def setUp(self):
+class TestInformation(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
- self.B = np.array([[sixu(' \u03a3 '), sixu('')],
- [sixu('12345'), sixu('MixedCase')],
- [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
+ self.B = np.array([[u' \u03a3 ', u''],
+ [u'12345', u'MixedCase'],
+ [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_len(self):
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
@@ -231,7 +231,7 @@ class TestInformation(TestCase):
def fail():
self.A.endswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
@@ -245,7 +245,7 @@ class TestInformation(TestCase):
def fail():
self.A.index('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
@@ -289,7 +289,7 @@ class TestInformation(TestCase):
def fail():
self.A.rindex('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
@@ -301,27 +301,27 @@ class TestInformation(TestCase):
def fail():
self.A.startswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
-class TestMethods(TestCase):
- def setUp(self):
+class TestMethods(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
dtype='S').view(np.chararray)
- self.B = np.array([[sixu(' \u03a3 '), sixu('')],
- [sixu('12345'), sixu('MixedCase')],
- [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
+ self.B = np.array([[u' \u03a3 ', u''],
+ [u'12345', u'MixedCase'],
+ [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_capitalize(self):
- tgt = asbytes_nested([[' abc ', ''],
- ['12345', 'Mixedcase'],
- ['123 \t 345 \0 ', 'Upper']])
+ tgt = [[b' abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
assert_array_equal(self.A.capitalize(), tgt)
- tgt = [[sixu(' \u03c3 '), ''],
+ tgt = [[u' \u03c3 ', ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']]
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
@@ -332,23 +332,26 @@ class TestMethods(TestCase):
C = self.A.center([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
- C = self.A.center(20, asbytes('#'))
- assert_(np.all(C.startswith(asbytes('#'))))
- assert_(np.all(C.endswith(asbytes('#'))))
+ C = self.A.center(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_(np.all(C.endswith(b'#')))
- C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]])
- tgt = asbytes_nested([[' FOO ', ' FOO '],
- [' FOO ', ' FOO ']])
+ C = np.char.center(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO ', b' FOO '],
+ [b' FOO ', b' FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_decode(self):
if sys.version_info[0] >= 3:
- A = np.char.array([asbytes('\\u03a3')])
+ A = np.char.array([b'\\u03a3'])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
else:
- A = np.char.array(['736563726574206d657373616765'])
- assert_(A.decode('hex_codec')[0] == 'secret message')
+ with suppress_warnings() as sup:
+ if sys.py3kwarning:
+ sup.filter(DeprecationWarning, "'hex_codec'")
+ A = np.char.array(['736563726574206d657373616765'])
+ assert_(A.decode('hex_codec')[0] == 'secret message')
def test_encode(self):
B = self.B.encode('unicode_escape')
@@ -356,7 +359,7 @@ class TestMethods(TestCase):
def test_expandtabs(self):
T = self.A.expandtabs()
- assert_(T[2, 0] == asbytes('123 345 \0'))
+ assert_(T[2, 0] == b'123 345 \0')
def test_join(self):
if sys.version_info[0] >= 3:
@@ -382,70 +385,70 @@ class TestMethods(TestCase):
C = self.A.ljust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
- C = self.A.ljust(20, asbytes('#'))
- assert_array_equal(C.startswith(asbytes('#')), [
+ C = self.A.ljust(20, b'#')
+ assert_array_equal(C.startswith(b'#'), [
[False, True], [False, False], [False, False]])
- assert_(np.all(C.endswith(asbytes('#'))))
+ assert_(np.all(C.endswith(b'#')))
- C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]])
- tgt = asbytes_nested([['FOO ', 'FOO '],
- ['FOO ', 'FOO ']])
+ C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b'FOO ', b'FOO '],
+ [b'FOO ', b'FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_lower(self):
- tgt = asbytes_nested([[' abc ', ''],
- ['12345', 'mixedcase'],
- ['123 \t 345 \0 ', 'upper']])
+ tgt = [[b' abc ', b''],
+ [b'12345', b'mixedcase'],
+ [b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.lower().dtype.type, np.string_))
assert_array_equal(self.A.lower(), tgt)
- tgt = [[sixu(' \u03c3 '), sixu('')],
- [sixu('12345'), sixu('mixedcase')],
- [sixu('123 \t 345 \0 '), sixu('upper')]]
+ tgt = [[u' \u03c3 ', u''],
+ [u'12345', u'mixedcase'],
+ [u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
assert_array_equal(self.B.lower(), tgt)
def test_lstrip(self):
- tgt = asbytes_nested([['abc ', ''],
- ['12345', 'MixedCase'],
- ['123 \t 345 \0 ', 'UPPER']])
+ tgt = [[b'abc ', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
assert_array_equal(self.A.lstrip(), tgt)
- tgt = asbytes_nested([[' abc', ''],
- ['2345', 'ixedCase'],
- ['23 \t 345 \x00', 'UPPER']])
- assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])), tgt)
+ tgt = [[b' abc', b''],
+ [b'2345', b'ixedCase'],
+ [b'23 \t 345 \x00', b'UPPER']]
+ assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
- tgt = [[sixu('\u03a3 '), ''],
+ tgt = [[u'\u03a3 ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.lstrip(), tgt)
def test_partition(self):
- P = self.A.partition(asbytes_nested(['3', 'M']))
- tgt = asbytes_nested([[(' abc ', '', ''), ('', '', '')],
- [('12', '3', '45'), ('', 'M', 'ixedCase')],
- [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]])
+ P = self.A.partition([b'3', b'M'])
+ tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_replace(self):
- R = self.A.replace(asbytes_nested(['3', 'a']),
- asbytes_nested(['##########', '@']))
- tgt = asbytes_nested([[' abc ', ''],
- ['12##########45', 'MixedC@se'],
- ['12########## \t ##########45 \x00', 'UPPER']])
+ R = self.A.replace([b'3', b'a'],
+ [b'##########', b'@'])
+ tgt = [[b' abc ', b''],
+ [b'12##########45', b'MixedC@se'],
+ [b'12########## \t ##########45 \x00', b'UPPER']]
assert_(issubclass(R.dtype.type, np.string_))
assert_array_equal(R, tgt)
if sys.version_info[0] < 3:
# NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
- R = self.A.replace(asbytes('a'), sixu('\u03a3'))
- tgt = [[sixu(' \u03a3bc '), ''],
- ['12345', sixu('MixedC\u03a3se')],
+ R = self.A.replace(b'a', u'\u03a3')
+ tgt = [[u' \u03a3bc ', ''],
+ ['12345', u'MixedC\u03a3se'],
['123 \t 345 \x00', 'UPPER']]
assert_(issubclass(R.dtype.type, np.unicode_))
assert_array_equal(R, tgt)
@@ -456,77 +459,77 @@ class TestMethods(TestCase):
C = self.A.rjust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
- C = self.A.rjust(20, asbytes('#'))
- assert_(np.all(C.startswith(asbytes('#'))))
- assert_array_equal(C.endswith(asbytes('#')),
+ C = self.A.rjust(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_array_equal(C.endswith(b'#'),
[[False, True], [False, False], [False, False]])
- C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]])
- tgt = asbytes_nested([[' FOO', ' FOO'],
- [' FOO', ' FOO']])
+ C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO', b' FOO'],
+ [b' FOO', b' FOO']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_rpartition(self):
- P = self.A.rpartition(asbytes_nested(['3', 'M']))
- tgt = asbytes_nested([[('', '', ' abc '), ('', '', '')],
- [('12', '3', '45'), ('', 'M', 'ixedCase')],
- [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]])
+ P = self.A.rpartition([b'3', b'M'])
+ tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_rsplit(self):
- A = self.A.rsplit(asbytes('3'))
- tgt = asbytes_nested([[[' abc '], ['']],
- [['12', '45'], ['MixedCase']],
- [['12', ' \t ', '45 \x00 '], ['UPPER']]])
+ A = self.A.rsplit(b'3')
+ tgt = [[[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_rstrip(self):
assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
- tgt = asbytes_nested([[' abc', ''],
- ['12345', 'MixedCase'],
- ['123 \t 345', 'UPPER']])
+ tgt = [[b' abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
assert_array_equal(self.A.rstrip(), tgt)
- tgt = asbytes_nested([[' abc ', ''],
- ['1234', 'MixedCase'],
- ['123 \t 345 \x00', 'UPP']
- ])
- assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])), tgt)
+ tgt = [[b' abc ', b''],
+ [b'1234', b'MixedCase'],
+ [b'123 \t 345 \x00', b'UPP']
+ ]
+ assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
- tgt = [[sixu(' \u03a3'), ''],
+ tgt = [[u' \u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.rstrip(), tgt)
def test_strip(self):
- tgt = asbytes_nested([['abc', ''],
- ['12345', 'MixedCase'],
- ['123 \t 345', 'UPPER']])
+ tgt = [[b'abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
assert_(issubclass(self.A.strip().dtype.type, np.string_))
assert_array_equal(self.A.strip(), tgt)
- tgt = asbytes_nested([[' abc ', ''],
- ['234', 'ixedCas'],
- ['23 \t 345 \x00', 'UPP']])
- assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])), tgt)
+ tgt = [[b' abc ', b''],
+ [b'234', b'ixedCas'],
+ [b'23 \t 345 \x00', b'UPP']]
+ assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
- tgt = [[sixu('\u03a3'), ''],
+ tgt = [[u'\u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
assert_array_equal(self.B.strip(), tgt)
def test_split(self):
- A = self.A.split(asbytes('3'))
- tgt = asbytes_nested([
- [[' abc '], ['']],
- [['12', '45'], ['MixedCase']],
- [['12', ' \t ', '45 \x00 '], ['UPPER']]])
+ A = self.A.split(b'3')
+ tgt = [
+ [[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
@@ -537,41 +540,41 @@ class TestMethods(TestCase):
assert_(len(A[0]) == 3)
def test_swapcase(self):
- tgt = asbytes_nested([[' ABC ', ''],
- ['12345', 'mIXEDcASE'],
- ['123 \t 345 \0 ', 'upper']])
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'mIXEDcASE'],
+ [b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
assert_array_equal(self.A.swapcase(), tgt)
- tgt = [[sixu(' \u03c3 '), sixu('')],
- [sixu('12345'), sixu('mIXEDcASE')],
- [sixu('123 \t 345 \0 '), sixu('upper')]]
+ tgt = [[u' \u03c3 ', u''],
+ [u'12345', u'mIXEDcASE'],
+ [u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
assert_array_equal(self.B.swapcase(), tgt)
def test_title(self):
- tgt = asbytes_nested([[' Abc ', ''],
- ['12345', 'Mixedcase'],
- ['123 \t 345 \0 ', 'Upper']])
+ tgt = [[b' Abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.title().dtype.type, np.string_))
assert_array_equal(self.A.title(), tgt)
- tgt = [[sixu(' \u03a3 '), sixu('')],
- [sixu('12345'), sixu('Mixedcase')],
- [sixu('123 \t 345 \0 '), sixu('Upper')]]
+ tgt = [[u' \u03a3 ', u''],
+ [u'12345', u'Mixedcase'],
+ [u'123 \t 345 \0 ', u'Upper']]
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
assert_array_equal(self.B.title(), tgt)
def test_upper(self):
- tgt = asbytes_nested([[' ABC ', ''],
- ['12345', 'MIXEDCASE'],
- ['123 \t 345 \0 ', 'UPPER']])
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'MIXEDCASE'],
+ [b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.upper().dtype.type, np.string_))
assert_array_equal(self.A.upper(), tgt)
- tgt = [[sixu(' \u03a3 '), sixu('')],
- [sixu('12345'), sixu('MIXEDCASE')],
- [sixu('123 \t 345 \0 '), sixu('UPPER')]]
+ tgt = [[u' \u03a3 ', u''],
+ [u'12345', u'MIXEDCASE'],
+ [u'123 \t 345 \0 ', u'UPPER']]
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
assert_array_equal(self.B.upper(), tgt)
@@ -580,7 +583,7 @@ class TestMethods(TestCase):
def fail():
self.A.isnumeric()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
@@ -590,14 +593,14 @@ class TestMethods(TestCase):
def fail():
self.A.isdecimal()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
-class TestOperations(TestCase):
- def setUp(self):
+class TestOperations(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
@@ -623,12 +626,9 @@ class TestOperations(TestCase):
assert_array_equal(Ar, (self.A * r))
for ob in [object(), 'qrs']:
- try:
- A * ob
- except ValueError:
- pass
- else:
- self.fail("chararray can only be multiplied by integers")
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ A*ob
def test_rmul(self):
A = self.A
@@ -638,12 +638,9 @@ class TestOperations(TestCase):
assert_array_equal(Ar, (r * self.A))
for ob in [object(), 'qrs']:
- try:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
ob * A
- except ValueError:
- pass
- else:
- self.fail("chararray can only be multiplied by integers")
def test_mod(self):
"""Ticket #856"""
@@ -665,13 +662,9 @@ class TestOperations(TestCase):
assert_(("%r" % self.A) == repr(self.A))
for ob in [42, object()]:
- try:
+ with assert_raises_regex(
+ TypeError, "unsupported operand type.* and 'chararray'"):
ob % self.A
- except TypeError:
- pass
- else:
- self.fail("chararray __rmod__ should fail with "
- "non-string objects")
def test_slice(self):
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
@@ -688,7 +681,7 @@ class TestOperations(TestCase):
assert_(sl2.base is arr)
assert_(sl2.base.base is arr.base)
- assert_(arr[0, 0] == asbytes('abc'))
+ assert_(arr[0, 0] == b'abc')
def test_empty_indexing():
@@ -697,7 +690,3 @@ def test_empty_indexing():
# empty chararray instead of a chararray with a single empty string in it.
s = np.chararray((4,))
assert_(s[[]].size == 0)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e6d3cd261..edb5d5e46 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -9,11 +9,12 @@ import datetime
import sys
import operator
import warnings
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_raises, assert_warns, assert_no_warnings,
- assert_array_equal, assert_, dec)
+ assert_raises, assert_warns, assert_
+ )
try:
import pytz
@@ -22,134 +23,33 @@ except ImportError:
_has_pytz = False
-class _VisibleDeprecationTestCase(object):
+class _DeprecationTestCase(object):
# Just as warning: warnings uses re.match, so the start of this message
# must match.
message = ''
+ warning_cls = DeprecationWarning
- def setUp(self):
+ def setup(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
- # http://bugs.python.org/issue4180 and it is probably simplest to
+ # https://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
# (While checking them set to "error" those are ignored anyway)
# We still have them show up, because otherwise they would be raised
- warnings.filterwarnings("always", category=np.VisibleDeprecationWarning)
+ warnings.filterwarnings("always", category=self.warning_cls)
warnings.filterwarnings("always", message=self.message,
- category=np.VisibleDeprecationWarning)
+ category=self.warning_cls)
- def tearDown(self):
+ def teardown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
- exceptions=(np.VisibleDeprecationWarning,),
+ exceptions=np._NoValue,
args=(), kwargs={}):
- """Test if VisibleDeprecationWarnings are given and raised.
-
- This first checks if the function when called gives `num`
- VisibleDeprecationWarnings, after that it tries to raise these
- VisibleDeprecationWarnings and compares them with `exceptions`.
- The exceptions can be different for cases where this code path
- is simply not anticipated and the exception is replaced.
-
- Parameters
- ----------
- function : callable
- The function to test
- num : int
- Number of VisibleDeprecationWarnings to expect. This should
- normally be 1.
- ignore_others : bool
- Whether warnings of the wrong type should be ignored (note that
- the message is not checked)
- function_fails : bool
- If the function would normally fail, setting this will check for
- warnings inside a try/except block.
- exceptions : Exception or tuple of Exceptions
- Exception to expect when turning the warnings into an error.
- The default checks for DeprecationWarnings. If exceptions is
- empty the function is expected to run successfully.
- args : tuple
- Arguments for `function`
- kwargs : dict
- Keyword arguments for `function`
- """
- # reset the log
- self.log[:] = []
-
- try:
- function(*args, **kwargs)
- except (Exception if function_fails else tuple()):
- pass
-
- # just in case, clear the registry
- num_found = 0
- for warning in self.log:
- if warning.category is np.VisibleDeprecationWarning:
- num_found += 1
- elif not ignore_others:
- raise AssertionError(
- "expected DeprecationWarning but got: %s" %
- (warning.category,))
- if num is not None and num_found != num:
- msg = "%i warnings found but %i expected." % (len(self.log), num)
- lst = [w.category for w in self.log]
- raise AssertionError("\n".join([msg] + lst))
-
- with warnings.catch_warnings():
- warnings.filterwarnings("error", message=self.message,
- category=np.VisibleDeprecationWarning)
- try:
- function(*args, **kwargs)
- if exceptions != tuple():
- raise AssertionError(
- "No error raised during function call")
- except exceptions:
- if exceptions == tuple():
- raise AssertionError(
- "Error raised during function call")
-
- def assert_not_deprecated(self, function, args=(), kwargs={}):
- """Test if VisibleDeprecationWarnings are given and raised.
-
- This is just a shorthand for:
-
- self.assert_deprecated(function, num=0, ignore_others=True,
- exceptions=tuple(), args=args, kwargs=kwargs)
- """
- self.assert_deprecated(function, num=0, ignore_others=True,
- exceptions=tuple(), args=args, kwargs=kwargs)
-
-
-class _DeprecationTestCase(object):
- # Just as warning: warnings uses re.match, so the start of this message
- # must match.
- message = ''
-
- def setUp(self):
- self.warn_ctx = warnings.catch_warnings(record=True)
- self.log = self.warn_ctx.__enter__()
-
- # Do *not* ignore other DeprecationWarnings. Ignoring warnings
- # can give very confusing results because of
- # http://bugs.python.org/issue4180 and it is probably simplest to
- # try to keep the tests cleanly giving only the right warning type.
- # (While checking them set to "error" those are ignored anyway)
- # We still have them show up, because otherwise they would be raised
- warnings.filterwarnings("always", category=DeprecationWarning)
- warnings.filterwarnings("always", message=self.message,
- category=DeprecationWarning)
-
- def tearDown(self):
- self.warn_ctx.__exit__()
-
- def assert_deprecated(self, function, num=1, ignore_others=False,
- function_fails=False,
- exceptions=(DeprecationWarning,), args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This first checks if the function when called gives `num`
@@ -182,6 +82,9 @@ class _DeprecationTestCase(object):
# reset the log
self.log[:] = []
+ if exceptions is np._NoValue:
+ exceptions = (self.warning_cls,)
+
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
@@ -190,20 +93,20 @@ class _DeprecationTestCase(object):
# just in case, clear the registry
num_found = 0
for warning in self.log:
- if warning.category is DeprecationWarning:
+ if warning.category is self.warning_cls:
num_found += 1
elif not ignore_others:
raise AssertionError(
- "expected DeprecationWarning but got: %s" %
- (warning.category,))
+ "expected %s but got: %s" %
+ (self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
- lst = [w.category for w in self.log]
+ lst = [str(w.category) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
- category=DeprecationWarning)
+ category=self.warning_cls)
try:
function(*args, **kwargs)
if exceptions != tuple():
@@ -215,7 +118,7 @@ class _DeprecationTestCase(object):
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
- """Test if DeprecationWarnings are given and raised.
+ """Test that warnings are not raised.
This is just a shorthand for:
@@ -226,40 +129,24 @@ class _DeprecationTestCase(object):
exceptions=tuple(), args=args, kwargs=kwargs)
-class TestBooleanUnaryMinusDeprecation(_DeprecationTestCase):
- """Test deprecation of unary boolean `-`. While + and * are well
- defined, unary - is not and even a corrected form seems to have
- no real uses.
-
- The deprecation process was started in NumPy 1.9.
- """
- message = r"numpy boolean negative, the `-` operator, .*"
+class _VisibleDeprecationTestCase(_DeprecationTestCase):
+ warning_cls = np.VisibleDeprecationWarning
- def test_unary_minus_operator_deprecation(self):
- array = np.array([True])
- generic = np.bool_(True)
- # Unary minus/negative ufunc:
- self.assert_deprecated(operator.neg, args=(array,))
- self.assert_deprecated(operator.neg, args=(generic,))
-
-
-class TestBooleanBinaryMinusDeprecation(_DeprecationTestCase):
- """Test deprecation of binary boolean `-`. While + and * are well
- defined, binary - is not and even a corrected form seems to have
- no real uses.
-
- The deprecation process was started in NumPy 1.9.
- """
- message = r"numpy boolean subtract, the `-` operator, .*"
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
- def test_operator_deprecation(self):
- array = np.array([True])
- generic = np.bool_(True)
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
- # Minus operator/subtract ufunc:
- self.assert_deprecated(operator.sub, args=(array, array))
- self.assert_deprecated(operator.sub, args=(generic, generic))
+ # a[[0, 1]] always was advanced indexing, so no error/warning
+ a[[0, 1]]
class TestRankDeprecation(_DeprecationTestCase):
@@ -302,44 +189,10 @@ class TestComparisonDeprecations(_DeprecationTestCase):
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
- # The empty list is not cast to string, as this is only to document
- # that fact (it likely should be changed). This means that the
- # following works (and returns False) due to dtype mismatch:
- a == []
-
- def test_none_comparison(self):
- # Test comparison of None, which should result in element-wise
- # comparison in the future. [1, 2] == None should be [False, False].
- with warnings.catch_warnings():
- warnings.filterwarnings('always', '', FutureWarning)
- assert_warns(FutureWarning, operator.eq, np.arange(3), None)
- assert_warns(FutureWarning, operator.ne, np.arange(3), None)
-
- with warnings.catch_warnings():
- warnings.filterwarnings('error', '', FutureWarning)
- assert_raises(FutureWarning, operator.eq, np.arange(3), None)
- assert_raises(FutureWarning, operator.ne, np.arange(3), None)
-
- def test_scalar_none_comparison(self):
- # Scalars should still just return False and not give a warnings.
- # The comparisons are flagged by pep8, ignore that.
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', FutureWarning)
- assert_(not np.float32(1) == None)
- assert_(not np.str_('test') == None)
- # This is dubious (see below):
- assert_(not np.datetime64('NaT') == None)
-
- assert_(np.float32(1) != None)
- assert_(np.str_('test') != None)
- # This is dubious (see below):
- assert_(np.datetime64('NaT') != None)
- assert_(len(w) == 0)
-
- # For documentation purposes, this is why the datetime is dubious.
- # At the time of deprecation this was no behaviour change, but
- # it has to be considered when the deprecations are done.
- assert_(np.equal(np.datetime64('NaT'), None))
+ # The empty list is not cast to string, and this used to pass due
+ # to dtype mismatch; now (2018-06-21) it correctly leads to a
+ # FutureWarning.
+ assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray(object):
@@ -393,98 +246,6 @@ class TestComparisonDeprecations(_DeprecationTestCase):
assert_warns(DeprecationWarning, f, arg1, arg2)
-class TestIdentityComparisonDeprecations(_DeprecationTestCase):
- """This tests the equal and not_equal object ufuncs identity check
- deprecation. This was due to the usage of PyObject_RichCompareBool.
-
- This tests that for example for `a = np.array([np.nan], dtype=object)`
- `a == a` it is warned that False and not `np.nan is np.nan` is returned.
-
- Should be kept in sync with TestComparisonDeprecations and new tests
- added when the deprecation is over. Requires only removing of @identity@
- (and blocks) from the ufunc loops.c.src of the OBJECT comparisons.
- """
-
- message = "numpy .* will not check object identity in the future."
-
- def test_identity_equality_mismatch(self):
- a = np.array([np.nan], dtype=object)
-
- with warnings.catch_warnings():
- warnings.filterwarnings('always', '', FutureWarning)
- assert_warns(FutureWarning, np.equal, a, a)
- assert_warns(FutureWarning, np.not_equal, a, a)
-
- with warnings.catch_warnings():
- warnings.filterwarnings('error', '', FutureWarning)
- assert_raises(FutureWarning, np.equal, a, a)
- assert_raises(FutureWarning, np.not_equal, a, a)
- # And the other do not warn:
- with np.errstate(invalid='ignore'):
- np.less(a, a)
- np.greater(a, a)
- np.less_equal(a, a)
- np.greater_equal(a, a)
-
- def test_comparison_error(self):
- class FunkyType(object):
- def __eq__(self, other):
- raise TypeError("I won't compare")
-
- def __ne__(self, other):
- raise TypeError("I won't compare")
-
- a = np.array([FunkyType()])
- self.assert_deprecated(np.equal, args=(a, a))
- self.assert_deprecated(np.not_equal, args=(a, a))
-
- def test_bool_error(self):
- # The comparison result cannot be interpreted as a bool
- a = np.array([np.array([1, 2, 3]), None], dtype=object)
- self.assert_deprecated(np.equal, args=(a, a))
- self.assert_deprecated(np.not_equal, args=(a, a))
-
-
-class TestAlterdotRestoredotDeprecations(_DeprecationTestCase):
- """The alterdot/restoredot functions are deprecated.
-
- These functions no longer do anything in numpy 1.10, so
- they should not be used.
-
- """
-
- def test_alterdot_restoredot_deprecation(self):
- self.assert_deprecated(np.alterdot)
- self.assert_deprecated(np.restoredot)
-
-
-class TestBooleanIndexShapeMismatchDeprecation():
- """Tests deprecation for boolean indexing where the boolean array
- does not match the input array along the given dimensions.
- """
- message = r"boolean index did not match indexed array"
-
- def test_simple(self):
- arr = np.ones((5, 4, 3))
- index = np.array([True])
- #self.assert_deprecated(arr.__getitem__, args=(index,))
- assert_warns(np.VisibleDeprecationWarning,
- arr.__getitem__, index)
-
- index = np.array([False] * 6)
- #self.assert_deprecated(arr.__getitem__, args=(index,))
- assert_warns(np.VisibleDeprecationWarning,
- arr.__getitem__, index)
-
- index = np.zeros((4, 4), dtype=bool)
- #self.assert_deprecated(arr.__getitem__, args=(index,))
- assert_warns(np.VisibleDeprecationWarning,
- arr.__getitem__, index)
- #self.assert_deprecated(arr.__getitem__, args=((slice(None), index),))
- assert_warns(np.VisibleDeprecationWarning,
- arr.__getitem__, (slice(None), index))
-
-
class TestDatetime64Timezone(_DeprecationTestCase):
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
datetime64 is now timezone naive rather than UTC only.
@@ -498,7 +259,8 @@ class TestDatetime64Timezone(_DeprecationTestCase):
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
- @dec.skipif(not _has_pytz, "The pytz module is not available.")
+ @pytest.mark.skipif(not _has_pytz,
+ reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
@@ -515,7 +277,7 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
"""
def test_fortran_contiguous(self):
- self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
@@ -609,18 +371,165 @@ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTest
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+class TestNumericStyleTypecodes(_DeprecationTestCase):
+ """
+ Deprecate the old numeric-style dtypes, which are especially
+ confusing for complex types, e.g. Complex32 -> complex64. When the
+ deprecation cycle is complete, the check for the strings should be
+ removed from PyArray_DescrConverter in descriptor.c, and the
+ deprecated keys should not be added as capitalized aliases in
+ _add_aliases in numerictypes.py.
+ """
+ def test_all_dtypes(self):
+ deprecated_types = [
+ 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
+ ]
+ if sys.version_info[0] < 3:
+ deprecated_types.extend(['Unicode0', 'String0'])
+
+ for dt in deprecated_types:
+ self.assert_deprecated(np.dtype, exceptions=(TypeError,),
+ args=(dt,))
+
+
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
- test_case_instance.setUp()
+ test_case_instance.setup()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
- warnings.warn("foo", category=DeprecationWarning)
+ warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
+ test_case_instance.teardown()
+
-if __name__ == "__main__":
- run_module_suite()
+class TestClassicIntDivision(_DeprecationTestCase):
+ """
+ See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
+ if used for division
+ List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
+ """
+ def test_int_dtypes(self):
+ #scramble types and do some mix and match testing
+ deprecated_types = [
+ 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
+ 'intp', 'int64', 'uint32', 'int16'
+ ]
+ if sys.version_info[0] < 3 and sys.py3kwarning:
+ import operator as op
+ dt2 = 'bool_'
+ for dt1 in deprecated_types:
+ a = np.array([1,2,3], dtype=dt1)
+ b = np.array([1,2,3], dtype=dt2)
+ self.assert_deprecated(op.div, args=(a,b))
+ dt2 = dt1
+
+
+class TestNonNumericConjugate(_DeprecationTestCase):
+ """
+ Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
+ which conflicts with the error behavior of np.conjugate.
+ """
+ def test_conjugate(self):
+ for a in np.array(5), np.array(5j):
+ self.assert_not_deprecated(a.conjugate)
+ for a in (np.array('s'), np.array('2016', 'M'),
+ np.array((1, 2), [('a', int), ('b', int)])):
+ self.assert_deprecated(a.conjugate)
+
+
+class TestNPY_CHAR(_DeprecationTestCase):
+ # 2017-05-03, 1.13.0
+ def test_npy_char_deprecation(self):
+ from numpy.core._multiarray_tests import npy_char_deprecation
+ self.assert_deprecated(npy_char_deprecation)
+ assert_(npy_char_deprecation() == 'S1')
+
+
+class Test_UPDATEIFCOPY(_DeprecationTestCase):
+ """
+ v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
+ WRITEBACKIFCOPY instead
+ """
+ def test_npy_updateifcopy_deprecation(self):
+ from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
+
+
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
+class TestTruthTestingEmptyArrays(_DeprecationTestCase):
+ # 2017-09-25, 1.14.0
+ message = '.*truth value of an empty array is ambiguous.*'
+
+ def test_1d(self):
+ self.assert_deprecated(bool, args=(np.array([]),))
+
+ def test_2d(self):
+ self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
+
+
+class TestBincount(_DeprecationTestCase):
+ # 2017-06-01, 1.14.0
+ def test_bincount_minlength(self):
+ self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+
+
+class TestGeneratorSum(_DeprecationTestCase):
+ # 2018-02-25, 1.15.0
+ def test_generator_sum(self):
+ self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestSctypeNA(_VisibleDeprecationTestCase):
+ # 2018-06-24, 1.16
+ def test_sctypeNA(self):
+ self.assert_deprecated(lambda: np.sctypeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA.get('?'))
+
+
+class TestPositiveOnNonNumerical(_DeprecationTestCase):
+ # 2018-06-28, 1.16.0
+ def test_positive_on_non_number(self):
+ self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
+class TestFromstring(_DeprecationTestCase):
+ # 2017-10-19, 1.14
+ def test_fromstring(self):
+ self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+ # 2018-09-20, 1.16.0
+ def test_get_numeric_ops(self):
+ from numpy.core._multiarray_tests import getset_numericops
+ self.assert_deprecated(getset_numericops, num=2)
+
+ # empty kwargs prevents any state actually changing which would break
+ # other tests.
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index a6cb66b7d..c55751e3c 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,13 +1,14 @@
from __future__ import division, absolute_import, print_function
import sys
+import operator
+import pytest
+import ctypes
import numpy as np
-from numpy.core.test_rational import rational
-from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- dec
-)
+from numpy.core._rational_tests import rational
+from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.core.numeric import pickle
def assert_dtype_equal(a, b):
assert_equal(a, b)
@@ -19,27 +20,27 @@ def assert_dtype_not_equal(a, b):
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
-class TestBuiltin(TestCase):
- def test_run(self):
+class TestBuiltin(object):
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.unicode])
+ def test_run(self, t):
"""Only test hash runs at all."""
- for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
- np.unicode]:
- dt = np.dtype(t)
- hash(dt)
+ dt = np.dtype(t)
+ hash(dt)
- def test_dtype(self):
+ @pytest.mark.parametrize('t', [int, float])
+ def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
- for t in [np.int, np.float]:
- dt = np.dtype(t)
- dt2 = dt.newbyteorder("<")
- dt3 = dt.newbyteorder(">")
- if dt == dt2:
- self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
- assert_dtype_equal(dt, dt2)
- else:
- self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
- assert_dtype_equal(dt, dt3)
+ dt = np.dtype(t)
+ dt2 = dt.newbyteorder("<")
+ dt3 = dt.newbyteorder(">")
+ if dt == dt2:
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt2)
+ else:
+ assert_(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
@@ -50,8 +51,8 @@ class TestBuiltin(TestCase):
else:
left = uintp
right = np.dtype(np.ulonglong)
- self.assertTrue(left == right)
- self.assertTrue(hash(left) == hash(right))
+ assert_(left == right)
+ assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
@@ -103,17 +104,26 @@ class TestBuiltin(TestCase):
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
-class TestRecord(TestCase):
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+
+class TestRecord(object):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
@@ -128,9 +138,9 @@ class TestRecord(TestCase):
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
- c = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
@@ -145,10 +155,10 @@ class TestRecord(TestCase):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
- self.assertRaises(TypeError, np.dtype,
- dict(names=set(['A', 'B']), formats=['f8', 'i4']))
- self.assertRaises(TypeError, np.dtype,
- dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
+ assert_raises(TypeError, np.dtype,
+ dict(names={'A', 'B'}, formats=['f8', 'i4']))
+ assert_raises(TypeError, np.dtype,
+ dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
@@ -196,6 +206,14 @@ class TestRecord(TestCase):
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
+ # Array of subtype should preserve alignment
+ dt1 = np.dtype([('a', '|i1'),
+ ('b', [('f0', '<i2'),
+ ('f1', '<f4')], 2)], align=True)
+ assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
+ ('b', [('f0', '<i2'), ('', '|V2'),
+ ('f1', '<f4')], (2,))])
+
def test_union_struct(self):
# Should be able to create union dtypes
@@ -210,11 +228,12 @@ class TestRecord(TestCase):
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
+ # field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
- 'formats':['<u2', '<u4', '<u2'],
- 'offsets':[2, 4, 0]}, align=True)
+ 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
- vals2 = [(2, 0, 1), (4, 3, -1)]
+ vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
@@ -256,17 +275,55 @@ class TestRecord(TestCase):
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
+ def test_from_dict_with_zero_width_field(self):
+ # Regression test for #6430 / #2196
+ dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
+ dt2 = np.dtype({'names': ['val1', 'val2'],
+ 'formats': [(np.float32, (0,)), int]})
+
+ assert_dtype_equal(dt, dt2)
+ assert_equal(dt.fields['val1'][0].itemsize, 0)
+ assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
+
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
+ def test_nonint_offsets(self):
+ # gh-8059
+ def make_dtype(off):
+ return np.dtype({'names': ['A'], 'formats': ['i4'],
+ 'offsets': [off]})
+
+ assert_raises(TypeError, make_dtype, 'ASD')
+ assert_raises(OverflowError, make_dtype, 2**70)
+ assert_raises(TypeError, make_dtype, 2.3)
+ assert_raises(ValueError, make_dtype, -10)
+
+ # no errors here:
+ dt = make_dtype(np.uint32(0))
+ np.zeros(1, dtype=dt)[0].item()
+
+ def test_fields_by_index(self):
+ dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
+ assert_dtype_equal(dt[0], np.dtype(np.int8))
+ assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
+ assert_dtype_equal(dt[-1], dt[1])
+ assert_dtype_equal(dt[-2], dt[0])
+ assert_raises(IndexError, lambda: dt[-3])
-class TestSubarray(TestCase):
+ assert_raises(TypeError, operator.getitem, dt, 3.0)
+ assert_raises(TypeError, operator.getitem, dt, [])
+
+ assert_equal(dt[1], dt[np.int8(1)])
+
+
+class TestSubarray(object):
def test_single_subarray(self):
- a = np.dtype((np.int, (2)))
- b = np.dtype((np.int, (2,)))
+ a = np.dtype((int, (2)))
+ b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
@@ -274,29 +331,29 @@ class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 3)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (3, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (1, 2, 3)))
- b = np.dtype((np.int, (1, 2)))
+ a = np.dtype((int, (1, 2, 3)))
+ b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
- assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,))))
+ assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
@@ -351,6 +408,23 @@ class TestSubarray(TestCase):
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
+ def test_shape_matches_ndim(self):
+ dt = np.dtype([('a', 'f4', ())])
+ assert_equal(dt['a'].shape, ())
+ assert_equal(dt['a'].ndim, 0)
+
+ dt = np.dtype([('a', 'f4')])
+ assert_equal(dt['a'].shape, ())
+ assert_equal(dt['a'].ndim, 0)
+
+ dt = np.dtype([('a', 'f4', 4)])
+ assert_equal(dt['a'].shape, (4,))
+ assert_equal(dt['a'].ndim, 1)
+
+ dt = np.dtype([('a', 'f4', (1, 2, 3))])
+ assert_equal(dt['a'].shape, (1, 2, 3))
+ assert_equal(dt['a'].ndim, 3)
+
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
@@ -372,47 +446,47 @@ class TestSubarray(TestCase):
assert_equal(t1.alignment, t2.alignment)
-class TestMonsterType(TestCase):
+class TestMonsterType(object):
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
- a = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
- b = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
+ a = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
+ b = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
- c = np.dtype([('yo', np.int), ('ye', simple1),
+ c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
- d = np.dtype([('yo', np.int), ('ye', simple1),
+ d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
-class TestMetadata(TestCase):
+class TestMetadata(object):
def test_no_metadata(self):
d = np.dtype(int)
- self.assertEqual(d.metadata, None)
+ assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
- self.assertEqual(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
- self.assertRaises(TypeError, np.dtype, int, metadata='datum')
- self.assertRaises(TypeError, np.dtype, int, metadata=1)
- self.assertRaises(TypeError, np.dtype, int, metadata=None)
+ assert_raises(TypeError, np.dtype, int, metadata='datum')
+ assert_raises(TypeError, np.dtype, int, metadata=1)
+ assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
- self.assertEqual(d['a'].metadata, {'datum': 1})
+ assert_(d['a'].metadata == {'datum': 1})
- def base_metadata_copied(self):
+ def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
- assert_equal(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
-class TestString(TestCase):
+class TestString(object):
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
@@ -478,7 +552,7 @@ class TestString(TestCase):
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
- def test_complex_dtype_repr(self):
+ def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
@@ -498,6 +572,7 @@ class TestString(TestCase):
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
+ def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
@@ -522,11 +597,17 @@ class TestString(TestCase):
"'titles':['Red pixel','Blue pixel'], "
"'itemsize':4})")
+ def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
- @dec.skipif(sys.version_info[0] >= 3)
+ def test_repr_str_subarray(self):
+ dt = np.dtype(('<i2', (1,)))
+ assert_equal(repr(dt), "dtype(('<i2', (1,)))")
+ assert_equal(str(dt), "('<i2', (1,))")
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
def test_dtype_str_with_long_in_shape(self):
# Pull request #376, should not error
np.dtype('(1L,)i4')
@@ -539,7 +620,26 @@ class TestString(TestCase):
# Pull request #4722
np.array(["", ""]).astype(object)
-class TestDtypeAttributeDeletion(TestCase):
+ def test_void_subclass_unsized(self):
+ dt = np.dtype(np.record)
+ assert_equal(repr(dt), "dtype('V')")
+ assert_equal(str(dt), '|V0')
+ assert_equal(dt.name, 'record')
+
+ def test_void_subclass_sized(self):
+ dt = np.dtype((np.record, 2))
+ assert_equal(repr(dt), "dtype('V2')")
+ assert_equal(str(dt), '|V2')
+ assert_equal(dt.name, 'record16')
+
+ def test_void_subclass_fields(self):
+ dt = np.dtype((np.record, [('a', '<u2')]))
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
+ assert_equal(dt.name, 'record16')
+
+
+class TestDtypeAttributeDeletion(object):
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -557,7 +657,7 @@ class TestDtypeAttributeDeletion(TestCase):
assert_raises(AttributeError, delattr, dt, s)
-class TestDtypeAttributes(TestCase):
+class TestDtypeAttributes(object):
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
@@ -568,15 +668,12 @@ class TestDtypeAttributes(TestCase):
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
-
-class TestDtypeAttributes(TestCase):
-
- def test_name_builtin(self):
- for t in np.typeDict.values():
- name = t.__name__
- if name.endswith('_'):
- name = name[:-1]
- assert_equal(np.dtype(t).name, name)
+ @pytest.mark.parametrize('t', np.typeDict.values())
+ def test_name_builtin(self, t):
+ name = t.__name__
+ if name.endswith('_'):
+ name = name[:-1]
+ assert_equal(np.dtype(t).name, name)
def test_name_dtype_subclass(self):
# Ticket #4357
@@ -585,6 +682,67 @@ class TestDtypeAttributes(TestCase):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+class TestPickling(object):
+
+ def check_pickling(self, dtype):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ pickled = pickle.loads(pickle.dumps(dtype, proto))
+ assert_equal(pickled, dtype)
+ assert_equal(pickled.descr, dtype.descr)
+ if dtype.metadata is not None:
+ assert_equal(pickled.metadata, dtype.metadata)
+ # Check the reconstructed dtype is functional
+ x = np.zeros(3, dtype=dtype)
+ y = np.zeros(3, dtype=pickled)
+ assert_equal(x, y)
+ assert_equal(x[0], y[0])
+
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.unicode, bool])
+ def test_builtin(self, t):
+ self.check_pickling(np.dtype(t))
+
+ def test_structured(self):
+ dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
+ self.check_pickling(dt)
+
+ def test_structured_aligned(self):
+ dt = np.dtype('i4, i1', align=True)
+ self.check_pickling(dt)
+
+ def test_structured_unaligned(self):
+ dt = np.dtype('i4, i1', align=False)
+ self.check_pickling(dt)
+
+ def test_structured_padded(self):
+ dt = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ self.check_pickling(dt)
+
+ def test_structured_titles(self):
+ dt = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ self.check_pickling(dt)
+
+ @pytest.mark.parametrize('base', ['m8', 'M8'])
+ @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
+ 'ms', 'us', 'ns', 'ps', 'fs', 'as'])
+ def test_datetime(self, base, unit):
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
+ self.check_pickling(dt)
+ if unit:
+ dt = np.dtype('%s[7%s]' % (base, unit))
+ self.check_pickling(dt)
+
+ def test_metadata(self):
+ dt = np.dtype(int, metadata={'datum': 1})
+ self.check_pickling(dt)
+
+
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
@@ -595,5 +753,201 @@ def test_rational_dtype():
assert_equal(np.array([x,x]).dtype, np.dtype(rational))
-if __name__ == "__main__":
- run_module_suite()
+def test_dtypes_are_true():
+ # test for gh-6294
+ assert bool(np.dtype('f8'))
+ assert bool(np.dtype('i8'))
+ assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
+
+
+def test_invalid_dtype_string():
+ # test for gh-10440
+ assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
+ assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
+
+
+class TestFromCTypes(object):
+
+ @staticmethod
+ def check(ctype, dtype):
+ dtype = np.dtype(dtype)
+ assert_equal(np.dtype(ctype), dtype)
+ assert_equal(np.dtype(ctype()), dtype)
+
+ def test_array(self):
+ c8 = ctypes.c_uint8
+ self.check( 3 * c8, (np.uint8, (3,)))
+ self.check( 1 * c8, (np.uint8, (1,)))
+ self.check( 0 * c8, (np.uint8, (0,)))
+ self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
+ self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
+
+ def test_padded_structure(self):
+ class PaddedStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_bit_fields(self):
+ class BitfieldStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8, 7),
+ ('b', ctypes.c_uint8, 1)
+ ]
+ assert_raises(TypeError, np.dtype, BitfieldStruct)
+ assert_raises(TypeError, np.dtype, BitfieldStruct())
+
+ def test_pointer(self):
+ p_uint8 = ctypes.POINTER(ctypes.c_uint8)
+ assert_raises(TypeError, np.dtype, p_uint8)
+
+ def test_void_pointer(self):
+ self.check(ctypes.c_void_p, np.uintp)
+
+ def test_union(self):
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b'],
+ formats=[np.uint8, np.uint16],
+ offsets=[0, 0],
+ itemsize=2
+ ))
+ self.check(Union, expected)
+
+ def test_union_with_struct_packed(self):
+ class Struct(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_union_packed(self):
+ class Struct(ctypes.Structure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ class Union(ctypes.Union):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ])
+ self.check(PackedStructure, expected)
+
+ def test_large_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 2
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint8),
+ ('d', ctypes.c_uint16),
+ ('e', ctypes.c_uint32),
+ ('f', ctypes.c_uint32),
+ ('g', ctypes.c_uint8)
+ ]
+ expected = np.dtype(dict(
+ formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
+ offsets=[0, 2, 4, 6, 8, 12, 16],
+ names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+ itemsize=18))
+ self.check(PackedStructure, expected)
+
+ def test_big_endian_structure_packed(self):
+ class BigEndStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
+ self.check(BigEndStruct, expected)
+
+ def test_little_endian_structure_packed(self):
+ class LittleEndStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+ self.check(LittleEndStruct, expected)
+
+ def test_little_endian_structure(self):
+ class PaddedStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '<B'),
+ ('b', '<H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_big_endian_structure(self):
+ class PaddedStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '>B'),
+ ('b', '>H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_simple_endian_types(self):
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 77fb75f10..3be4a8a26 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -1,350 +1,383 @@
from __future__ import division, absolute_import, print_function
-import warnings
+import itertools
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_raises
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_raises, suppress_warnings
)
-class TestEinSum(TestCase):
- def test_einsum_errors(self):
- # Need enough arguments
- assert_raises(ValueError, np.einsum)
- assert_raises(ValueError, np.einsum, "")
-
- # subscripts must be a string
- assert_raises(TypeError, np.einsum, 0, 0)
-
- # out parameter must be an array
- assert_raises(TypeError, np.einsum, "", 0, out='test')
-
- # order parameter must be a valid order
- assert_raises(TypeError, np.einsum, "", 0, order='W')
-
- # casting parameter must be a valid casting
- assert_raises(ValueError, np.einsum, "", 0, casting='blah')
-
- # dtype parameter must be a valid dtype
- assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type')
-
- # other keyword arguments are rejected
- assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)
-
- # issue 4528 revealed a segfault with this call
- assert_raises(TypeError, np.einsum, *(None,)*63)
-
- # number of operands must match count in subscripts string
- assert_raises(ValueError, np.einsum, "", 0, 0)
- assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
- assert_raises(ValueError, np.einsum, ",", [0])
-
- # can't have more subscripts than dimensions in the operand
- assert_raises(ValueError, np.einsum, "i", 0)
- assert_raises(ValueError, np.einsum, "ij", [0, 0])
- assert_raises(ValueError, np.einsum, "...i", 0)
- assert_raises(ValueError, np.einsum, "i...j", [0, 0])
- assert_raises(ValueError, np.einsum, "i...", 0)
- assert_raises(ValueError, np.einsum, "ij...", [0, 0])
-
- # invalid ellipsis
- assert_raises(ValueError, np.einsum, "i..", [0, 0])
- assert_raises(ValueError, np.einsum, ".i...", [0, 0])
- assert_raises(ValueError, np.einsum, "j->..j", [0, 0])
- assert_raises(ValueError, np.einsum, "j->.j...", [0, 0])
-
- # invalid subscript character
- assert_raises(ValueError, np.einsum, "i%...", [0, 0])
- assert_raises(ValueError, np.einsum, "...j$", [0, 0])
- assert_raises(ValueError, np.einsum, "i->&", [0, 0])
+# Setup for optimize einsum
+chars = 'abcdefghij'
+sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
+global_size_dict = dict(zip(chars, sizes))
- # output subscripts must appear in input
- assert_raises(ValueError, np.einsum, "i->ij", [0, 0])
- # output subscripts may only be specified once
- assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]])
-
- # dimensions much match when being collapsed
- assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3))
- assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3))
-
- # broadcasting to new dimensions must be enabled explicitly
- assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3))
- assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
- out=np.arange(4).reshape(2, 2))
+class TestEinsum(object):
+ def test_einsum_errors(self):
+ for do_opt in [True, False]:
+ # Need enough arguments
+ assert_raises(ValueError, np.einsum, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "", optimize=do_opt)
+
+ # subscripts must be a string
+ assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
+
+ # out parameter must be an array
+ assert_raises(TypeError, np.einsum, "", 0, out='test',
+ optimize=do_opt)
+
+ # order parameter must be a valid order
+ assert_raises(TypeError, np.einsum, "", 0, order='W',
+ optimize=do_opt)
+
+ # casting parameter must be a valid casting
+ assert_raises(ValueError, np.einsum, "", 0, casting='blah',
+ optimize=do_opt)
+
+ # dtype parameter must be a valid dtype
+ assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
+ optimize=do_opt)
+
+ # other keyword arguments are rejected
+ assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
+ optimize=do_opt)
+
+ # issue 4528 revealed a segfault with this call
+ assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
+
+ # number of operands must match count in subscripts string
+ assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
+
+ # can't have more subscripts than dimensions in the operand
+ assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
+
+ # invalid ellipsis
+ assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+ # invalid subscript character
+ assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+ # output subscripts must appear in input
+ assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+ # output subscripts may only be specified once
+ assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+ optimize=do_opt)
+
+ # dimensions much match when being collapsed
+ assert_raises(ValueError, np.einsum, "ii",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ii->i",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+
+ # broadcasting to new dimensions must be enabled explicitly
+ assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+ out=np.arange(4).reshape(2, 2), optimize=do_opt)
def test_einsum_views(self):
# pass-through
- a = np.arange(6)
- a.shape = (2, 3)
+ for do_opt in [True, False]:
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum(a, [Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum("ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ b = np.einsum(a, [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ # output is writeable whenever input is writeable
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.flags['WRITEABLE'])
+ a.flags['WRITEABLE'] = False
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(not b.flags['WRITEABLE'])
+
+ # transpose
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("ji", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ b = np.einsum(a, [1, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ # diagonal
+ a = np.arange(9)
+ a.shape = (3, 3)
+
+ b = np.einsum("ii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ # diagonal with various ways of broadcasting an additional dimension
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("...ii->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum("ii...->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum("...ii->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("jii->ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("ii...->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ # triple diagonal
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("iii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
- b = np.einsum("...", a)
- assert_(b.base is a)
-
- b = np.einsum(a, [Ellipsis])
- assert_(b.base is a)
-
- b = np.einsum("ij", a)
- assert_(b.base is a)
- assert_equal(b, a)
-
- b = np.einsum(a, [0, 1])
- assert_(b.base is a)
- assert_equal(b, a)
-
- # output is writeable whenever input is writeable
- b = np.einsum("...", a)
- assert_(b.flags['WRITEABLE'])
- a.flags['WRITEABLE'] = False
- b = np.einsum("...", a)
- assert_(not b.flags['WRITEABLE'])
-
- # transpose
- a = np.arange(6)
- a.shape = (2, 3)
-
- b = np.einsum("ji", a)
- assert_(b.base is a)
- assert_equal(b, a.T)
-
- b = np.einsum(a, [1, 0])
- assert_(b.base is a)
- assert_equal(b, a.T)
-
- # diagonal
- a = np.arange(9)
- a.shape = (3, 3)
-
- b = np.einsum("ii->i", a)
- assert_(b.base is a)
- assert_equal(b, [a[i, i] for i in range(3)])
-
- b = np.einsum(a, [0, 0], [0])
- assert_(b.base is a)
- assert_equal(b, [a[i, i] for i in range(3)])
-
- # diagonal with various ways of broadcasting an additional dimension
- a = np.arange(27)
- a.shape = (3, 3, 3)
-
- b = np.einsum("...ii->...i", a)
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
-
- b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0])
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
-
- b = np.einsum("ii...->...i", a)
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)]
- for x in a.transpose(2, 0, 1)])
-
- b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0])
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)]
- for x in a.transpose(2, 0, 1)])
-
- b = np.einsum("...ii->i...", a)
- assert_(b.base is a)
- assert_equal(b, [a[:, i, i] for i in range(3)])
-
- b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis])
- assert_(b.base is a)
- assert_equal(b, [a[:, i, i] for i in range(3)])
-
- b = np.einsum("jii->ij", a)
- assert_(b.base is a)
- assert_equal(b, [a[:, i, i] for i in range(3)])
-
- b = np.einsum(a, [1, 0, 0], [0, 1])
- assert_(b.base is a)
- assert_equal(b, [a[:, i, i] for i in range(3)])
-
- b = np.einsum("ii...->i...", a)
- assert_(b.base is a)
- assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
-
- b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis])
- assert_(b.base is a)
- assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
-
- b = np.einsum("i...i->i...", a)
- assert_(b.base is a)
- assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
-
- b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis])
- assert_(b.base is a)
- assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
-
- b = np.einsum("i...i->...i", a)
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)]
- for x in a.transpose(1, 0, 2)])
-
- b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0])
- assert_(b.base is a)
- assert_equal(b, [[x[i, i] for i in range(3)]
- for x in a.transpose(1, 0, 2)])
-
- # triple diagonal
- a = np.arange(27)
- a.shape = (3, 3, 3)
-
- b = np.einsum("iii->i", a)
- assert_(b.base is a)
- assert_equal(b, [a[i, i, i] for i in range(3)])
-
- b = np.einsum(a, [0, 0, 0], [0])
- assert_(b.base is a)
- assert_equal(b, [a[i, i, i] for i in range(3)])
+ # swap axes
+ a = np.arange(24)
+ a.shape = (2, 3, 4)
- # swap axes
- a = np.arange(24)
- a.shape = (2, 3, 4)
+ b = np.einsum("ijk->jik", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
- b = np.einsum("ijk->jik", a)
- assert_(b.base is a)
- assert_equal(b, a.swapaxes(0, 1))
+ b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
- b = np.einsum(a, [0, 1, 2], [1, 0, 2])
- assert_(b.base is a)
- assert_equal(b, a.swapaxes(0, 1))
-
- def check_einsum_sums(self, dtype):
+ def check_einsum_sums(self, dtype, do_opt=False):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1, 17):
a = np.arange(n, dtype=dtype)
- assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype))
- assert_equal(np.einsum(a, [0], []),
+ assert_equal(np.einsum("i->", a, optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+ assert_equal(np.einsum(a, [0], [], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
- assert_equal(np.einsum("...i->...", a),
+ assert_equal(np.einsum("...i->...", a, optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
- assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]),
+ assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1, 17):
a = np.arange(2*n, dtype=dtype).reshape(2, n)
- assert_equal(np.einsum("i...->...", a),
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
- assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
- assert_equal(np.einsum("i...->...", a),
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
- assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1, 17):
a = np.arange(n*n, dtype=dtype).reshape(n, n)
- assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype))
- assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype))
+ assert_equal(np.einsum("ii", a, optimize=do_opt),
+ np.trace(a).astype(dtype))
+ assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
+ np.trace(a).astype(dtype))
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
- a = np.arange(3*n, dtype=dtype).reshape(3, n)
- b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
- assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b))
- assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]),
+ a = np.arange(3 * n, dtype=dtype).reshape(3, n)
+ b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
+ np.multiply(a, b))
+ assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
np.multiply(a, b))
# inner(a,b)
for n in range(1, 17):
- a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+ a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
b = np.arange(n, dtype=dtype)
- assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b))
- assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]),
+ assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
+ assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
np.inner(a, b))
for n in range(1, 11):
- a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2)
+ a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
b = np.arange(n, dtype=dtype)
- assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T)
- assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]),
+ assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
+ np.inner(a.T, b.T).T)
+ assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1, 17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
- assert_equal(np.einsum("i,j", a, b), np.outer(a, b))
- assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
+ assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
+ np.outer(a, b))
+ assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
+ np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', np.ComplexWarning)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
- assert_equal(np.einsum("ij, j", a, b), np.dot(a, b))
- assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b))
+ assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
+ np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
- np.dot(a.astype('f8'),
- b.astype('f8')).astype(dtype))
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1], out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
- np.dot(a.astype('f8'),
- b.astype('f8')).astype(dtype))
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
- assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T))
- assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T))
+ assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
+ np.dot(b.T, a.T))
+ assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
+ np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
- np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe')
+ np.einsum("ji,j", a.T, b.T, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
- np.dot(b.T.astype('f8'),
- a.T.astype('f8')).astype(dtype))
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1, 0], b.T, [1], out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
- np.dot(b.T.astype('f8'),
- a.T.astype('f8')).astype(dtype))
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1, 17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
- assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b))
- assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b))
+ assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
+ np.dot(a, b))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
c = np.arange(24, dtype=dtype).reshape(4, 6)
- np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe')
+ np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
+ optimize=do_opt)
assert_equal(c,
- np.dot(a.astype('f8'),
- b.astype('f8')).astype(dtype))
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1, 2], out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
- np.dot(a.astype('f8'),
- b.astype('f8')).astype(dtype))
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
@@ -352,21 +385,21 @@ class TestEinSum(TestCase):
b = np.arange(20, dtype=dtype).reshape(4, 5)
c = np.arange(30, dtype=dtype).reshape(5, 6)
if dtype != 'f2':
- assert_equal(np.einsum("ij,jk,kl", a, b, c),
- a.dot(b).dot(c))
- assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]),
- a.dot(b).dot(c))
+ assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
+ a.dot(b).dot(c))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
+ optimize=do_opt), a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3, 6)
np.einsum("ij,jk,kl", a, b, c, out=d,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
d[...] = 0
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
@@ -376,31 +409,31 @@ class TestEinSum(TestCase):
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
- np.tensordot(a, b, axes=([1, 0], [0, 1])))
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
- np.tensordot(a, b, axes=([1, 0], [0, 1])))
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
c = np.arange(10, dtype=dtype).reshape(5, 2)
np.einsum("ijk,jil->kl", a, b, out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
- axes=([1, 0], [0, 1])).astype(dtype))
+ axes=([1, 0], [0, 1])).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
- dtype='f8', casting='unsafe')
+ dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
- axes=([1, 0], [0, 1])).astype(dtype))
+ axes=([1, 0], [0, 1])).astype(dtype))
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True, True, False, True, True, False, True, True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
- dtype='?', casting='unsafe'),
- np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+ dtype='?', casting='unsafe', optimize=do_opt),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
- dtype='?', casting='unsafe'),
- np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+ dtype='?', casting='unsafe'),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
@@ -412,21 +445,24 @@ class TestEinSum(TestCase):
for n in range(1, 25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
- assert_equal(np.einsum("...,...", a, a), np.multiply(a, a))
- assert_equal(np.einsum("i,i", a, a), np.dot(a, a))
- assert_equal(np.einsum("i,->i", a, 2), 2*a)
- assert_equal(np.einsum(",i->i", 2, a), 2*a)
- assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a))
- assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a))
-
- assert_equal(np.einsum("...,...", a[1:], a[:-1]),
+ assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
+ np.multiply(a, a))
+ assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
+ assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
+ assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
+ assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
+ assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
+
+ assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
np.multiply(a[1:], a[:-1]))
- assert_equal(np.einsum("i,i", a[1:], a[:-1]),
+ assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
np.dot(a[1:], a[:-1]))
- assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:])
- assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:])
- assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:]))
- assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:]))
+ assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
+ 2*np.sum(a[1:]))
+ assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
+ 2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
@@ -445,6 +481,43 @@ class TestEinSum(TestCase):
r = np.arange(4).reshape(2, 2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
+ # singleton dimensions broadcast (gh-10343)
+ p = np.ones((10,2))
+ q = np.ones((1,2))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ np.einsum('ij,ij->j', p, q, optimize=False))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ [10.] * 2)
+
+ # a blas-compatible contraction broadcasting case which was failing
+ # for optimize=True (ticket #10930)
+ x = np.array([2., 3.])
+ y = np.array([4.])
+ assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
+ assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
+
+ # all-ones array was bypassing bug (ticket #10930)
+ p = np.ones((1, 5)) / 2
+ q = np.ones((5, 5)) / 2
+ for optimize in (True, False):
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
+ optimize=optimize),
+ np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize))
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize),
+ np.full((1, 5), 1.25))
+
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
+
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
@@ -459,9 +532,11 @@ class TestEinSum(TestCase):
def test_einsum_sums_int32(self):
self.check_einsum_sums('i4')
+ self.check_einsum_sums('i4', True)
def test_einsum_sums_uint32(self):
self.check_einsum_sums('u4')
+ self.check_einsum_sums('u4', True)
def test_einsum_sums_int64(self):
self.check_einsum_sums('i8')
@@ -477,12 +552,14 @@ class TestEinSum(TestCase):
def test_einsum_sums_float64(self):
self.check_einsum_sums('f8')
+ self.check_einsum_sums('f8', True)
def test_einsum_sums_longdouble(self):
self.check_einsum_sums(np.longdouble)
def test_einsum_sums_cfloat64(self):
self.check_einsum_sums('c8')
+ self.check_einsum_sums('c8', True)
def test_einsum_sums_cfloat128(self):
self.check_einsum_sums('c16')
@@ -496,12 +573,22 @@ class TestEinSum(TestCase):
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
+
+ # Regression test for issue #10369 (test unicode inputs with Python 2)
+ assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
+ optimize=u'greedy'), 20)
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
- np.einsum('ijklm,ijn->', a, b))
+ np.einsum('ijklm,ijn->', a, b))
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
+ np.einsum('ijklm,ijn->', a, b, optimize=True))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
@@ -509,8 +596,22 @@ class TestEinSum(TestCase):
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
- [[[1, 3], [3, 9], [5, 15], [7, 21]],
- [[8, 16], [16, 32], [24, 48], [32, 64]]])
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+
+ def test_subscript_range(self):
+ # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used
+ # when creating a subscript from arrays
+ a = np.ones((2, 3))
+ b = np.ones((3, 4))
+ np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
+ assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
+ assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
def test_einsum_broadcast(self):
# Issue #2455 change in handling ellipsis
@@ -518,36 +619,41 @@ class TestEinSum(TestCase):
# only use the 'RIGHT' iteration in prepare_op_axes
# adds auto broadcast on left where it belongs
# broadcast on right has to be explicit
+ # We need to test the optimized parsing as well
- A = np.arange(2*3*4).reshape(2,3,4)
+ A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
- ref = np.einsum('ijk,j->ijk',A, B)
- assert_equal(np.einsum('ij...,j...->ij...',A, B), ref)
- assert_equal(np.einsum('ij...,...j->ij...',A, B), ref)
- assert_equal(np.einsum('ij...,j->ij...',A, B), ref) # used to raise error
-
- A = np.arange(12).reshape((4,3))
- B = np.arange(6).reshape((3,2))
- ref = np.einsum('ik,kj->ij', A, B)
- assert_equal(np.einsum('ik...,k...->i...', A, B), ref)
- assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref)
- assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error
- assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error
-
- dims = [2,3,4,5]
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
+
+ A = np.arange(12).reshape((4, 3))
+ B = np.arange(6).reshape((3, 2))
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
+
+ dims = [2, 3, 4, 5]
a = np.arange(np.prod(dims)).reshape(dims)
v = np.arange(dims[2])
- ref = np.einsum('ijkl,k->ijl', a, v)
- assert_equal(np.einsum('ijkl,k', a, v), ref)
- assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error
- assert_equal(np.einsum('...kl,k...', a, v), ref)
- # no real diff from 1st
-
- J,K,M = 160,160,120
- A = np.arange(J*K*M).reshape(1,1,1,J,K,M)
- B = np.arange(J*K*M*3).reshape(J,K,M,3)
- ref = np.einsum('...lmn,...lmno->...o', A, B)
- assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
+
+ J, K, M = 160, 160, 120
+ A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
+ B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+ optimize=opt), ref) # used to raise error
def test_einsum_fixedstridebug(self):
# Issue #4485 obscure einsum bug
@@ -566,17 +672,17 @@ class TestEinSum(TestCase):
# used by einsum, is 8192, and 3*2731 = 8193, is larger than that
# and results in a mismatch between the buffering and the
# striding for operand A.
- A = np.arange(2*3).reshape(2,3).astype(np.float32)
- B = np.arange(2*3*2731).reshape(2,3,2731).astype(np.int16)
- es = np.einsum('cl,cpx->lpx', A, B)
- tp = np.tensordot(A, B, axes=(0, 0))
- assert_equal(es, tp)
+ A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+ B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+ es = np.einsum('cl, cpx->lpx', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
# The following is the original test case from the bug report,
# made repeatable by changing random arrays to aranges.
- A = np.arange(3*3).reshape(3,3).astype(np.float64)
- B = np.arange(3*3*64*64).reshape(3,3,64,64).astype(np.float32)
- es = np.einsum('cl,cpxy->lpxy', A,B)
- tp = np.tensordot(A,B, axes=(0,0))
+ A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+ B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+ es = np.einsum('cl, cpxy->lpxy', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
assert_equal(es, tp)
def test_einsum_fixed_collapsingbug(self):
@@ -618,10 +724,278 @@ class TestEinSum(TestCase):
a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
a[...] = True
out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
- tgt = np.ones((2,1,1), dtype=np.bool_)
+ tgt = np.ones((2, 1, 1), dtype=np.bool_)
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
assert_equal(res, tgt)
-
-if __name__ == "__main__":
- run_module_suite()
+ def test_out_is_res(self):
+ a = np.arange(9).reshape(3, 3)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+ assert res is a
+
+ def optimize_compare(self, subscripts, operands=None):
+ # Tests all paths of the optimization function against
+ # conventional einsum
+ if operands is None:
+ args = [subscripts]
+ terms = subscripts.split('->')[0].split(',')
+ for term in terms:
+ dims = [global_size_dict[x] for x in term]
+ args.append(np.random.rand(*dims))
+ else:
+ args = [subscripts] + operands
+
+ noopt = np.einsum(*args, optimize=False)
+ opt = np.einsum(*args, optimize='greedy')
+ assert_almost_equal(opt, noopt)
+ opt = np.einsum(*args, optimize='optimal')
+ assert_almost_equal(opt, noopt)
+
+ def test_hadamard_like_products(self):
+ # Hadamard outer products
+ self.optimize_compare('a,ab,abc->abc')
+ self.optimize_compare('a,b,ab->ab')
+
+ def test_index_transformations(self):
+ # Simple index transformation cases
+ self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
+ self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
+ self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
+
+ def test_complex(self):
+ # Long test cases
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
+ self.optimize_compare('abhe,hidj,jgba,hiab,gab')
+ self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
+ self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
+ self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ self.optimize_compare('bdhe,acad,hiab,agac,hibd')
+
+ def test_collapse(self):
+ # Inner products
+ self.optimize_compare('ab,ab,c->')
+ self.optimize_compare('ab,ab,c->c')
+ self.optimize_compare('ab,ab,cd,cd->')
+ self.optimize_compare('ab,ab,cd,cd->ac')
+ self.optimize_compare('ab,ab,cd,cd->cd')
+ self.optimize_compare('ab,ab,cd,cd,ef,ef->')
+
+ def test_expand(self):
+ # Outer products
+ self.optimize_compare('ab,cd,ef->abcdef')
+ self.optimize_compare('ab,cd,ef->acdf')
+ self.optimize_compare('ab,cd,de->abcde')
+ self.optimize_compare('ab,cd,de->be')
+ self.optimize_compare('ab,bcd,cd->abcd')
+ self.optimize_compare('ab,bcd,cd->abd')
+
+ def test_edge_cases(self):
+ # Difficult edge cases for optimization
+ self.optimize_compare('eb,cb,fb->cef')
+ self.optimize_compare('dd,fb,be,cdb->cef')
+ self.optimize_compare('bca,cdb,dbf,afc->')
+ self.optimize_compare('dcc,fce,ea,dbf->ab')
+ self.optimize_compare('fdf,cdd,ccd,afe->ae')
+ self.optimize_compare('abcd,ad')
+ self.optimize_compare('ed,fcd,ff,bcf->be')
+ self.optimize_compare('baa,dcf,af,cde->be')
+ self.optimize_compare('bd,db,eac->ace')
+ self.optimize_compare('fff,fae,bef,def->abd')
+ self.optimize_compare('efc,dbc,acf,fd->abe')
+ self.optimize_compare('ba,ac,da->bcd')
+
+ def test_inner_product(self):
+ # Inner products
+ self.optimize_compare('ab,ab')
+ self.optimize_compare('ab,ba')
+ self.optimize_compare('abc,abc')
+ self.optimize_compare('abc,bac')
+ self.optimize_compare('abc,cba')
+
+ def test_random_cases(self):
+ # Randomly built test cases
+ self.optimize_compare('aab,fa,df,ecc->bde')
+ self.optimize_compare('ecb,fef,bad,ed->ac')
+ self.optimize_compare('bcf,bbb,fbf,fc->')
+ self.optimize_compare('bb,ff,be->e')
+ self.optimize_compare('bcb,bb,fc,fff->')
+ self.optimize_compare('fbb,dfd,fc,fc->')
+ self.optimize_compare('afd,ba,cc,dc->bf')
+ self.optimize_compare('adb,bc,fa,cfc->d')
+ self.optimize_compare('bbd,bda,fc,db->acf')
+ self.optimize_compare('dba,ead,cad->bce')
+ self.optimize_compare('aef,fbc,dca->bde')
+
+ def test_combined_views_mapping(self):
+ # gh-10792
+ a = np.arange(9).reshape(1, 1, 3, 1, 3)
+ b = np.einsum('bbcdc->d', a)
+ assert_equal(b, [12])
+
+ def test_broadcasting_dot_cases(self):
+ # Ensures broadcasting cases are not mistaken for GEMM
+
+ a = np.random.rand(1, 5, 4)
+ b = np.random.rand(4, 6)
+ c = np.random.rand(5, 6)
+ d = np.random.rand(10)
+
+ self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+ self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+ e = np.random.rand(1, 1, 5, 4)
+ f = np.random.rand(7, 7)
+ self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+ self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+ # Edge case found in gh-11308
+ g = np.arange(64).reshape(2, 4, 8)
+ self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+
+class TestEinsumPath(object):
+ def build_operands(self, string, size_dict=global_size_dict):
+
+ # Builds views based off initial operands
+ operands = [string]
+ terms = string.split('->')[0].split(',')
+ for term in terms:
+ dims = [size_dict[x] for x in term]
+ operands.append(np.random.rand(*dims))
+
+ return operands
+
+ def assert_path_equal(self, comp, benchmark):
+ # Checks if list of tuples are equivalent
+ ret = (len(comp) == len(benchmark))
+ assert_(ret)
+ for pos in range(len(comp) - 1):
+ ret &= isinstance(comp[pos + 1], tuple)
+ ret &= (comp[pos + 1] == benchmark[pos + 1])
+ assert_(ret)
+
+ def test_memory_contraints(self):
+ # Ensure memory constraints are satisfied
+
+ outer_test = self.build_operands('a,b,c->abc')
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ long_test = self.build_operands('acdf,jbje,gihb,hfac')
+ path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ def test_long_paths(self):
+ # Long complex cases
+
+ # Long test 1
+ long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ # Long test 2
+ long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+ print(path)
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+ print(path)
+ self.assert_path_equal(path, ['einsum_path',
+ (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ def test_edge_paths(self):
+ # Difficult edge cases
+
+ # Edge test1
+ edge_test1 = self.build_operands('eb,cb,fb->cef')
+ path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ # Edge test2
+ edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+ path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ # Edge test3
+ edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+ path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test4
+ edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test5
+ edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+ size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ def test_path_type_input(self):
+ # Test explicit path handling
+ path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+ path, path_str = np.einsum_path(*path_test, optimize=False)
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*path_test, optimize=True)
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+ self.assert_path_equal(path, exp_path)
+
+ # Double check einsum works on the input path
+ noopt = np.einsum(*path_test, optimize=False)
+ opt = np.einsum(*path_test, optimize=exp_path)
+ assert_almost_equal(noopt, opt)
+
+ def test_spaces(self):
+ #gh-10794
+ arr = np.array([[1]])
+ for sp in itertools.product(['', ' '], repeat=4):
+ # no error for any spacing
+ np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+def test_overlap():
+ a = np.arange(9, dtype=int).reshape(3, 3)
+ b = np.arange(9, dtype=int).reshape(3, 3)
+ d = np.dot(a, b)
+ # sanity check
+ c = np.einsum('ij,jk->ik', a, b)
+ assert_equal(c, d)
+ #gh-10080, out overlaps one of the operands
+ c = np.einsum('ij,jk->ik', a, b, out=b)
+ assert_equal(c, d)
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 7fc749a7e..670d485c1 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -1,13 +1,14 @@
from __future__ import division, absolute_import, print_function
import platform
+import pytest
import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite, dec
+from numpy.testing import assert_, assert_raises
-class TestErrstate(TestCase):
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+class TestErrstate(object):
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
@@ -15,12 +16,8 @@ class TestErrstate(TestCase):
with np.errstate(invalid='ignore'):
np.sqrt(a)
# While this should fail!
- try:
+ with assert_raises(FloatingPointError):
np.sqrt(a)
- except FloatingPointError:
- pass
- else:
- self.fail("Did not raise an invalid error")
def test_divide(self):
with np.errstate(all='raise', under='ignore'):
@@ -29,12 +26,8 @@ class TestErrstate(TestCase):
with np.errstate(divide='ignore'):
a // 0
# While this should fail!
- try:
+ with assert_raises(FloatingPointError):
a // 0
- except FloatingPointError:
- pass
- else:
- self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
@@ -46,7 +39,3 @@ class TestErrstate(TestCase):
with np.errstate(call=None):
assert_(np.geterrcall() is None, 'call is not None')
assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 2afae2f6b..7c454a603 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -1,13 +1,12 @@
from __future__ import division, absolute_import, print_function
-import sys
import itertools
import contextlib
import operator
+import pytest
import numpy as np
-import numpy.core.multiarray_tests as mt
-from numpy.compat import long
+import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
@@ -59,7 +58,7 @@ def exc_iter(*args):
try:
yield iterate()
- except:
+ except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
@@ -183,6 +182,7 @@ def test_gt_128():
assert_equal(d, c)
+@pytest.mark.slow
def test_divmod_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
@@ -219,7 +219,3 @@ def test_ceildiv_128_64():
if c != d:
assert_equal(d, c)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 6b5430611..459bacab0 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,14 +1,48 @@
from __future__ import division, absolute_import, print_function
-from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange,
- isnan, ndarray)
+from numpy import (
+ logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
+ ndarray, sqrt, nextafter, stack
+ )
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal
-)
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+ suppress_warnings
+ )
-class TestLogspace(TestCase):
+class PhysicalQuantity(float):
+ def __new__(cls, value):
+ return float.__new__(cls, value)
+
+ def __add__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) + float(self))
+ __radd__ = __add__
+
+ def __sub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(self) - float(x))
+
+ def __rsub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) - float(self))
+
+ def __mul__(self, x):
+ return PhysicalQuantity(float(x) * float(self))
+ __rmul__ = __mul__
+
+ def __div__(self, x):
+ return PhysicalQuantity(float(self) / float(x))
+
+ def __rdiv__(self, x):
+ return PhysicalQuantity(float(x) / float(self))
+
+
+class PhysicalQuantity2(ndarray):
+ __array_priority__ = 10
+
+
+class TestLogspace(object):
def test_basic(self):
y = logspace(0, 6)
@@ -20,6 +54,20 @@ class TestLogspace(TestCase):
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+ def test_start_stop_array(self):
+ start = array([0., 1.])
+ stop = array([6., 7.])
+ t1 = logspace(start, stop, 6)
+ t2 = stack([logspace(_start, _stop, 6)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = logspace(start, stop[0], 6)
+ t4 = stack([logspace(_start, stop[0], 6)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = logspace(start, stop, 6, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
@@ -28,8 +76,153 @@ class TestLogspace(TestCase):
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(logspace(a, b), logspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ ls = logspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0))
+ ls = logspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0, 1))
-class TestLinspace(TestCase):
+
+class TestGeomspace(object):
+
+ def test_basic(self):
+ y = geomspace(1, 1e6)
+ assert_(len(y) == 50)
+ y = geomspace(1, 1e6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = geomspace(1, 1e6, endpoint=False)
+ assert_(y[-1] < 10 ** 6)
+ y = geomspace(1, 1e6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ y = geomspace(8, 2, num=3)
+ assert_allclose(y, [8, 4, 2])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-1, -100, num=3)
+ assert_array_equal(y, [-1, -10, -100])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-100, -1, num=3)
+ assert_array_equal(y, [-100, -10, -1])
+ assert_array_equal(y.imag, 0)
+
+ def test_complex(self):
+ # Purely imaginary
+ y = geomspace(1j, 16j, num=5)
+ assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(-4j, -324j, num=5)
+ assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(1+1j, 1000+1000j, num=4)
+ assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
+
+ y = geomspace(-1+1j, -1000+1000j, num=4)
+ assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
+
+ # Logarithmic spirals
+ y = geomspace(-1, 1, num=3, dtype=complex)
+ assert_allclose(y, [-1, 1j, +1])
+
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(0+3j, 3+0j, 3)
+ assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
+ y = geomspace(-3+0j, 0-3j, 3)
+ assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(-2-3j, 5+7j, 7)
+ assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
+ 2.08885354-4.34146838j, 4.58345529-3.16355218j,
+ 6.41401745-0.55233457j, 6.75707386+3.11795092j,
+ 5+7j])
+
+ # Type promotion should prevent the -5 from becoming a NaN
+ y = geomspace(3j, -5, 2)
+ assert_allclose(y, [3j, -5])
+ y = geomspace(-5, 3j, 2)
+ assert_allclose(y, [-5, 3j])
+
+ def test_dtype(self):
+ y = geomspace(1, 1e6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = geomspace(1, 1e6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = geomspace(1, 1e6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ # Native types
+ y = geomspace(1, 1e6, dtype=float)
+ assert_equal(y.dtype, dtype('float_'))
+ y = geomspace(1, 1e6, dtype=complex)
+ assert_equal(y.dtype, dtype('complex'))
+
+ def test_start_stop_array_scalar(self):
+ lim1 = array([120, 100], dtype="int8")
+ lim2 = array([-120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = geomspace(lim1[0], lim1[1], 5)
+ t2 = geomspace(lim2[0], lim2[1], 5)
+ t3 = geomspace(lim3[0], lim3[1], 5)
+ t4 = geomspace(120.0, 100.0, 5)
+ t5 = geomspace(-120.0, -100.0, 5)
+ t6 = geomspace(1200.0, 1000.0, 5)
+
+ # t3 uses float32, t6 uses float64
+ assert_allclose(t1, t4, rtol=1e-2)
+ assert_allclose(t2, t5, rtol=1e-2)
+ assert_allclose(t3, t6, rtol=1e-5)
+
+ def test_start_stop_array(self):
+ # Try to use all special cases.
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+ stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+ t1 = geomspace(start, stop, 5)
+ t2 = stack([geomspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = geomspace(start, stop[0], 5)
+ t4 = stack([geomspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = geomspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ gs = geomspace(a, b)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0))
+ gs = geomspace(a, b, 1)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0, 1))
+
+ def test_bounds(self):
+ assert_raises(ValueError, geomspace, 0, 10)
+ assert_raises(ValueError, geomspace, 10, 0)
+ assert_raises(ValueError, geomspace, 0, 0)
+
+
+class TestLinspace(object):
def test_basic(self):
y = linspace(0, 10)
@@ -43,8 +236,10 @@ class TestLinspace(TestCase):
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
- y = list(linspace(0, 1, 2.5))
- assert_(y == [0.0, 1.0])
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
+ y = list(linspace(0, 1, 2.5))
+ assert_(y == [0.0, 1.0])
def test_type(self):
t1 = linspace(0, 1, 0).dtype
@@ -61,7 +256,7 @@ class TestLinspace(TestCase):
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
- def test_array_scalar(self):
+ def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
@@ -75,50 +270,34 @@ class TestLinspace(TestCase):
assert_equal(t2, t5)
assert_equal(t3, t6)
+ def test_start_stop_array(self):
+ start = array([-120, 120], dtype="int8")
+ stop = array([100, -100], dtype="int8")
+ t1 = linspace(start, stop, 5)
+ t2 = stack([linspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = linspace(start, stop[0], 5)
+ t4 = stack([linspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = linspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
- t1 = array([ 1.0+2.j, 1.5+2.5j, 2.0+3.j, 2.5+3.5j, 3.0+4.j])
+ t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
- t2 = array([ 0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0.j])
+ t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
- class PhysicalQuantity(float):
- def __new__(cls, value):
- return float.__new__(cls, value)
-
- def __add__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(x) + float(self))
- __radd__ = __add__
-
- def __sub__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(self) - float(x))
-
- def __rsub__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(x) - float(self))
-
- def __mul__(self, x):
- return PhysicalQuantity(float(x) * float(self))
- __rmul__ = __mul__
-
- def __div__(self, x):
- return PhysicalQuantity(float(self) / float(x))
-
- def __rdiv__(self, x):
- return PhysicalQuantity(float(x) / float(self))
-
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
- class PhysicalQuantity2(ndarray):
- __array_priority__ = 10
-
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
@@ -128,12 +307,46 @@ class TestLinspace(TestCase):
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0, 1))
+ def test_array_interface(self):
+ # Regression test for https://github.com/numpy/numpy/pull/6659
+ # Ensure that start/stop can be objects that implement
+ # __array_interface__ and are convertible to numeric scalars
+
+ class Arrayish(object):
+ """
+ A generic object that supports the __array_interface__ and hence
+ can in principle be converted to a numeric scalar, but is not
+ otherwise recognized as numeric, but also happens to support
+ multiplication by floats.
+
+ Data should be an object that implements the buffer interface,
+ and contains at least 4 bytes.
+ """
+
+ def __init__(self, data):
+ self._data = data
+
+ @property
+ def __array_interface__(self):
+ return {'shape': (), 'typestr': '<i4', 'data': self._data,
+ 'version': 3}
+
+ def __mul__(self, other):
+ # For the purposes of this test any multiplication is an
+ # identity operation :)
+ return self
+
+ one = Arrayish(array(1, dtype='<i4'))
+ five = Arrayish(array(5, dtype='<i4'))
+
+ assert_equal(linspace(one, five), linspace(1, 5))
+
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
- for dt in (dtype(f) for f in typecodes['Float']):
- stop = finfo(dt).tiny * finfo(dt).resolution
- assert_(any(linspace(0, stop, 10, endpoint=False, dtype=dt)))
+ for ftype in sctypes['float']:
+ stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
+ assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
def test_equivalent_to_arange(self):
for j in range(1000):
@@ -149,7 +362,3 @@ class TestLinspace(TestCase):
assert_(isinstance(y, tuple) and len(y) == 2 and
len(y[0]) == num and isnan(y[1]),
'num={0}, endpoint={1}'.format(num, ept))
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index c36d7c068..2f6648183 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -6,59 +6,72 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
-from numpy.testing import (
- TestCase, run_module_suite, assert_equal
-)
+from numpy.testing import assert_equal, assert_, assert_raises
+from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
-class TestPythonFloat(TestCase):
+class TestPythonFloat(object):
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
-class TestHalf(TestCase):
+class TestHalf(object):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
-class TestSingle(TestCase):
+class TestSingle(object):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
-class TestDouble(TestCase):
+class TestDouble(object):
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
-class TestLongdouble(TestCase):
- def test_singleton(self,level=2):
+class TestLongdouble(object):
+ def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
-class TestIinfo(TestCase):
+class TestFinfo(object):
+ def test_basic(self):
+ dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
+ [np.float16, np.float32, np.float64, np.complex64,
+ np.complex128]))
+ for dt1, dt2 in dts:
+ for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machar', 'machep',
+ 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
+ 'nmant', 'precision', 'resolution', 'tiny'):
+ assert_equal(getattr(finfo(dt1), attr),
+ getattr(finfo(dt2), attr), attr)
+ assert_raises(ValueError, finfo, 'i4')
+
+class TestIinfo(object):
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
- assert_equal(iinfo(dt1).min, iinfo(dt2).min)
- assert_equal(iinfo(dt1).max, iinfo(dt2).max)
- self.assertRaises(ValueError, iinfo, 'f4')
+ for attr in ('bits', 'min', 'max'):
+ assert_equal(getattr(iinfo(dt1), attr),
+ getattr(iinfo(dt2), attr), attr)
+ assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
-class TestRepr(TestCase):
+class TestRepr(object):
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
@@ -73,5 +86,38 @@ def test_instances():
iinfo(10)
finfo(3.0)
-if __name__ == "__main__":
- run_module_suite()
+
+def assert_ma_equal(discovered, ma_like):
+ # Check MachAr-like objects same as calculated MachAr instances
+ for key, value in discovered.__dict__.items():
+ assert_equal(value, getattr(ma_like, key))
+ if hasattr(value, 'shape'):
+ assert_equal(value.shape, getattr(ma_like, key).shape)
+ assert_equal(value.dtype, getattr(ma_like, key).dtype)
+
+
+def test_known_types():
+ # Test we are correctly compiling parameters for known types
+ for ftype, ma_like in ((np.float16, _float_ma[16]),
+ (np.float32, _float_ma[32]),
+ (np.float64, _float_ma[64])):
+ assert_ma_equal(_discovered_machar(ftype), ma_like)
+ # Suppress warning for broken discovery of double double on PPC
+ with np.errstate(all='ignore'):
+ ld_ma = _discovered_machar(np.longdouble)
+ bytes = np.dtype(np.longdouble).itemsize
+ if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
+ # 80-bit extended precision
+ assert_ma_equal(ld_ma, _float_ma[80])
+ elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
+ # IEEE 754 128-bit
+ assert_ma_equal(ld_ma, _float_ma[128])
+
+
+def test_plausible_finfo():
+ # Assert that finfo returns reasonable results for all types
+ for ftype in np.sctypes['float'] + np.sctypes['complex']:
+ info = np.finfo(ftype)
+ assert_(info.nmant > 1)
+ assert_(info.minexp < -1)
+ assert_(info.maxexp > 1)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 56b574ae8..b28c933db 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -1,11 +1,11 @@
from __future__ import division, absolute_import, print_function
import platform
+import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
- dec
+from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
@@ -18,8 +18,8 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_(False,
"Did not raise floating point %s error" % strmatch)
-class TestHalf(TestCase):
- def setUp(self):
+class TestHalf(object):
+ def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
@@ -66,7 +66,7 @@ class TestHalf(TestCase):
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
- j = np.array(i_f16, dtype=np.int)
+ j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
def test_nans_infs(self):
@@ -301,15 +301,19 @@ class TestHalf(TestCase):
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
+
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
+
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
+
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
@@ -317,12 +321,14 @@ class TestHalf(TestCase):
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
+ assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
+ assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
@@ -354,7 +360,8 @@ class TestHalf(TestCase):
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel",
+ reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
@@ -430,7 +437,3 @@ class TestHalf(TestCase):
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index e6b6be361..63b43c473 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_raises
+from numpy.testing import assert_raises
-class TestIndexErrors(TestCase):
+class TestIndexErrors(object):
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
@@ -121,6 +121,3 @@ class TestIndexErrors(TestCase):
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 8d6f6a96b..99792cee7 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -4,24 +4,18 @@ import sys
import warnings
import functools
import operator
+import pytest
import numpy as np
-from numpy.core.multiarray_tests import array_indexing
+from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_warns
-)
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
+ HAS_REFCOUNT, suppress_warnings,
+ )
-try:
- cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__)
- _HAS_CTYPE = True
-except ImportError:
- _HAS_CTYPE = False
-
-
-class TestIndexing(TestCase):
+class TestIndexing(object):
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -99,6 +93,12 @@ class TestIndexing(TestCase):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
+
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
@@ -134,10 +134,10 @@ class TestIndexing(TestCase):
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
- # Ellipsis index does not create a view
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
+ assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
@@ -177,19 +177,22 @@ class TestIndexing(TestCase):
[4, 5, 6],
[7, 8, 9]])
- # Python boolean converts to integer
- # These are being deprecated (and test in test_deprecations)
- #assert_equal(a[True], a[1])
- #assert_equal(a[False], a[0])
+ assert_equal(a[np.array(True)], a[None])
+ assert_equal(a[np.array(False)], a[None][0:0])
+
+ def test_boolean_shape_mismatch(self):
+ arr = np.ones((5, 4, 3))
+
+ index = np.array([True])
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ index = np.array([False] * 6)
+ assert_raises(IndexError, arr.__getitem__, index)
- # Same with NumPy boolean scalar
- # Before DEPRECATE, this is an error (as always, but telling about
- # future change):
- assert_raises(IndexError, a.__getitem__, np.array(True))
- assert_raises(IndexError, a.__getitem__, np.array(False))
- # After DEPRECATE, this behaviour can be enabled:
- #assert_equal(a[np.array(True)], a[None])
- #assert_equal(a[np.array(False), a[None][0:0]])
+ index = np.zeros((4, 4), dtype=bool)
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
@@ -213,6 +216,20 @@ class TestIndexing(TestCase):
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
+ def test_boolean_assignment_needs_api(self):
+ # See also gh-7666
+ # This caused a segfault on Python 2 due to the GIL not being
+ # held when the iterator does not need it, but the transfer function
+ # does
+ arr = np.zeros(1000)
+ indx = np.zeros(1000, dtype=bool)
+ indx[:100] = True
+ arr[indx] = np.ones(100, dtype=object)
+
+ expected = np.zeros(1000)
+ expected[:100] = 1
+ assert_array_equal(arr, expected)
+
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
@@ -275,7 +292,7 @@ class TestIndexing(TestCase):
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
- # Numpy bools can be used as boolean index (python ones as of yet not)
+ # NumPy bools can be used as boolean index (python ones as of yet not)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
@@ -311,6 +328,21 @@ class TestIndexing(TestCase):
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
+ def test_trivial_fancy_not_possible(self):
+ # Test that the fast path for trivial assignment is not incorrectly
+ # used when the index is not contiguous or 1D, see also gh-11467.
+ a = np.arange(6)
+ idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+ assert_array_equal(a[idx], idx)
+
+ # this case must not go into the fast path, note that idx is
+ # a non-contiguous non-1D array here.
+ a[idx] = -1
+ res = np.arange(6)
+ res[0] = -1
+ res[3] = -1
+ assert_array_equal(a, res)
+
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
@@ -390,7 +422,8 @@ class TestIndexing(TestCase):
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
- refcount = sys.getrefcount(np.dtype(np.intp))
+ if HAS_REFCOUNT:
+ refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
@@ -399,7 +432,8 @@ class TestIndexing(TestCase):
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
- assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
@@ -474,8 +508,17 @@ class TestIndexing(TestCase):
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+ def test_indexing_array_negative_strides(self):
+ # From gh-8264,
+ # core dumps if negative strides are used in iteration
+ arro = np.zeros((4, 4))
+ arr = arro[::-1, ::-1]
+
+ slices = (slice(None), [0, 1, 2, 3])
+ arr[slices] = 10
+ assert_array_equal(arr, 10.)
-class TestFieldIndexing(TestCase):
+class TestFieldIndexing(object):
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -484,7 +527,7 @@ class TestFieldIndexing(TestCase):
assert_(isinstance(a[['a']], np.ndarray))
-class TestBroadcastedAssignments(TestCase):
+class TestBroadcastedAssignments(object):
def assign(self, a, ind, val):
a[ind] = val
return a
@@ -502,31 +545,22 @@ class TestBroadcastedAssignments(TestCase):
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
-
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
-
- with warnings.catch_warnings():
- # Will be a ValueError as well.
- warnings.simplefilter("error", DeprecationWarning)
- assert_raises(DeprecationWarning, assign, a, s_[[1, 2, 3],],
- np.ones((2, 1)))
- assert_raises(DeprecationWarning, assign, a, s_[[[1], [2]],],
- np.ones((2,2,1)))
+ assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
+ assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
-
a = np.zeros((5, 1))
+
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
-
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
-
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
def test_index_is_larger(self):
@@ -544,30 +578,35 @@ class TestBroadcastedAssignments(TestCase):
assert_((a[::-1] == v).all())
-class TestSubclasses(TestCase):
+class TestSubclasses(object):
def test_basic(self):
+ # Test that indexing in various ways produces SubClass instances,
+ # and that the base is set up correctly: the original subclass
+ # instance for views, and a new ndarray for advanced/boolean indexing
+ # where a copy was made (latter a regression test for gh-11983).
class SubClass(np.ndarray):
pass
- s = np.arange(5).view(SubClass)
- assert_(isinstance(s[:3], SubClass))
- assert_(s[:3].base is s)
-
- assert_(isinstance(s[[0, 1, 2]], SubClass))
- assert_(isinstance(s[s > 0], SubClass))
-
- def test_matrix_fancy(self):
- # The matrix class messes with the shape. While this is always
- # weird (getitem is not used, it does not have setitem nor knows
- # about fancy indexing), this tests gh-3110
- m = np.matrix([[1, 2], [3, 4]])
-
- assert_(isinstance(m[[0,1,0], :], np.matrix))
-
- # gh-3110. Note the transpose currently because matrices do *not*
- # support dimension fixing for fancy indexing correctly.
- x = np.asmatrix(np.arange(50).reshape(5,10))
- assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s_slice = s[:3]
+ assert_(type(s_slice) is SubClass)
+ assert_(s_slice.base is s)
+ assert_array_equal(s_slice, a[:3])
+
+ s_fancy = s[[0, 1, 2]]
+ assert_(type(s_fancy) is SubClass)
+ assert_(s_fancy.base is not s)
+ assert_(type(s_fancy.base) is np.ndarray)
+ assert_array_equal(s_fancy, a[[0, 1, 2]])
+ assert_array_equal(s_fancy.base, a[[0, 1, 2]])
+
+ s_bool = s[s > 0]
+ assert_(type(s_bool) is SubClass)
+ assert_(s_bool.base is not s)
+ assert_(type(s_bool.base) is np.ndarray)
+ assert_array_equal(s_bool, a[a > 0])
+ assert_array_equal(s_bool.base, a[a > 0])
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
@@ -589,7 +628,56 @@ class TestSubclasses(TestCase):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
-class TestFancingIndexingCast(TestCase):
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_slice_decref_getsetslice(self):
+ # See gh-10066, a temporary slice object should be discarded.
+ # This test is only really interesting on Python 2 since
+ # it goes through `__set/getslice__` here and can probably be
+ # removed. Use 0:7 to make sure it is never None:7.
+ class KeepIndexObject(np.ndarray):
+ def __getitem__(self, indx):
+ self.indx = indx
+ if indx == slice(0, 7):
+ raise ValueError
+
+ def __setitem__(self, indx, val):
+ self.indx = indx
+ if indx == slice(0, 4):
+ raise ValueError
+
+ k = np.array([1]).view(KeepIndexObject)
+ k[0:5]
+ assert_equal(k.indx, slice(0, 5))
+ assert_equal(sys.getrefcount(k.indx), 2)
+ try:
+ k[0:7]
+ raise AssertionError
+ except ValueError:
+ # The exception holds a reference to the slice so clear on Py2
+ if hasattr(sys, 'exc_clear'):
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ sys.exc_clear()
+ assert_equal(k.indx, slice(0, 7))
+ assert_equal(sys.getrefcount(k.indx), 2)
+
+ k[0:3] = 6
+ assert_equal(k.indx, slice(0, 3))
+ assert_equal(sys.getrefcount(k.indx), 2)
+ try:
+ k[0:4] = 2
+ raise AssertionError
+ except ValueError:
+ # The exception holds a reference to the slice so clear on Py2
+ if hasattr(sys, 'exc_clear'):
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ sys.exc_clear()
+ assert_equal(k.indx, slice(0, 4))
+ assert_equal(sys.getrefcount(k.indx), 2)
+
+
+class TestFancyIndexingCast(object):
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -611,7 +699,7 @@ class TestFancingIndexingCast(TestCase):
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
-class TestFancyIndexingEquivalence(TestCase):
+class TestFancyIndexingEquivalence(object):
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -659,30 +747,35 @@ class TestFancyIndexingEquivalence(TestCase):
assert_array_equal(a, b[0])
-class TestMultiIndexingAutomated(TestCase):
+class TestMultiIndexingAutomated(object):
"""
- These test use code to mimic the C-Code indexing for selection.
-
- NOTE: * This still lacks tests for complex item setting.
- * If you change behavior of indexing, you might want to modify
- these tests to try more combinations.
- * Behavior was written to match numpy version 1.8. (though a
- first version matched 1.7.)
- * Only tuple indices are supported by the mimicking code.
- (and tested as of writing this)
- * Error types should match most of the time as long as there
- is only one error. For multiple errors, what gets raised
- will usually not be the same one. They are *not* tested.
+ These tests use code to mimic the C-Code indexing for selection.
+
+ NOTE:
+
+ * This still lacks tests for complex item setting.
+ * If you change behavior of indexing, you might want to modify
+ these tests to try more combinations.
+ * Behavior was written to match numpy version 1.8. (though a
+ first version matched 1.7.)
+ * Only tuple indices are supported by the mimicking code.
+ (and tested as of writing this)
+ * Error types should match most of the time as long as there
+ is only one error. For multiple errors, what gets raised
+ will usually not be the same one. They are *not* tested.
+
+ Update 2016-11-30: It is probably not worth maintaining this test
+ indefinitely and it can be dropped if maintenance becomes a burden.
+
"""
- def setUp(self):
+ def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
- np.array(False),
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
@@ -721,7 +814,7 @@ class TestMultiIndexingAutomated(TestCase):
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
- `np.may_share_memory(arr, arr[indicies])` should be `True` (with
+ `np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
@@ -772,7 +865,10 @@ class TestMultiIndexingAutomated(TestCase):
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for ie. np.take).
# (Note the importance of empty lists if changing this here)
- indx = np.array(indx, dtype=np.intp)
+ try:
+ indx = np.array(indx, dtype=np.intp)
+ except ValueError:
+ raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
@@ -815,7 +911,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
- except:
+ except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
@@ -914,7 +1010,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
- except:
+ except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
@@ -925,9 +1021,13 @@ class TestMultiIndexingAutomated(TestCase):
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
- arr = arr.reshape((arr.shape[:ax]
- + mi.shape
- + arr.shape[ax+1:]))
+ try:
+ arr = arr.reshape((arr.shape[:ax]
+ + mi.shape
+ + arr.shape[ax+1:]))
+ except ValueError:
+ # too many dimensions, probably
+ raise IndexError
ax += mi.ndim
continue
@@ -950,11 +1050,13 @@ class TestMultiIndexingAutomated(TestCase):
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
- except Exception:
- prev_refcount = sys.getrefcount(arr)
- assert_raises(Exception, arr.__getitem__, index)
- assert_raises(Exception, arr.__setitem__, index, 0)
- assert_equal(prev_refcount, sys.getrefcount(arr))
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
@@ -972,11 +1074,13 @@ class TestMultiIndexingAutomated(TestCase):
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
- except Exception:
- prev_refcount = sys.getrefcount(arr)
- assert_raises(Exception, arr.__getitem__, index)
- assert_raises(Exception, arr.__setitem__, index, 0)
- assert_equal(prev_refcount, sys.getrefcount(arr))
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
@@ -992,11 +1096,12 @@ class TestMultiIndexingAutomated(TestCase):
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
- if no_copy:
- # refcount increases by one:
- assert_equal(sys.getrefcount(arr), 3)
- else:
- assert_equal(sys.getrefcount(arr), 2)
+ if HAS_REFCOUNT:
+ if no_copy:
+ # refcount increases by one:
+ assert_equal(sys.getrefcount(arr), 3)
+ else:
+ assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
@@ -1061,12 +1166,10 @@ class TestMultiIndexingAutomated(TestCase):
def test_1d(self):
a = np.arange(10)
- with warnings.catch_warnings():
- warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
- for index in self.complex_indices:
- self._check_single_index(a, index)
+ for index in self.complex_indices:
+ self._check_single_index(a, index)
-class TestFloatNonIntegerArgument(TestCase):
+class TestFloatNonIntegerArgument(object):
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
@@ -1105,7 +1208,7 @@ class TestFloatNonIntegerArgument(TestCase):
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
- # Numpy scalar sequence multiply should not work with non-integers
+ # NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
@@ -1121,11 +1224,9 @@ class TestFloatNonIntegerArgument(TestCase):
assert_raises(TypeError, np.min, d, (.2, 1.2))
-class TestBooleanArgumentErrors(TestCase):
- """Using a boolean as integer argument/indexing is an error.
-
- """
- def test_bool_as_int_argument(self):
+class TestBooleanIndexing(object):
+ # Using a boolean as integer argument/indexing is an error.
+ def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
@@ -1133,12 +1234,18 @@ class TestBooleanArgumentErrors(TestCase):
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
+ assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
- assert_raises(IndexError, lambda: a[False, 0])
- assert_raises(IndexError, lambda: a[False, 0, 0])
+ def test_boolean_indexing_weirdness(self):
+ # Weird boolean indexing things
+ a = np.ones((2, 3, 4))
+ a[False, True, ...].shape == (0, 2, 3, 4)
+ a[True, [0, 1], True, True, [1], [[2]]] == (1, 2)
+ assert_raises(IndexError, lambda: a[False, [0, 1], ...])
-class TestArrayToIndexDeprecation(TestCase):
+
+class TestArrayToIndexDeprecation(object):
"""Creating an index from array not 0-D is an error.
"""
@@ -1151,7 +1258,7 @@ class TestArrayToIndexDeprecation(TestCase):
assert_raises(TypeError, np.take, a, [0], a)
-class TestNonIntegerArrayLike(TestCase):
+class TestNonIntegerArrayLike(object):
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
@@ -1168,7 +1275,7 @@ class TestNonIntegerArrayLike(TestCase):
a.__getitem__([])
-class TestMultipleEllipsisError(TestCase):
+class TestMultipleEllipsisError(object):
"""An index can only have a single ellipsis.
"""
@@ -1179,7 +1286,7 @@ class TestMultipleEllipsisError(TestCase):
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-class TestCApiAccess(TestCase):
+class TestCApiAccess(object):
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
@@ -1216,7 +1323,3 @@ class TestCApiAccess(TestCase):
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index ddce20fe9..3bc24fc95 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -4,12 +4,11 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises,
- assert_array_equal
-)
+ assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
+ )
-class TestTake(TestCase):
+class TestTake(object):
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
@@ -24,7 +23,7 @@ class TestTake(TestCase):
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
# refcounted struct on top of object.
- types = np.int, np.object, np.dtype([('', 'i', 2)])
+ types = int, object, np.dtype([('', 'i', 2)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
@@ -53,14 +52,16 @@ class TestTake(TestCase):
for mode in ('raise', 'clip', 'wrap'):
a = np.array(objects)
b = np.array([2, 2, 4, 5, 3, 5])
- a.take(b, out=a[:6])
+ a.take(b, out=a[:6], mode=mode)
del a
- assert_(all(sys.getrefcount(o) == 3 for o in objects))
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
# not contiguous, example:
a = np.array(objects * 2)[::2]
- a.take(b, out=a[:6])
+ a.take(b, out=a[:6], mode=mode)
del a
- assert_(all(sys.getrefcount(o) == 3 for o in objects))
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
def test_unicode_mode(self):
d = np.arange(10)
@@ -84,7 +85,3 @@ class TestTake(TestCase):
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 1c561a48f..cf50d5d5c 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -1,42 +1,44 @@
from __future__ import division, absolute_import, print_function
-import locale
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, dec, assert_raises,
- assert_array_equal, TestCase, temppath,
-)
-from numpy.compat import sixu
-from test_print import in_foreign_locale
+ assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ )
+from numpy.core.tests._locales import CommaDecimalPointLocale
-longdouble_longer_than_double = (np.finfo(np.longdouble).eps
- < np.finfo(np.double).eps)
+LD_INFO = np.finfo(np.longdouble)
+longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
-_o = 1 + np.finfo(np.longdouble).eps
+_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
def test_scalar_extraction():
"""Confirm that extracting a value doesn't convert to python float"""
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
a = np.array([o, o, o])
assert_equal(a[1], o)
# Conversions string -> long double
-
+# 0.1 not exactly representable in base 2 floating point.
+repr_precision = len(repr(np.longdouble(0.1)))
+# +2 from macro block starting around line 842 in scalartypes.c.src.
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+ reason="repr precision not enough to show eps")
def test_repr_roundtrip():
- o = 1 + np.finfo(np.longdouble).eps
- assert_equal(np.longdouble(repr(o)), o,
- "repr was %s" % repr(o))
+ # We will only see eps in repr if within printing precision.
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
def test_unicode():
- np.longdouble(sixu("1.2"))
+ np.longdouble(u"1.2")
def test_string():
@@ -47,51 +49,26 @@ def test_bytes():
np.longdouble(b"1.2")
-@in_foreign_locale
-def test_fromstring_foreign():
- f = 1.234
- a = np.fromstring(repr(f), dtype=float, sep=" ")
- assert_equal(a[0], f)
-
-
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o).encode("ascii")), o)
-@in_foreign_locale
-def test_repr_roundtrip_foreign():
- o = 1.5
- assert_equal(o, np.longdouble(repr(o)))
-
-
def test_bogus_string():
assert_raises(ValueError, np.longdouble, "spam")
assert_raises(ValueError, np.longdouble, "1.0 flub")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
s = (" " + repr(o))*5
a = np.array([o]*5)
assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
err_msg="reading '%s'" % s)
-@in_foreign_locale
-def test_fromstring_best_effort_float():
- assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
- np.array([1.]))
-
-
-@in_foreign_locale
-def test_fromstring_best_effort():
- assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
- np.array([1.]))
-
-
def test_fromstring_bogus():
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
np.array([1., 2., 3.]))
@@ -107,9 +84,9 @@ def test_fromstring_missing():
np.array([1]))
-class FileBased(TestCase):
+class TestFileBased(object):
- ldbl = 1 + np.finfo(np.longdouble).eps
+ ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
out = ''.join([repr(t) + '\n' for t in tgt])
@@ -120,7 +97,8 @@ class FileBased(TestCase):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_fromfile(self):
with temppath() as path:
with open(path, 'wt') as f:
@@ -128,7 +106,8 @@ class FileBased(TestCase):
res = np.fromfile(path, dtype=np.longdouble, sep="\n")
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_genfromtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
@@ -136,7 +115,8 @@ class FileBased(TestCase):
res = np.genfromtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_loadtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
@@ -144,7 +124,8 @@ class FileBased(TestCase):
res = np.loadtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_tofile_roundtrip(self):
with temppath() as path:
self.tgt.tofile(path, sep=" ")
@@ -152,58 +133,75 @@ class FileBased(TestCase):
assert_equal(res, self.tgt)
-@in_foreign_locale
-def test_fromstring_foreign():
- s = "1.234"
- a = np.fromstring(s, dtype=np.longdouble, sep=" ")
- assert_equal(a[0], np.longdouble(s))
-
-
-@in_foreign_locale
-def test_fromstring_foreign_sep():
- a = np.array([1, 2, 3, 4])
- b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
- assert_array_equal(a, b)
-
-
-@in_foreign_locale
-def test_fromstring_foreign_value():
- b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
- assert_array_equal(b[0], 1)
-
-
# Conversions long double -> string
def test_repr_exact():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
assert_(repr(o) != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "BUG #2376")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_format():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
assert_("{0:.40g}".format(o) != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "BUG #2376")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_percent():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
assert_("%.40g" % o != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "array repr problem")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double,
+ reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_array_repr():
- o = 1 + np.finfo(np.longdouble).eps
+ o = 1 + LD_INFO.eps
a = np.array([o])
b = np.array([1], dtype=np.longdouble)
if not np.all(a != b):
raise ValueError("precision loss creating arrays")
assert_(repr(a) != repr(b))
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+ def test_repr_roundtrip_foreign(self):
+ o = 1.5
+ assert_equal(o, np.longdouble(repr(o)))
+
+ def test_fromstring_foreign_repr(self):
+ f = 1.234
+ a = np.fromstring(repr(f), dtype=float, sep=" ")
+ assert_equal(a[0], f)
+
+ def test_fromstring_best_effort_float(self):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_best_effort(self):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_foreign(self):
+ s = "1.234"
+ a = np.fromstring(s, dtype=np.longdouble, sep=" ")
+ assert_equal(a[0], np.longdouble(s))
+
+ def test_fromstring_foreign_sep(self):
+ a = np.array([1, 2, 3, 4])
+ b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
+ assert_array_equal(a, b)
-if __name__ == "__main__":
- run_module_suite()
+ def test_fromstring_foreign_value(self):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index 765b38ae0..ab8800c09 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -1,11 +1,16 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
from __future__ import division, absolute_import, print_function
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-from numpy.testing import TestCase, run_module_suite
-class TestMachAr(TestCase):
+
+class TestMachAr(object):
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
@@ -13,6 +18,7 @@ class TestMachAr(TestCase):
hiprec = ntypes.float96
MachAr(lambda v:array([v], hiprec))
except AttributeError:
+ # Fixme, this needs to raise a 'skip' exception.
"Skipping test: no ntypes.float96 available on this platform."
def test_underlow(self):
@@ -22,8 +28,5 @@ class TestMachAr(TestCase):
try:
self._run_machar_highprec()
except FloatingPointError as e:
- self.fail("Caught %s exception, should not have been raised." % e)
-
-
-if __name__ == "__main__":
- run_module_suite()
+ msg = "Caught %s exception, should not have been raised." % e
+ raise AssertionError(msg)
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 5a1f6ac98..3c8e0e722 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -2,13 +2,16 @@ from __future__ import division, absolute_import, print_function
import sys
import itertools
+import pytest
import numpy as np
-from numpy.testing import run_module_suite, assert_, assert_raises, assert_equal
-
-from numpy.core.multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_array_equal
+ )
if sys.version_info[0] >= 3:
xrange = range
@@ -84,7 +87,7 @@ def _check_assignment(srcidx, dstidx):
def test_overlapping_assignments():
- """Test automatically generated assignments which overlap in memory."""
+ # Test automatically generated assignments which overlap in memory.
inds = _indices(ndims)
@@ -92,9 +95,10 @@ def test_overlapping_assignments():
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
- yield _check_assignment, srcidx, dstidx
+ _check_assignment(srcidx, dstidx)
+@pytest.mark.slow
def test_diophantine_fuzz():
# Fuzz test the diophantine solver
rng = np.random.RandomState(1234)
@@ -107,7 +111,6 @@ def test_diophantine_fuzz():
min_count = 500//(ndim + 1)
- numbers = []
while min(feasible_count, infeasible_count) < min_count:
# Ensure big and small integer problems
A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
@@ -252,13 +255,12 @@ def test_may_share_memory_manual():
check_may_share_memory_exact(x, x.copy())
-def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
- # Check that overlap problems with common strides are solved with
- # little work.
- x = np.zeros([17,34,71,97], dtype=np.int16)
-
+def iter_random_view_pairs(x, same_steps=True, equal_size=False):
rng = np.random.RandomState(1234)
+ if equal_size and same_steps:
+ raise ValueError()
+
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
@@ -267,31 +269,93 @@ def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
step *= -1
return slice(start, stop, step)
- feasible = 0
- infeasible = 0
+ def random_slice_fixed_size(n, step, size):
+ start = rng.randint(0, n+1 - size*step)
+ stop = start + (size-1)*step + 1
+ if rng.randint(0, 2) == 0:
+ stop, start = start-1, stop-1
+ if stop < 0:
+ stop = None
+ step *= -1
+ return slice(start, stop, step)
- while min(feasible, infeasible) < min_count:
+ # First a few regular views
+ yield x, x
+ for j in range(1, 7, 3):
+ yield x[j:], x[:-j]
+ yield x[...,j:], x[...,:-j]
+
+ # An array with zero stride internal overlap
+ strides = list(x.strides)
+ strides[0] = 0
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # An array with non-zero stride internal overlap
+ strides = list(x.strides)
+ if strides[0] > 1:
+ strides[0] = 1
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # Then discontiguous views
+ while True:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
- if same_steps:
+ s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+
+ t1 = np.arange(x.ndim)
+ rng.shuffle(t1)
+
+ if equal_size:
+ t2 = t1
+ else:
+ t2 = np.arange(x.ndim)
+ rng.shuffle(t2)
+
+ a = x[s1]
+
+ if equal_size:
+ if a.size == 0:
+ continue
+
+ steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
+ if rng.randint(0, 5) == 0 else 1
+ for p, s, pa in zip(x.shape, s1, a.shape))
+ s2 = tuple(random_slice_fixed_size(p, s, pa)
+ for p, s, pa in zip(x.shape, steps2, a.shape))
+ elif same_steps:
steps2 = steps
else:
steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
- t1 = np.arange(x.ndim)
- rng.shuffle(t1)
-
- t2 = np.arange(x.ndim)
- rng.shuffle(t2)
+ if not equal_size:
+ s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
- s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
- s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
- a = x[s1].transpose(t1)
+ a = a.transpose(t1)
b = x[s2].transpose(t2)
+ yield a, b
+
+
+def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
+ # Check that overlap problems with common strides are solved with
+ # little work.
+ x = np.zeros([17,34,71,97], dtype=np.int16)
+
+ feasible = 0
+ infeasible = 0
+
+ pair_iter = iter_random_view_pairs(x, same_steps)
+
+ while min(feasible, infeasible) < min_count:
+ a, b = next(pair_iter)
+
bounds_overlap = np.may_share_memory(a, b)
may_share_answer = np.may_share_memory(a, b)
easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
@@ -299,11 +363,10 @@ def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
if easy_answer != exact_answer:
# assert_equal is slow...
- assert_equal(easy_answer, exact_answer, err_msg=repr((s1, s2)))
+ assert_equal(easy_answer, exact_answer)
if may_share_answer != bounds_overlap:
- assert_equal(may_share_answer, bounds_overlap,
- err_msg=repr((s1, s2)))
+ assert_equal(may_share_answer, bounds_overlap)
if bounds_overlap:
if exact_answer:
@@ -312,6 +375,7 @@ def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
infeasible += 1
+@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
# Check that overlap problems with common strides are always
# solved with little work.
@@ -321,6 +385,7 @@ def test_may_share_memory_easy_fuzz():
min_count=2000)
+@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
# Overlap problems with not necessarily common strides take more
# work.
@@ -348,6 +413,12 @@ def test_shares_memory_api():
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))
+def test_may_share_memory_bad_max_work():
+ x = np.zeros([1])
+ assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
+ assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
+
+
def test_internal_overlap_diophantine():
def check(A, U, exists=None):
X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
@@ -518,5 +589,362 @@ def test_non_ndarray_inputs():
assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
-if __name__ == "__main__":
- run_module_suite()
+def view_element_first_byte(x):
+ """Construct an array viewing the first byte of each element of `x`"""
+ from numpy.lib.stride_tricks import DummyArray
+ interface = dict(x.__array_interface__)
+ interface['typestr'] = '|b1'
+ interface['descr'] = [('', '|b1')]
+ return np.asarray(DummyArray(interface, x))
+
+
+def assert_copy_equivalent(operation, args, out, **kwargs):
+ """
+ Check that operation(*args, out=out) produces results
+ equivalent to out[...] = operation(*args, out=out.copy())
+ """
+
+ kwargs['out'] = out
+ kwargs2 = dict(kwargs)
+ kwargs2['out'] = out.copy()
+
+ out_orig = out.copy()
+ out[...] = operation(*args, **kwargs2)
+ expected = out.copy()
+ out[...] = out_orig
+
+ got = operation(*args, **kwargs).copy()
+
+ if (got != expected).any():
+ assert_equal(got, expected)
+
+
+class TestUFunc(object):
+ """
+ Test ufunc call memory overlap handling
+ """
+
+ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
+ count=5000):
+ shapes = [7, 13, 8, 21, 29, 32]
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(1, 6):
+ x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = count // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ if get_out_axis_size is None:
+ assert_copy_equivalent(operation, [a], out=b)
+
+ if np.shares_memory(a, b):
+ overlapping += 1
+ else:
+ for axis in itertools.chain(range(ndim), [None]):
+ a[...] = a_orig
+ b[...] = b_orig
+
+ # Determine size for reduction axis (None if scalar)
+ outsize, scalarize = get_out_axis_size(a, b, axis)
+ if outsize == 'skip':
+ continue
+
+ # Slice b to get an output array of the correct size
+ sl = [slice(None)] * ndim
+ if axis is None:
+ if outsize is None:
+ sl = [slice(0, 1)] + [0]*(ndim - 1)
+ else:
+ sl = [slice(0, outsize)] + [0]*(ndim - 1)
+ else:
+ if outsize is None:
+ k = b.shape[axis]//2
+ if ndim == 1:
+ sl[axis] = slice(k, k + 1)
+ else:
+ sl[axis] = k
+ else:
+ assert b.shape[axis] >= outsize
+ sl[axis] = slice(0, outsize)
+ b_out = b[tuple(sl)]
+
+ if scalarize:
+ b_out = b_out.reshape([])
+
+ if np.shares_memory(a, b_out):
+ overlapping += 1
+
+ # Check result
+ assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
+
+ @pytest.mark.slow
+ def test_unary_ufunc_call_fuzz(self):
+ self.check_unary_fuzz(np.invert, None, np.int16)
+
+ def test_binary_ufunc_accumulate_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # accumulate doesn't support this
+ else:
+ return a.shape[axis], False
+
+ self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduce_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ return None, (axis is None or a.ndim == 1)
+
+ self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # reduceat doesn't support this
+ else:
+ return a.shape[axis], False
+
+ def do_reduceat(a, out, axis):
+ if axis is None:
+ size = len(a)
+ step = size//len(out)
+ else:
+ size = a.shape[axis]
+ step = a.shape[axis] // out.shape[axis]
+ idx = np.arange(0, size, step)
+ return np.add.reduceat(a, idx, out=out, axis=axis)
+
+ self.check_unary_fuzz(do_reduceat, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_manual(self):
+ def check(ufunc, a, ind, out):
+ c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
+ c2 = ufunc.reduceat(a, ind, out=out)
+ assert_array_equal(c1, c2)
+
+ # Exactly same input/output arrays
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1].copy(), a)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1], a)
+
+ def test_unary_gufunc_fuzz(self):
+ shapes = [7, 13, 8, 21, 29, 32]
+ gufunc = _umath_tests.euclidean_pdist
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(2, 6):
+ x = rng.rand(*shapes[:ndim])
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = 500 // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
+ continue
+
+ # Ensure the shapes are so that euclidean_pdist is happy
+ if b.shape[-1] > b.shape[-2]:
+ b = b[...,0,:]
+ else:
+ b = b[...,:,0]
+
+ n = a.shape[-2]
+ p = n * (n - 1) // 2
+ if p <= b.shape[-1] and p > 0:
+ b = b[...,:p]
+ else:
+ n = max(2, int(np.sqrt(b.shape[-1]))//2)
+ p = n * (n - 1) // 2
+ a = a[...,:n,:]
+ b = b[...,:p]
+
+ # Call
+ if np.shares_memory(a, b):
+ overlapping += 1
+
+ with np.errstate(over='ignore', invalid='ignore'):
+ assert_copy_equivalent(gufunc, [a], out=b)
+
+ def test_ufunc_at_manual(self):
+ def check(ufunc, a, ind, b=None):
+ a0 = a.copy()
+ if b is None:
+ ufunc.at(a0, ind.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind)
+ c2 = a.copy()
+ else:
+ ufunc.at(a0, ind.copy(), b.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind, b)
+ c2 = a.copy()
+ assert_array_equal(c1, c2)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.invert, a[::-1], a)
+
+ # Overlap with second data array
+ a = np.arange(100, dtype=np.int16)
+ ind = np.arange(0, 100, 2, dtype=np.int16)
+ check(np.add, a, ind, a[25:75])
+
+ def test_unary_ufunc_1d_manual(self):
+ # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE
+
+ def check(a, b):
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ b0 = b.copy()
+ c1 = ufunc(a, out=b0)
+ c2 = ufunc(a, out=b)
+ assert_array_equal(c1, c2)
+
+ # Trigger "fancy ufunc loop" code path
+ mask = view_element_first_byte(b).view(np.bool_)
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c2 = ufunc(a, out=b, where=mask.copy()).copy()
+
+ # Also, mask overlapping with output
+ a[...] = a_orig
+ b[...] = b_orig
+ c3 = ufunc(a, out=b, where=mask).copy()
+
+ assert_array_equal(c1, c2)
+ assert_array_equal(c1, c3)
+
+ dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
+ np.float64, np.complex64, np.complex128]
+ dtypes = [np.dtype(x) for x in dtypes]
+
+ for dtype in dtypes:
+ if np.issubdtype(dtype, np.integer):
+ ufunc = np.invert
+ else:
+ ufunc = np.reciprocal
+
+ n = 1000
+ k = 10
+ indices = [
+ np.index_exp[:n],
+ np.index_exp[k:k+n],
+ np.index_exp[n-1::-1],
+ np.index_exp[k+n-1:k-1:-1],
+ np.index_exp[:2*n:2],
+ np.index_exp[k:k+2*n:2],
+ np.index_exp[2*n-1::-2],
+ np.index_exp[k+2*n-1:k-1:-2],
+ ]
+
+ for xi, yi in itertools.product(indices, indices):
+ v = np.arange(1, 1 + n*2 + k, dtype=dtype)
+ x = v[xi]
+ y = v[yi]
+
+ with np.errstate(all='ignore'):
+ check(x, y)
+
+ # Scalar cases
+ check(x[:1], y)
+ check(x[-1:], y)
+ check(x[:1].reshape([]), y)
+ check(x[-1:].reshape([]), y)
+
+ def test_unary_ufunc_where_same(self):
+ # Check behavior at wheremask overlap
+ ufunc = np.invert
+
+ def check(a, out, mask):
+ c1 = ufunc(a, out=out.copy(), where=mask.copy())
+ c2 = ufunc(a, out=out, where=mask)
+ assert_array_equal(c1, c2)
+
+ # Check behavior with same input and output arrays
+ x = np.arange(100).astype(np.bool_)
+ check(x, x, x)
+ check(x, x.copy(), x)
+ check(x, x, x.copy())
+
+ @pytest.mark.slow
+ def test_binary_ufunc_1d_manual(self):
+ ufunc = np.add
+
+ def check(a, b, c):
+ c0 = c.copy()
+ c1 = ufunc(a, b, out=c0)
+ c2 = ufunc(a, b, out=c)
+ assert_array_equal(c1, c2)
+
+ for dtype in [np.int8, np.int16, np.int32, np.int64,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ # Check different data dependency orders
+
+ n = 1000
+ k = 10
+
+ indices = []
+ for p in [1, 2]:
+ indices.extend([
+ np.index_exp[:p*n:p],
+ np.index_exp[k:k+p*n:p],
+ np.index_exp[p*n-1::-p],
+ np.index_exp[k+p*n-1:k-1:-p],
+ ])
+
+ for x, y, z in itertools.product(indices, indices, indices):
+ v = np.arange(6*n).astype(dtype)
+ x = v[x]
+ y = v[y]
+ z = v[z]
+
+ check(x, y, z)
+
+ # Scalar cases
+ check(x[:1], y, z)
+ check(x[-1:], y, z)
+ check(x[:1].reshape([]), y, z)
+ check(x[-1:].reshape([]), y, z)
+ check(x, y[:1], z)
+ check(x, y[-1:], z)
+ check(x, y[:1].reshape([]), z)
+ check(x, y[-1:].reshape([]), z)
+
+ def test_inplace_op_simple_manual(self):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(200, 200) # bigger than bufsize
+
+ x += x.T
+ assert_array_equal(x - x.T, 0)
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index e41758c51..990d0ae26 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -3,17 +3,21 @@ from __future__ import division, absolute_import, print_function
import sys
import os
import shutil
+import mmap
+import pytest
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
-from numpy import memmap
+from numpy import (
+ memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
+from numpy.compat import Path
+
from numpy import arange, allclose, asarray
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
- dec
-)
+ assert_, assert_equal, assert_array_equal, suppress_warnings
+ )
-class TestMemmap(TestCase):
- def setUp(self):
+class TestMemmap(object):
+ def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.tempdir = mkdtemp()
self.shape = (3, 4)
@@ -21,7 +25,7 @@ class TestMemmap(TestCase):
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
- def tearDown(self):
+ def teardown(self):
self.tmpfp.close()
shutil.rmtree(self.tempdir)
@@ -37,6 +41,7 @@ class TestMemmap(TestCase):
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
+ assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self):
tmpname = mktemp('', 'mmap', dir=self.tempdir)
@@ -55,8 +60,8 @@ class TestMemmap(TestCase):
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
- self.assertEqual(offset, fp.offset)
- self.assertEqual(mode, fp.mode)
+ assert_equal(offset, fp.offset)
+ assert_equal(mode, fp.mode)
del fp
def test_filename(self):
@@ -65,18 +70,35 @@ class TestMemmap(TestCase):
shape=self.shape)
abspath = os.path.abspath(tmpname)
fp[:] = self.data[:]
- self.assertEqual(abspath, fp.filename)
+ assert_equal(abspath, fp.filename)
+ b = fp[:1]
+ assert_equal(abspath, b.filename)
+ del b
+ del fp
+
+ @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+ def test_path(self):
+ tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ # os.path.realpath does not resolve symlinks on Windows
+ # see: https://bugs.python.org/issue9949
+ # use Path.resolve, just as memmap class does internally
+ abspath = str(Path(tmpname).resolve())
+ fp[:] = self.data[:]
+ assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
- self.assertEqual(abspath, b.filename)
+ assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
- self.assertEqual(fp.filename, self.tmpfp.name)
+ assert_equal(fp.filename, self.tmpfp.name)
- @dec.knownfailureif(sys.platform == 'gnu0', "This test is known to fail on hurd")
+ @pytest.mark.skipif(sys.platform == 'gnu0',
+ reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
@@ -108,7 +130,7 @@ class TestMemmap(TestCase):
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
- tmp = fp[[(1, 2), (2, 3)]]
+ tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
@@ -126,5 +148,59 @@ class TestMemmap(TestCase):
new_array = asarray(fp)
assert_(new_array.base is fp)
-if __name__ == "__main__":
- run_module_suite()
+ def test_ufunc_return_ndarray(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "np.average currently does not preserve")
+ for unary_op in [sum, average, product]:
+ result = unary_op(fp)
+ assert_(isscalar(result))
+ assert_(result.__class__ is self.data[0, 0].__class__)
+
+ assert_(unary_op(fp, axis=0).__class__ is ndarray)
+ assert_(unary_op(fp, axis=1).__class__ is ndarray)
+
+ for binary_op in [add, subtract, multiply]:
+ assert_(binary_op(fp, self.data).__class__ is ndarray)
+ assert_(binary_op(self.data, fp).__class__ is ndarray)
+ assert_(binary_op(fp, fp).__class__ is ndarray)
+
+ fp += 1
+ assert(fp.__class__ is memmap)
+ add(fp, 1, out=fp)
+ assert(fp.__class__ is memmap)
+
+ def test_getitem(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ assert_(fp[1:, :-1].__class__ is memmap)
+ # Fancy indexing returns a copy that is not memmapped
+ assert_(fp[[0, 1]].__class__ is ndarray)
+
+ def test_memmap_subclass(self):
+ class MemmapSubClass(memmap):
+ pass
+
+ fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ # We keep previous behavior for subclasses of memmap, i.e. the
+ # ufunc and __getitem__ output is never turned into a ndarray
+ assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
+ assert_(sum(fp).__class__ is MemmapSubClass)
+ assert_(fp[1:, :-1].__class__ is MemmapSubClass)
+ assert(fp[[0, 1]].__class__ is MemmapSubClass)
+
+ def test_mmap_offset_greater_than_allocation_granularity(self):
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
+ assert_(fp.offset == offset)
+
+ def test_no_shape(self):
+ self.tmpfp.write(b'a'*16)
+ mm = memmap(self.tmpfp, dtype='float64')
+ assert_equal(mm.shape, (2,))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 262fbc0c0..cdacdabbe 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1,6 +1,11 @@
from __future__ import division, absolute_import, print_function
-import collections
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
import tempfile
import sys
import shutil
@@ -8,58 +13,128 @@ import warnings
import operator
import io
import itertools
+import functools
import ctypes
import os
+import gc
+import weakref
+import pytest
+from contextlib import contextmanager
+
+from numpy.core.numeric import pickle
+
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
-
import numpy as np
-from numpy.compat import asbytes, getexception, strchar, unicode, sixu
-from test_print import in_foreign_locale
-from numpy.core.multiarray_tests import (
- test_neighborhood_iterator, test_neighborhood_iterator_oob,
- test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
- test_inplace_increment, get_buffer_info, test_as_c_array
- )
+from numpy.compat import strchar, unicode
+import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises,
- assert_equal, assert_almost_equal, assert_array_equal,
- assert_array_almost_equal, assert_allclose,
- assert_array_less, runstring, dec, SkipTest
+ assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
+ assert_array_equal, assert_raises_regex, assert_array_almost_equal,
+ assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
+ temppath, suppress_warnings
)
+from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
-from datetime import timedelta
+from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
- # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
+ # https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
-class TestFlags(TestCase):
- def setUp(self):
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+ """Allocate a new ndarray with aligned memory."""
+ dtype = np.dtype(dtype)
+ if dtype == np.dtype(object):
+ # Can't do this, fall back to standard allocation (which
+ # should always be sufficiently aligned)
+ if align is not None:
+ raise ValueError("object array alignment not supported")
+ return np.zeros(shape, dtype=dtype, order=order)
+ if align is None:
+ align = dtype.alignment
+ if not hasattr(shape, '__len__'):
+ shape = (shape,)
+ size = functools.reduce(operator.mul, shape) * dtype.itemsize
+ buf = np.empty(size + align + 1, np.uint8)
+ offset = buf.__array_interface__['data'][0] % align
+ if offset != 0:
+ offset = align - offset
+ # Note: slices producing 0-size arrays do not necessarily change
+ # data pointer --- so we use and allocate size+1
+ buf = buf[offset:offset+size+1][:-1]
+ data = np.ndarray(shape, dtype, buf, order=order)
+ data.fill(0)
+ return data
+
+
+class TestFlags(object):
+ def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
- self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
- self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
+ def test_writeable_from_readonly(self):
+ # gh-9440 - make sure fromstring, from buffer on readonly buffers
+ # set writeable False
+ data = b'\x00' * 100
+ vals = np.frombuffer(data, 'B')
+ assert_raises(ValueError, vals.setflags, write=True)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_raises(ValueError, vals.setflags, write=True)
+
+ def test_writeable_from_buffer(self):
+ data = bytearray(b'\x00' * 100)
+ vals = np.frombuffer(data, 'B')
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
+ def test_writeable_pickle(self):
+ import pickle
+ # Small arrays will be copied without setting base.
+ # See condition for using PyArray_SetBaseObject in
+ # array_setstate.
+ a = np.arange(1000)
+ for v in range(pickle.HIGHEST_PROTOCOL):
+ vals = pickle.loads(pickle.dumps(a, v))
+ assert_(vals.flags.writeable)
+ assert_(isinstance(vals.base, bytes))
+
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
+ assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
@@ -67,7 +142,14 @@ class TestFlags(TestCase):
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
- assert_equal(self.a.flags.updateifcopy, False)
+ with assert_warns(DeprecationWarning):
+ assert_equal(self.a.flags.updateifcopy, False)
+ with assert_warns(DeprecationWarning):
+ assert_equal(self.a.flags['U'], False)
+ assert_equal(self.a.flags['UPDATEIFCOPY'], False)
+ assert_equal(self.a.flags.writebackifcopy, False)
+ assert_equal(self.a.flags['X'], False)
+ assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
@@ -81,7 +163,7 @@ class TestFlags(TestCase):
assert_(a.flags.aligned)
-class TestHash(TestCase):
+class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
@@ -103,8 +185,8 @@ class TestHash(TestCase):
err_msg="%r: 2**%d - 1" % (ut, i))
-class TestAttributes(TestCase):
- def setUp(self):
+class TestAttributes(object):
+ def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
@@ -135,7 +217,7 @@ class TestAttributes(TestCase):
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
- self.assertTrue(self.three.dtype.str[0] in '<>')
+ assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
@@ -153,7 +235,7 @@ class TestAttributes(TestCase):
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
- from numpy.core.multiarray_tests import test_int_subclass
+ from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
@@ -165,12 +247,12 @@ class TestAttributes(TestCase):
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(ValueError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
- self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+ assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
@@ -180,16 +262,16 @@ class TestAttributes(TestCase):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
- except:
- raise RuntimeError(getexception())
+ except Exception as e:
+ raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(RuntimeError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
@@ -197,12 +279,12 @@ class TestAttributes(TestCase):
def set_strides(arr, strides):
arr.strides = strides
- self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+ assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
- self.assertRaises(ValueError, set_strides, x[::-1], -1)
+ assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
@@ -236,7 +318,7 @@ class TestAttributes(TestCase):
assert_array_equal(x['b'], [-2, -2])
-class TestArrayConstruction(TestCase):
+class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
@@ -268,7 +350,7 @@ class TestArrayConstruction(TestCase):
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
- tgt = np.ones((2, 3), dtype=np.bool)
+ tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
@@ -314,7 +396,7 @@ class TestArrayConstruction(TestCase):
assert_(np.asfortranarray(d).flags.f_contiguous)
-class TestAssignment(TestCase):
+class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
@@ -353,8 +435,85 @@ class TestAssignment(TestCase):
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
+ def test_unicode_assignment(self):
+ # gh-5049
+ from numpy.core.numeric import set_string_function
+
+ @contextmanager
+ def inject_str(s):
+ """ replace ndarray.__str__ temporarily """
+ set_string_function(lambda x: s, repr=False)
+ try:
+ yield
+ finally:
+ set_string_function(None, repr=False)
+
+ a1d = np.array([u'test'])
+ a0d = np.array(u'done')
+ with inject_str(u'bad'):
+ a1d[0] = a0d # previously this would invoke __str__
+ assert_equal(a1d[0], u'done')
+
+ # this would crash for the same reason
+ np.array([np.array(u'\xe5\xe4\xf6')])
+
+ def test_stringlike_empty_list(self):
+ # gh-8902
+ u = np.array([u'done'])
+ b = np.array([b'done'])
+
+ class bad_sequence(object):
+ def __getitem__(self): pass
+ def __len__(self): raise RuntimeError
+
+ assert_raises(ValueError, operator.setitem, u, 0, [])
+ assert_raises(ValueError, operator.setitem, b, 0, [])
+
+ assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
+ assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
+
+ def test_longdouble_assignment(self):
+ # only relevant if longdouble is larger than float
+ # we're looking for loss of precision
+
+ for dtype in (np.longdouble, np.longcomplex):
+ # gh-8902
+ tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+ tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
+
+ # construction
+ tiny1d = np.array([tinya])
+ assert_equal(tiny1d[0], tinya)
+
+ # scalar = scalar
+ tiny1d[0] = tinyb
+ assert_equal(tiny1d[0], tinyb)
+
+ # 0d = scalar
+ tiny1d[0, ...] = tinya
+ assert_equal(tiny1d[0], tinya)
+
+ # 0d = 0d
+ tiny1d[0, ...] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ # scalar = 0d
+ tiny1d[0] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ arr = np.array([np.array(tinya)])
+ assert_equal(arr[0], tinya)
-class TestDtypedescr(TestCase):
+ def test_cast_to_string(self):
+ # cast to str should do "str(scalar)", not "str(scalar.item())"
+ # Example: In python2, str(float) is truncated, so we want to avoid
+ # str(np.float64(...).item()) as this would incorrectly truncate.
+ a = np.zeros(1, dtype='S20')
+ a[:] = np.array(['1.12345678901234567890'], dtype='f8')
+ assert_equal(a[0], b"1.1234567890123457")
+
+
+class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
@@ -362,48 +521,58 @@ class TestDtypedescr(TestCase):
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
- self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
- self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
+ assert_(np.dtype('<i4') != np.dtype('>i4'))
+ assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
+
+ def test_structured_non_void(self):
+ fields = [('a', '<i2'), ('b', '<i2')]
+ dt_int = np.dtype(('i4', fields))
+ assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
+ # gh-9821
+ arr_int = np.zeros(4, dt_int)
+ assert_equal(repr(arr_int),
+ "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
-class TestZeroRank(TestCase):
- def setUp(self):
+
+class TestZeroRank(object):
+ def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(b[...], 'x')
- self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
- self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
+ assert_equal(a[...], 0)
+ assert_equal(b[...], 'x')
+ assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
+ assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(b[()], 'x')
- self.assertTrue(type(a[()]) is a.dtype.type)
- self.assertTrue(type(b[()]) is str)
+ assert_equal(a[()], 0)
+ assert_equal(b[()], 'x')
+ assert_(type(a[()]) is a.dtype.type)
+ assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[0], b)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[0], b)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[...] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[()] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
@@ -411,20 +580,20 @@ class TestZeroRank(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(IndexError, assign, a, 0, 42)
- self.assertRaises(IndexError, assign, b, 0, '')
- self.assertRaises(ValueError, assign, a, (), '')
+ assert_raises(IndexError, assign, a, 0, 42)
+ assert_raises(IndexError, assign, b, 0, '')
+ assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
@@ -432,40 +601,56 @@ class TestZeroRank(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
- self.assertEqual(x[()], 5)
+ assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
- self.assertEqual(x[()], 6)
+ assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
- self.assertRaises(ValueError, np.add, x, [1], x)
+ assert_raises(ValueError, np.add, x, [1], x)
+
+ def test_real_imag(self):
+ # contiguity checks are for gh-11245
+ x = np.array(1j)
+ xr = x.real
+ xi = x.imag
+
+ assert_equal(xr, np.array(0))
+ assert_(type(xr) is np.ndarray)
+ assert_equal(xr.flags.contiguous, True)
+ assert_equal(xr.flags.f_contiguous, True)
+ assert_equal(xi, np.array(1))
+ assert_(type(xi) is np.ndarray)
+ assert_equal(xi.flags.contiguous, True)
+ assert_equal(xi.flags.f_contiguous, True)
-class TestScalarIndexing(TestCase):
- def setUp(self):
+
+class TestScalarIndexing(object):
+ def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(a[...].shape, ())
+ assert_equal(a[...], 0)
+ assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(a[()].shape, ())
+ assert_equal(a[()], 0)
+ assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
@@ -473,18 +658,18 @@ class TestScalarIndexing(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(TypeError, assign, a, 0, 42)
+ assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
@@ -492,8 +677,8 @@ class TestScalarIndexing(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
@@ -544,13 +729,16 @@ class TestScalarIndexing(TestCase):
assert_equal(a, [0, 1, 0, 1, 2])
-class TestCreation(TestCase):
+class TestCreation(object):
+ """
+ Test the np.array constructor
+ """
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
- self.assertRaises(ValueError, np.array, x())
+ assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -564,6 +752,21 @@ class TestCreation(TestCase):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
+ def test_too_big_error(self):
+ # 45341 is the smallest integer greater than sqrt(2**31 - 1).
+ # 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
+ # We want to make sure that the square byte array with those dimensions
+ # is too big on 32 or 64 bit systems respectively.
+ if np.iinfo('intp').max == 2**31 - 1:
+ shape = (46341, 46341)
+ elif np.iinfo('intp').max == 2**63 - 1:
+ shape = (3037000500, 3037000500)
+ else:
+ return
+ assert_raises(ValueError, np.empty, shape, dtype=np.int8)
+ assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
+ assert_raises(ValueError, np.ones, shape, dtype=np.int8)
+
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
@@ -586,7 +789,7 @@ class TestCreation(TestCase):
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
- @dec.slow
+ @pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
@@ -649,20 +852,20 @@ class TestCreation(TestCase):
str(d)
def test_sequence_non_homogenous(self):
- assert_equal(np.array([4, 2**80]).dtype, np.object)
- assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80] * 3).dtype, np.object)
- assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
- assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
- assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
-
- @dec.skipif(sys.version_info[0] >= 3)
+ assert_equal(np.array([4, 2**80]).dtype, object)
+ assert_equal(np.array([4, 2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80] * 3).dtype, object)
+ assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
+ assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
+ assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
- assert_equal(np.array([long(4), 2**80]).dtype, np.object)
- assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
- assert_equal(np.array([2**80, long(4)]).dtype, np.object)
+ assert_equal(np.array([long(4), 2**80]).dtype, object)
+ assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
+ assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -745,8 +948,36 @@ class TestCreation(TestCase):
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
+ def test_jagged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = np.array([[1], 2, 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, [2], 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, 2, [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ def test_jagged_shape_object(self):
+ # The jagged dimension of a list is turned into an object array
+ a = np.array([[1, 1], [2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2, 2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2], [3, 3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
-class TestStructured(TestCase):
+
+class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
@@ -766,7 +997,7 @@ class TestStructured(TestCase):
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
- dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
+ dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
@@ -811,8 +1042,8 @@ class TestStructured(TestCase):
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", category=DeprecationWarning)
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
@@ -820,8 +1051,8 @@ class TestStructured(TestCase):
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", category=DeprecationWarning)
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
@@ -842,12 +1073,11 @@ class TestStructured(TestCase):
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
- b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+ b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
- # Check that 'equiv' casting can reorder fields and change byte
- # order
+ # Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
@@ -923,20 +1153,131 @@ class TestStructured(TestCase):
assert_raises(ValueError, testassign)
+ def test_zero_width_string(self):
+ # Test for PR #6430 / issues #473, #4955, #2585
+
+ dt = np.dtype([('I', int), ('S', 'S0')])
+
+ x = np.zeros(4, dtype=dt)
-class TestBool(TestCase):
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['S'].itemsize, 0)
+
+ x['S'] = ['a', 'b', 'c', 'd']
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #4955
+ x['S'][x['I'] == 0] = 'hello'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #2585
+ x['S'] = 'A'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Allow zero-width dtypes in ndarray constructor
+ y = np.ndarray(4, dtype=x['S'].dtype)
+ assert_equal(y.itemsize, 0)
+ assert_equal(x['S'], y)
+
+ # More tests for indexing an array with zero-width fields
+ assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
+ ('b', 'u1')])['a'].itemsize, 0)
+ assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
+ assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
+
+ xx = x['S'].reshape((2, 2))
+ assert_equal(xx.itemsize, 0)
+ assert_equal(xx, [[b'', b''], [b'', b'']])
+ # check for no uninitialized memory due to viewing S0 array
+ assert_equal(xx[:].dtype, xx.dtype)
+ assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
+
+ b = io.BytesIO()
+ np.save(b, xx)
+
+ b.seek(0)
+ yy = np.load(b)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
+ with temppath(suffix='.npy') as tmp:
+ np.save(tmp, xx)
+ yy = np.load(tmp)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
+ def test_base_attr(self):
+ a = np.zeros(3, dtype='i4,f4')
+ b = a[0]
+ assert_(b.base is a)
+
+ def test_assignment(self):
+ def testassign(arr, v):
+ c = arr.copy()
+ c[0] = v # assign using setitem
+ c[1:] = v # assign using "dtype_transfer" code paths
+ return c
+
+ dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
+ arr = np.ones(2, dt)
+ v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
+ v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
+ v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
+ v4 = np.array([(2,)], dtype=[('bar', 'i8')])
+ v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
+
+ ans = np.array([(2,3),(2,3)], dtype=dt)
+ assert_equal(testassign(arr, v1), ans)
+ assert_equal(testassign(arr, v2), ans)
+ assert_equal(testassign(arr, v3), ans)
+ assert_raises(ValueError, lambda: testassign(arr, v4))
+ assert_equal(testassign(arr, v5), ans)
+ w[:] = 4
+ assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
+
+ # test field-reordering, assignment by position, and self-assignment
+ a = np.array([(1,2,3)],
+ dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
+ a[['foo', 'bar']] = a[['bar', 'foo']]
+ assert_equal(a[0].item(), (2,1,3))
+
+ # test that this works even for 'simple_unaligned' structs
+ # (ie, that PyArray_EquivTypes cares about field order too)
+ a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
+ a[['a', 'b']] = a[['b', 'a']]
+ assert_equal(a[0].item(), (2,1))
+
+ def test_structuredscalar_indexing(self):
+ # test gh-7262
+ x = np.empty(shape=1, dtype="(2)3S,(2)3U")
+ assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
+ assert_equal(x[0], x[0][()])
+
+ def test_multiindex_titles(self):
+ a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
+ assert_raises(KeyError, lambda : a[['a','c']])
+ assert_raises(KeyError, lambda : a[['a','a']])
+ assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
+ a[['b','c']] # no exception
+
+
+class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
- self.assertTrue(a0 is b0)
+ assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
- self.assertTrue(a1 is b1)
- self.assertTrue(np.array([True])[0] is a1)
- self.assertTrue(np.array(True)[()] is a1)
+ assert_(a1 is b1)
+ assert_(np.array([True])[0] is a1)
+ assert_(np.array(True)[()] is a1)
def test_sum(self):
- d = np.ones(101, dtype=np.bool)
+ d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
@@ -950,23 +1291,23 @@ class TestBool(TestCase):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
- a = np.array(l, dtype=np.bool)
+ a = np.array(l, dtype=bool)
c = builtins.sum(l)
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av *= 4
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
- @dec.slow
+ @pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
@@ -975,15 +1316,129 @@ class TestBool(TestCase):
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
- a = np.zeros((18,), dtype=np.bool)[o+1:]
+ a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
- a = np.ones((18,), dtype=np.bool)[o+1:]
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+ a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+
+ def _test_cast_from_flexible(self, dtype):
+ # empty string -> false
+ for n in range(3):
+ v = np.array(b'', (dtype, n))
+ assert_equal(bool(v), False)
+ assert_equal(bool(v[()]), False)
+ assert_equal(v.astype(bool), False)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.False_)
+
+ # anything else -> true
+ for n in range(1, 4):
+ for val in [b'a', b'0', b' ']:
+ v = np.array(val, (dtype, n))
+ assert_equal(bool(v), True)
+ assert_equal(bool(v[()]), True)
+ assert_equal(v.astype(bool), True)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.True_)
+
+ def test_cast_from_void(self):
+ self._test_cast_from_flexible(np.void)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_unicode(self):
+ self._test_cast_from_flexible(np.unicode_)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_bytes(self):
+ self._test_cast_from_flexible(np.bytes_)
+
+
+class TestZeroSizeFlexible(object):
+ @staticmethod
+ def _zeros(shape, dtype=str):
+ dtype = np.dtype(dtype)
+ if dtype == np.void:
+ return np.zeros(shape, dtype=(dtype, 0))
+
+ # not constructable directly
+ dtype = np.dtype([('x', dtype, 0)])
+ return np.zeros(shape, dtype=dtype)['x']
+
+ def test_create(self):
+ zs = self._zeros(10, bytes)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, np.void)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, unicode)
+ assert_equal(zs.itemsize, 0)
+
+ def _test_sort_partition(self, name, kinds, **kwargs):
+ # Previously, these would all hang
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ sort_method = getattr(zs, name)
+ sort_func = getattr(np, name)
+ for kind in kinds:
+ sort_method(kind=kind, **kwargs)
+ sort_func(zs, kind=kind, **kwargs)
+
+ def test_sort(self):
+ self._test_sort_partition('sort', kinds='qhm')
+
+ def test_argsort(self):
+ self._test_sort_partition('argsort', kinds='qhm')
+
+ def test_partition(self):
+ self._test_sort_partition('partition', kinds=['introselect'], kth=2)
+ def test_argpartition(self):
+ self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
-class TestMethods(TestCase):
+ def test_resize(self):
+ # previously an error
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ zs.resize(25)
+ zs.resize((10, 10))
+
+ def test_view(self):
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+
+ # viewing as itself should be allowed
+ assert_equal(zs.view(dt).dtype, np.dtype(dt))
+
+ # viewing as any non-empty type gives an empty result
+ assert_equal(zs.view((dt, 1)).shape, (0,))
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ p = pickle.dumps(zs, protocol=proto)
+ zs2 = pickle.loads(p)
+
+ assert_equal(zs.dtype, zs2.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_pickle_with_buffercallback(self):
+ array = np.arange(10)
+ buffers = []
+ bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+ protocol=5)
+ array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+ # when using pickle protocol 5 with buffer callbacks,
+ # array_from_buffer is reconstructed from a buffer holding a view
+ # to the initial array's data, so modifying an element in array
+ # should modify it in array_from_buffer too.
+ array[0] = -1
+ assert array_from_buffer[0] == -1, array_from_buffer[0]
+
+
+class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -1019,6 +1474,10 @@ class TestMethods(TestCase):
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+ oned = np.ones(1)
+ # gh-12031, caused SEGFAULT
+ assert_raises(TypeError, oned.choose,np.void(0), [oned])
+
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
@@ -1028,8 +1487,8 @@ class TestMethods(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, a.prod)
- self.assertRaises(ArithmeticError, a2.prod, axis=1)
+ assert_raises(ArithmeticError, a.prod)
+ assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -1110,9 +1569,9 @@ class TestMethods(TestCase):
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
- self.assertRaises(ValueError, lambda: a.transpose(0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
+ assert_raises(ValueError, lambda: a.transpose(0))
+ assert_raises(ValueError, lambda: a.transpose(0, 0))
+ assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
@@ -1173,9 +1632,9 @@ class TestMethods(TestCase):
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
- for endianess in '<>':
+ for endianness in '<>':
for dt in np.typecodes['Complex']:
- arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
@@ -1208,7 +1667,7 @@ class TestMethods(TestCase):
assert_equal(c, a, msg)
# test object array sorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
@@ -1281,6 +1740,60 @@ class TestMethods(TestCase):
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+ # test generic class with bogus ordering,
+ # should not segfault.
+ class Boom(object):
+ def __lt__(self, other):
+ return True
+
+ a = np.array([Boom()]*100, dtype=object)
+ for kind in ['q', 'm', 'h']:
+ msg = "bogus comparison object sort, kind=%s" % kind
+ c.sort(kind=kind)
+
+ def test_void_sort(self):
+ # gh-8210 - previously segfaulted
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view('V4')
+ arr[::-1].sort()
+
+ dt = np.dtype([('val', 'i4', (1,))])
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view(dt)
+ arr[::-1].sort()
+
+ def test_sort_raises(self):
+ #gh-9404
+ arr = np.array([0, datetime.now(), 1], dtype=object)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+ #gh-3879
+ class Raiser(object):
+ def raises_anything(*args, **kwargs):
+ raise TypeError("SOMETHING ERRORED")
+ __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+ arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+ np.random.shuffle(arr)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+
+ def test_sort_degraded(self):
+ # test degraded dataset would take minutes to run with normal qsort
+ d = np.arange(1000000)
+ do = d.copy()
+ x = d
+ # create a median of 3 killer where each median is the sorted second
+ # last element of the quicksort partition
+ while x.size > 3:
+ mid = x.size // 2
+ x[mid], x[-2] = x[-2], x[mid]
+ x = x[:-2]
+
+ assert_equal(np.sort(d), do)
+ assert_equal(d[np.argsort(d)], do)
+
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
@@ -1328,6 +1841,9 @@ class TestMethods(TestCase):
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+ assert_raises_regex(ValueError, 'duplicate',
+ lambda: r.sort(order=['id', 'id']))
+
if sys.byteorder == 'little':
strtype = '>i2'
else:
@@ -1369,9 +1885,9 @@ class TestMethods(TestCase):
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
- for endianess in '<>':
+ for endianness in '<>':
for dt in np.typecodes['Complex']:
- arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
@@ -1399,7 +1915,7 @@ class TestMethods(TestCase):
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
@@ -1466,7 +1982,7 @@ class TestMethods(TestCase):
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
- a = np.zeros(100, dtype=np.complex)
+ a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
@@ -1573,6 +2089,13 @@ class TestMethods(TestCase):
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'l')
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'l')
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
@@ -1670,6 +2193,13 @@ class TestMethods(TestCase):
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'l', s[:0])
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'l', s)
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
@@ -1717,6 +2247,24 @@ class TestMethods(TestCase):
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
+ def test_argpartition_integer(self):
+ # Test non-integer values in kth raise an error.
+ d = np.arange(10)
+ assert_raises(TypeError, d.argpartition, 9.)
+ # Test also for generic type argpartition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(TypeError, d_obj.argpartition, 9.)
+
+ def test_partition_integer(self):
+ # Test non-integer values in kth raise an error, gh-5469
+ d = np.arange(10)
+ assert_raises(TypeError, d.partition, 9.)
+ # Test also for generic type partition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(TypeError, d_obj.partition, 9.)
+
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
@@ -1798,8 +2346,8 @@ class TestMethods(TestCase):
# sorted
d = np.arange(49)
- self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
- self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
+ assert_equal(np.partition(d, 5, kind=k)[5], 5)
+ assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
@@ -1807,8 +2355,8 @@ class TestMethods(TestCase):
# rsorted
d = np.arange(47)[::-1]
- self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
- self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
+ assert_equal(np.partition(d, 6, kind=k)[6], 6)
+ assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -1848,7 +2396,7 @@ class TestMethods(TestCase):
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
- self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
+ assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -1866,13 +2414,13 @@ class TestMethods(TestCase):
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
- assert_raises(ValueError, d.partition, 3, axis=1)
+ assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
- assert_raises(ValueError, np.partition, d, 2, axis=1)
+ assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
- assert_raises(ValueError, d.argpartition, 3, axis=1)
+ assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
- assert_raises(ValueError, np.argpartition, d, 2, axis=1)
+ assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
@@ -1900,7 +2448,7 @@ class TestMethods(TestCase):
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
- at = self.assertTrue
+ at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
@@ -1909,7 +2457,7 @@ class TestMethods(TestCase):
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
- self.assertEqual(p[i], i)
+ assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
@@ -2087,7 +2635,8 @@ class TestMethods(TestCase):
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
- def test_dot(self):
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
@@ -2111,49 +2660,49 @@ class TestMethods(TestCase):
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
- assert_equal(np.dot(eaf, eaf), eaf)
- assert_equal(np.dot(eaf.T, eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T), eaf)
- assert_equal(np.dot(eaf.T, eaf.T), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
+ assert_equal(func(eaf, eaf), eaf)
+ assert_equal(func(eaf.T, eaf), eaf)
+ assert_equal(func(eaf, eaf.T), eaf)
+ assert_equal(func(eaf.T, eaf.T), eaf)
+ assert_equal(func(eaf.T.copy(), eaf), eaf)
+ assert_equal(func(eaf, eaf.T.copy()), eaf)
+ assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
- assert_equal(np.dot(ebf, ebf), eaf)
- assert_equal(np.dot(ebf.T, ebf), eaf)
- assert_equal(np.dot(ebf, ebf.T), eaf)
- assert_equal(np.dot(ebf.T, ebf.T), eaf)
+ assert_equal(func(ebf, ebf), eaf)
+ assert_equal(func(ebf.T, ebf), eaf)
+ assert_equal(func(ebf, ebf.T), eaf)
+ assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
- np.dot(edf[::-1, :], edf.T),
- np.dot(edf[::-1, :].copy(), edf.T.copy())
+ func(edf[::-1, :], edf.T),
+ func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf[:, ::-1], edf.T),
- np.dot(edf[:, ::-1].copy(), edf.T.copy())
+ func(edf[:, ::-1], edf.T),
+ func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf, edf[::-1, :].T),
- np.dot(edf, edf[::-1, :].T.copy())
+ func(edf, edf[::-1, :].T),
+ func(edf, edf[::-1, :].T.copy())
)
assert_equal(
- np.dot(edf, edf[:, ::-1].T),
- np.dot(edf, edf[:, ::-1].T.copy())
+ func(edf, edf[:, ::-1].T),
+ func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
- np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
- np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
+ func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
+ func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
- np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
- np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
+ func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
+ func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
@@ -2161,9 +2710,43 @@ class TestMethods(TestCase):
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
- assert_equal(np.dot(edf, edf.T), eddtf)
- assert_equal(np.dot(edf.T, edf), edtdf)
+ assert_equal(func(edf, edf.T), eddtf)
+ assert_equal(func(edf.T, edf), edtdf)
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ @pytest.mark.parametrize('dtype', 'ifdFD')
+ def test_no_dgemv(self, func, dtype):
+ # check vector arg for contiguous before gemv
+ # gh-12156
+ a = np.arange(8.0, dtype=dtype).reshape(2, 4)
+ b = np.broadcast_to(1., (4, 1))
+ ret1 = func(a, b)
+ ret2 = func(a, b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T)
+ assert_equal(ret1, ret2)
+
+ # check for unaligned data
+ dt = np.dtype(dtype)
+ a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
+ a = a.reshape(2, 4)
+ b = a[0]
+ # make sure it is not aligned
+ assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
+ ret1 = func(a, b)
+ ret2 = func(a.copy(), b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T.copy())
+ assert_equal(ret1, ret2)
+ def test_dot(self):
+ a = np.array([[1, 0], [0, 1]])
+ b = np.array([[0, 1], [1, 0]])
+ c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
@@ -2178,27 +2761,6 @@ class TestMethods(TestCase):
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
- def test_dot_override(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
- class A(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return "A"
-
- class B(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return NotImplemented
-
- a = A()
- b = B()
- c = np.array([[1]])
-
- assert_equal(np.dot(a, b), "A")
- assert_equal(c.dot(a), "A")
- assert_raises(TypeError, np.dot, b, c)
- assert_raises(TypeError, c.dot, b)
-
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
@@ -2206,12 +2768,73 @@ class TestMethods(TestCase):
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
+ def test_dot_out_mem_overlap(self):
+ np.random.seed(1)
+
+ # Test BLAS and non-BLAS code paths, including all dtypes
+ # that dot() supports
+ dtypes = [np.dtype(code) for code in np.typecodes['All']
+ if code not in 'USVM']
+ for dtype in dtypes:
+ a = np.random.rand(3, 3).astype(dtype)
+
+ # Valid dot() output arrays must be aligned
+ b = _aligned_zeros((3, 3), dtype=dtype)
+ b[...] = np.random.rand(3, 3)
+
+ y = np.dot(a, b)
+ x = np.dot(a, b, out=b)
+ assert_equal(x, y, err_msg=repr(dtype))
+
+ # Check invalid output array
+ assert_raises(ValueError, np.dot, a, b, out=b[::2])
+ assert_raises(ValueError, np.dot, a, b, out=b.T)
+
+ def test_dot_matmul_out(self):
+ # gh-9641
+ class Sub(np.ndarray):
+ pass
+ a = np.ones((2, 2)).view(Sub)
+ b = np.ones((2, 2)).view(Sub)
+ out = np.ones((2, 2))
+
+ # make sure out can be any ndarray (not only subclass of inputs)
+ np.dot(a, b, out=out)
+ np.matmul(a, b, out=out)
+
+ def test_dot_matmul_inner_array_casting_fails(self):
+
+ class A(object):
+ def __array__(self, *args, **kwargs):
+ raise NotImplementedError
+
+ # Don't override the error from calling __array__()
+ assert_raises(NotImplementedError, np.dot, A(), A())
+ assert_raises(NotImplementedError, np.matmul, A(), A())
+ assert_raises(NotImplementedError, np.inner, A(), A())
+
+ def test_matmul_out(self):
+ # overlapping memory
+ a = np.arange(18).reshape(2, 3, 3)
+ b = np.matmul(a, a)
+ c = np.matmul(a, a, out=a)
+ assert_(c is a)
+ assert_equal(c, b)
+ a = np.arange(18).reshape(2, 3, 3)
+ c = np.matmul(a, a, out=a[::-1, ...])
+ assert_(c.base is a.base)
+ assert_equal(c, b)
+
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
+ assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
+ assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
@@ -2243,10 +2866,23 @@ class TestMethods(TestCase):
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
- assert_(sys.getrefcount(a) < 50)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
- assert_(sys.getrefcount(a) < 50)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a) < 50)
+
+ def test_size_zero_memleak(self):
+ # Regression test for issue 9615
+ # Exercises a special-case code path for dot products of length
+ # zero in cblasfuncs (making it is specific to floating dtypes).
+ a = np.array([], dtype=np.float64)
+ x = np.array(2.0)
+ for _ in range(100):
+ np.dot(a, a, out=x)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
@@ -2273,7 +2909,7 @@ class TestMethods(TestCase):
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
- assert isinstance(t, MyArray)
+ assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
@@ -2428,10 +3064,10 @@ class TestMethods(TestCase):
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
- assert_raises(ValueError, a.swapaxes, -5, 0)
- assert_raises(ValueError, a.swapaxes, 4, 0)
- assert_raises(ValueError, a.swapaxes, 0, -5)
- assert_raises(ValueError, a.swapaxes, 0, 4)
+ assert_raises(np.AxisError, a.swapaxes, -5, 0)
+ assert_raises(np.AxisError, a.swapaxes, 4, 0)
+ assert_raises(np.AxisError, a.swapaxes, 0, -5)
+ assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
@@ -2531,6 +3167,10 @@ class TestMethods(TestCase):
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
+class TestCequenceMethods(object):
+ def test_array_contains(self):
+ assert_(4.0 in np.arange(16.).reshape(4,4))
+ assert_(20.0 not in np.arange(16.).reshape(4,4))
class TestBinop(object):
def test_inplace(self):
@@ -2564,270 +3204,201 @@ class TestBinop(object):
assert_equal(a, 5)
assert_equal(b, 3)
- def test_extension_incref_elide(self):
- # test extension (e.g. cython) calling PyNumber_* slots without
- # increasing the reference counts
- #
- # def incref_elide(a):
- # d = input.copy() # refcount 1
- # return d, d + d # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide
- d = np.ones(5)
- orig, res = incref_elide(d)
- # the return original should not be changed to an inplace operation
- assert_array_equal(orig, d)
- assert_array_equal(res, d + d)
-
- def test_extension_incref_elide_stack(self):
- # scanning if the refcount == 1 object is on the python stack to check
- # that we are called directly from python is flawed as object may still
- # be above the stack pointer and we have no access to the top of it
- #
- # def incref_elide_l(d):
- # return l[4] + l[4] # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide_l
- # padding with 1 makes sure the object on the stack is not overwriten
- l = [1, 1, 1, 1, np.ones(5)]
- res = incref_elide_l(l)
- # the return original should not be changed to an inplace operation
- assert_array_equal(l[4], np.ones(5))
- assert_array_equal(res, l[4] + l[4])
-
- def test_ufunc_override_rop_precedence(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
- # Check that __rmul__ and other right-hand operations have
- # precedence over __numpy_ufunc__
-
+ # ndarray.__rop__ always calls ufunc
+ # ndarray.__iop__ always calls ufunc
+ # ndarray.__op__, __rop__:
+ # - defer if other has __array_ufunc__ and it is None
+ # or other is not a subclass and has higher array priority
+ # - else, call ufunc
+ def test_ufunc_binop_interaction(self):
+ # Python method name (without underscores)
+ # -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
- '__add__': ('__radd__', np.add, True),
- '__sub__': ('__rsub__', np.subtract, True),
- '__mul__': ('__rmul__', np.multiply, True),
- '__truediv__': ('__rtruediv__', np.true_divide, True),
- '__floordiv__': ('__rfloordiv__', np.floor_divide, True),
- '__mod__': ('__rmod__', np.remainder, True),
- '__divmod__': ('__rdivmod__', None, False),
- '__pow__': ('__rpow__', np.power, True),
- '__lshift__': ('__rlshift__', np.left_shift, True),
- '__rshift__': ('__rrshift__', np.right_shift, True),
- '__and__': ('__rand__', np.bitwise_and, True),
- '__xor__': ('__rxor__', np.bitwise_xor, True),
- '__or__': ('__ror__', np.bitwise_or, True),
- '__ge__': ('__le__', np.less_equal, False),
- '__gt__': ('__lt__', np.less, False),
- '__le__': ('__ge__', np.greater_equal, False),
- '__lt__': ('__gt__', np.greater, False),
- '__eq__': ('__eq__', np.equal, False),
- '__ne__': ('__ne__', np.not_equal, False),
+ 'add': (np.add, True, float),
+ 'sub': (np.subtract, True, float),
+ 'mul': (np.multiply, True, float),
+ 'truediv': (np.true_divide, True, float),
+ 'floordiv': (np.floor_divide, True, float),
+ 'mod': (np.remainder, True, float),
+ 'divmod': (np.divmod, False, float),
+ 'pow': (np.power, True, int),
+ 'lshift': (np.left_shift, True, int),
+ 'rshift': (np.right_shift, True, int),
+ 'and': (np.bitwise_and, True, int),
+ 'xor': (np.bitwise_xor, True, int),
+ 'or': (np.bitwise_or, True, int),
+ # 'ge': (np.less_equal, False),
+ # 'gt': (np.less, False),
+ # 'le': (np.greater_equal, False),
+ # 'lt': (np.greater, False),
+ # 'eq': (np.equal, False),
+ # 'ne': (np.not_equal, False),
}
+ if sys.version_info >= (3, 5):
+ ops['matmul'] = (np.matmul, False, float)
- class OtherNdarraySubclass(np.ndarray):
+ class Coerced(Exception):
pass
- class OtherNdarraySubclassWithOverride(np.ndarray):
- def __numpy_ufunc__(self, *a, **kw):
- raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
- "been called!") % (a, kw))
-
- def check(op_name, ndsubclass):
- rop_name, np_op, has_iop = ops[op_name]
-
- if has_iop:
- iop_name = '__i' + op_name[2:]
- iop = getattr(operator, iop_name)
-
- if op_name == "__divmod__":
- op = divmod
+ def array_impl(self):
+ raise Coerced
+
+ def op_impl(self, other):
+ return "forward"
+
+ def rop_impl(self, other):
+ return "reverse"
+
+ def iop_impl(self, other):
+ return "in-place"
+
+ def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
+ return ("__array_ufunc__", ufunc, method, args, kwargs)
+
+ # Create an object with the given base, in the given module, with a
+ # bunch of placeholder __op__ methods, and optionally a
+ # __array_ufunc__ and __array_priority__.
+ def make_obj(base, array_priority=False, array_ufunc=False,
+ alleged_module="__main__"):
+ class_namespace = {"__array__": array_impl}
+ if array_priority is not False:
+ class_namespace["__array_priority__"] = array_priority
+ for op in ops:
+ class_namespace["__{0}__".format(op)] = op_impl
+ class_namespace["__r{0}__".format(op)] = rop_impl
+ class_namespace["__i{0}__".format(op)] = iop_impl
+ if array_ufunc is not False:
+ class_namespace["__array_ufunc__"] = array_ufunc
+ eval_namespace = {"base": base,
+ "class_namespace": class_namespace,
+ "__name__": alleged_module,
+ }
+ MyType = eval("type('MyType', (base,), class_namespace)",
+ eval_namespace)
+ if issubclass(MyType, np.ndarray):
+ # Use this range to avoid special case weirdnesses around
+ # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
+ return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
- op = getattr(operator, op_name)
-
- # Dummy class
- def __init__(self, *a, **kw):
- pass
-
- def __numpy_ufunc__(self, *a, **kw):
- raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
- "been called!") % (a, kw))
-
- def __op__(self, *other):
- return "op"
-
- def __rop__(self, *other):
- return "rop"
-
- if ndsubclass:
- bases = (np.ndarray,)
- else:
- bases = (object,)
-
- dct = {'__init__': __init__,
- '__numpy_ufunc__': __numpy_ufunc__,
- op_name: __op__}
- if op_name != rop_name:
- dct[rop_name] = __rop__
-
- cls = type("Rop" + rop_name, bases, dct)
-
- # Check behavior against both bare ndarray objects and a
- # ndarray subclasses with and without their own override
- obj = cls((1,), buffer=np.ones(1,))
-
- arr_objs = [np.array([1]),
- np.array([2]).view(OtherNdarraySubclass),
- np.array([3]).view(OtherNdarraySubclassWithOverride),
- ]
-
- for arr in arr_objs:
- err_msg = "%r %r" % (op_name, arr,)
-
- # Check that ndarray op gives up if it sees a non-subclass
- if not isinstance(obj, arr.__class__):
- assert_equal(getattr(arr, op_name)(obj),
- NotImplemented, err_msg=err_msg)
-
- # Check that the Python binops have priority
- assert_equal(op(obj, arr), "op", err_msg=err_msg)
- if op_name == rop_name:
- assert_equal(op(arr, obj), "op", err_msg=err_msg)
- else:
- assert_equal(op(arr, obj), "rop", err_msg=err_msg)
-
- # Check that Python binops have priority also for in-place ops
- if has_iop:
- assert_equal(getattr(arr, iop_name)(obj),
- NotImplemented, err_msg=err_msg)
- if op_name != "__pow__":
- # inplace pow requires the other object to be
- # integer-like?
- assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
-
- # Check that ufunc call __numpy_ufunc__ normally
- if np_op is not None:
- assert_raises(AssertionError, np_op, arr, obj,
- err_msg=err_msg)
- assert_raises(AssertionError, np_op, obj, arr,
- err_msg=err_msg)
-
- # Check all binary operations
- for op_name in sorted(ops.keys()):
- yield check, op_name, True
- yield check, op_name, False
-
- def test_ufunc_override_rop_simple(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
- # Check parts of the binary op overriding behavior in an
- # explicit test case that is easier to understand.
- class SomeClass(object):
- def __numpy_ufunc__(self, *a, **kw):
- return "ufunc"
-
- def __mul__(self, other):
- return 123
-
- def __rmul__(self, other):
- return 321
-
- def __rsub__(self, other):
- return "no subs for me"
-
- def __gt__(self, other):
- return "yep"
-
- def __lt__(self, other):
- return "nope"
-
- class SomeClass2(SomeClass, np.ndarray):
- def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
- if ufunc is np.multiply or ufunc is np.bitwise_and:
- return "ufunc"
- else:
- inputs = list(inputs)
- if i < len(inputs):
- inputs[i] = np.asarray(self)
- func = getattr(ufunc, method)
- if ('out' in kw) and (kw['out'] is not None):
- kw['out'] = np.asarray(kw['out'])
- r = func(*inputs, **kw)
- x = self.__class__(r.shape, dtype=r.dtype)
- x[...] = r
- return x
-
- class SomeClass3(SomeClass2):
- def __rsub__(self, other):
- return "sub for me"
-
- arr = np.array([0])
- obj = SomeClass()
- obj2 = SomeClass2((1,), dtype=np.int_)
- obj2[0] = 9
- obj3 = SomeClass3((1,), dtype=np.int_)
- obj3[0] = 4
-
- # obj is first, so should get to define outcome.
- assert_equal(obj * arr, 123)
- # obj is second, but has __numpy_ufunc__ and defines __rmul__.
- assert_equal(arr * obj, 321)
- # obj is second, but has __numpy_ufunc__ and defines __rsub__.
- assert_equal(arr - obj, "no subs for me")
- # obj is second, but has __numpy_ufunc__ and defines __lt__.
- assert_equal(arr > obj, "nope")
- # obj is second, but has __numpy_ufunc__ and defines __gt__.
- assert_equal(arr < obj, "yep")
- # Called as a ufunc, obj.__numpy_ufunc__ is used.
- assert_equal(np.multiply(arr, obj), "ufunc")
- # obj is second, but has __numpy_ufunc__ and defines __rmul__.
- arr *= obj
- assert_equal(arr, 321)
-
- # obj2 is an ndarray subclass, so CPython takes care of the same rules.
- assert_equal(obj2 * arr, 123)
- assert_equal(arr * obj2, 321)
- assert_equal(arr - obj2, "no subs for me")
- assert_equal(arr > obj2, "nope")
- assert_equal(arr < obj2, "yep")
- # Called as a ufunc, obj2.__numpy_ufunc__ is called.
- assert_equal(np.multiply(arr, obj2), "ufunc")
- # Also when the method is not overridden.
- assert_equal(arr & obj2, "ufunc")
- arr *= obj2
- assert_equal(arr, 321)
-
- obj2 += 33
- assert_equal(obj2[0], 42)
- assert_equal(obj2.sum(), 42)
- assert_(isinstance(obj2, SomeClass2))
-
- # Obj3 is subclass that defines __rsub__. CPython calls it.
- assert_equal(arr - obj3, "sub for me")
- assert_equal(obj2 - obj3, "sub for me")
- # obj3 is a subclass that defines __rmul__. CPython calls it.
- assert_equal(arr * obj3, 321)
- # But not here, since obj3.__rmul__ is obj2.__rmul__.
- assert_equal(obj2 * obj3, 123)
- # And of course, here obj3.__mul__ should be called.
- assert_equal(obj3 * obj2, 123)
- # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
- # (and both are just ndarray.__radd__); see #4815.
- res = obj2 + obj3
- assert_equal(res, 46)
- assert_(isinstance(res, SomeClass2))
- # Since obj3 is a subclass, it should have precedence, like CPython
- # would give, even though obj2 has __numpy_ufunc__ and __radd__.
- # See gh-4815 and gh-5747.
- res = obj3 + obj2
- assert_equal(res, 46)
- assert_(isinstance(res, SomeClass3))
+ return MyType()
+
+ def check(obj, binop_override_expected, ufunc_override_expected,
+ inplace_override_expected, check_scalar=True):
+ for op, (ufunc, has_inplace, dtype) in ops.items():
+ err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
+ % (op, ufunc, has_inplace, dtype))
+ check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
+ if check_scalar:
+ check_objs.append(check_objs[0][0])
+ for arr in check_objs:
+ arr_method = getattr(arr, "__{0}__".format(op))
+
+ def first_out_arg(result):
+ if op == "divmod":
+ assert_(isinstance(result, tuple))
+ return result[0]
+ else:
+ return result
+
+ # arr __op__ obj
+ if binop_override_expected:
+ assert_equal(arr_method(obj), NotImplemented, err_msg)
+ elif ufunc_override_expected:
+ assert_equal(arr_method(obj)[0], "__array_ufunc__",
+ err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_method(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_method, obj, err_msg=err_msg)
+ # obj __op__ arr
+ arr_rmethod = getattr(arr, "__r{0}__".format(op))
+ if ufunc_override_expected:
+ res = arr_rmethod(obj)
+ assert_equal(res[0], "__array_ufunc__",
+ err_msg=err_msg)
+ assert_equal(res[1], ufunc, err_msg=err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_rmethod(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ # __array_ufunc__ = "asdf" creates a TypeError
+ assert_raises((TypeError, Coerced),
+ arr_rmethod, obj, err_msg=err_msg)
+
+ # arr __iop__ obj
+ # array scalars don't have in-place operators
+ if has_inplace and isinstance(arr, np.ndarray):
+ arr_imethod = getattr(arr, "__i{0}__".format(op))
+ if inplace_override_expected:
+ assert_equal(arr_method(obj), NotImplemented,
+ err_msg=err_msg)
+ elif ufunc_override_expected:
+ res = arr_imethod(obj)
+ assert_equal(res[0], "__array_ufunc__", err_msg)
+ assert_equal(res[1], ufunc, err_msg)
+ assert_(type(res[-1]["out"]) is tuple, err_msg)
+ assert_(res[-1]["out"][0] is arr, err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ assert_(arr_imethod(obj) is arr, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_imethod, obj,
+ err_msg=err_msg)
+
+ op_fn = getattr(operator, op, None)
+ if op_fn is None:
+ op_fn = getattr(operator, op + "_", None)
+ if op_fn is None:
+ op_fn = getattr(builtins, op)
+ assert_equal(op_fn(obj, arr), "forward", err_msg)
+ if not isinstance(obj, np.ndarray):
+ if binop_override_expected:
+ assert_equal(op_fn(arr, obj), "reverse", err_msg)
+ elif ufunc_override_expected:
+ assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
+ err_msg)
+ if ufunc_override_expected:
+ assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
+ err_msg)
+
+ # No array priority, no array_ufunc -> nothing called
+ check(make_obj(object), False, False, False)
+ # Negative array priority, no array_ufunc -> nothing called
+ # (has to be very negative, because scalar priority is -1000000.0)
+ check(make_obj(object, array_priority=-2**30), False, False, False)
+ # Positive array priority, no array_ufunc -> binops and iops only
+ check(make_obj(object, array_priority=1), True, False, True)
+ # ndarray ignores array_priority for ndarray subclasses
+ check(make_obj(np.ndarray, array_priority=1), False, False, False,
+ check_scalar=False)
+ # Positive array_priority and array_ufunc -> array_ufunc only
+ check(make_obj(object, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ check(make_obj(np.ndarray, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ # array_ufunc set to None -> defer binops only
+ check(make_obj(object, array_ufunc=None), True, False, False)
+ check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
+ check_scalar=False)
def test_ufunc_override_normalize_signature(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
# gh-5674
class SomeClass(object):
- def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
@@ -2840,58 +3411,63 @@ class TestBinop(object):
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
- def test_numpy_ufunc_index(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
+ def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (latter is another regression tests for github bug 4753)
+ # This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
- def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
- return i
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ for i, a in enumerate(inputs):
+ if a is self:
+ return i
+ # calls below mean we must be in an output.
+ for j, a in enumerate(kw['out']):
+ if a is self:
+ return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
- assert_equal(np.sin(dummy, a), 1)
- assert_equal(np.sin(dummy, out=a), 1)
- assert_equal(np.sin(dummy, out=(a,)), 1)
+ assert_equal(np.sin(dummy, a), (0,))
+ assert_equal(np.sin(dummy, out=a), (0,))
+ assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
- assert_equal(np.modf(dummy, a), 1)
- assert_equal(np.modf(dummy, None, a), 2)
- assert_equal(np.modf(dummy, dummy, a), 2)
- assert_equal(np.modf(dummy, out=a), 1)
- assert_equal(np.modf(dummy, out=(a,)), 1)
- assert_equal(np.modf(dummy, out=(a, None)), 1)
- assert_equal(np.modf(dummy, out=(a, dummy)), 1)
- assert_equal(np.modf(dummy, out=(None, a)), 2)
- assert_equal(np.modf(dummy, out=(dummy, a)), 2)
+ assert_equal(np.modf(dummy, a), (0,))
+ assert_equal(np.modf(dummy, None, a), (1,))
+ assert_equal(np.modf(dummy, dummy, a), (1,))
+ assert_equal(np.modf(dummy, out=(a, None)), (0,))
+ assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
+ assert_equal(np.modf(dummy, out=(None, a)), (1,))
+ assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_equal(np.modf(dummy, out=a), (0,))
+ assert_(w[0].category is DeprecationWarning)
+ assert_raises(ValueError, np.modf, dummy, out=(a,))
+
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
- assert_equal(np.add(dummy, dummy, a), 2)
+ assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
- assert_equal(np.add(dummy, dummy, out=a), 2)
- assert_equal(np.add(dummy, dummy, out=(a,)), 2)
+ assert_equal(np.add(dummy, dummy, out=a), (0,))
+ assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
# regression test for github bug 4753
class OutClass(np.ndarray):
- def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
- kw['out'][...] = func(*inputs, **tmp_kw)
+ kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
@@ -2904,10 +3480,175 @@ class TestBinop(object):
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
+ def test_pow_override_with_errors(self):
+ # regression test for gh-9112
+ class PowerOnly(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if ufunc is not np.power:
+ raise NotImplementedError
+ return "POWER!"
+ # explicit cast to float, to ensure the fast power path is taken.
+ a = np.array(5., dtype=np.float64).view(PowerOnly)
+ assert_equal(a ** 2.5, "POWER!")
+ with assert_raises(NotImplementedError):
+ a ** 0.5
+ with assert_raises(NotImplementedError):
+ a ** 0
+ with assert_raises(NotImplementedError):
+ a ** 1
+ with assert_raises(NotImplementedError):
+ a ** -1
+ with assert_raises(NotImplementedError):
+ a ** 2
+
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass(object):
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
+ def test_pos_array_ufunc_override(self):
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*[i.view(np.ndarray) for
+ i in inputs], **kwargs)
+ tst = np.array('foo').view(A)
+ with assert_raises(TypeError):
+ +tst
+
-class TestCAPI(TestCase):
+class TestTemporaryElide(object):
+ # elision is only triggered on relatively large arrays
+
+ def test_extension_incref_elide(self):
+ # test extension (e.g. cython) calling PyNumber_* slots without
+ # increasing the reference counts
+ #
+ # def incref_elide(a):
+ # d = input.copy() # refcount 1
+ # return d, d + d # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide
+ d = np.ones(100000)
+ orig, res = incref_elide(d)
+ d + d
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(orig, d)
+ assert_array_equal(res, d + d)
+
+ def test_extension_incref_elide_stack(self):
+ # scanning if the refcount == 1 object is on the python stack to check
+ # that we are called directly from python is flawed as object may still
+ # be above the stack pointer and we have no access to the top of it
+ #
+ # def incref_elide_l(d):
+ # return l[4] + l[4] # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide_l
+ # padding with 1 makes sure the object on the stack is not overwritten
+ l = [1, 1, 1, 1, np.ones(100000)]
+ res = incref_elide_l(l)
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(l[4], np.ones(100000))
+ assert_array_equal(res, l[4] + l[4])
+
+ def test_temporary_with_cast(self):
+ # check that we don't elide into a temporary which would need casting
+ d = np.ones(200000, dtype=np.int64)
+ assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
+
+ r = ((d + d) / 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = np.true_divide((d + d), 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) / 2.)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) // 2)
+ assert_equal(r.dtype, np.dtype(np.int64))
+
+ # commutative elision into the astype result
+ f = np.ones(100000, dtype=np.float32)
+ assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+ # no elision into lower type
+ d = f.astype(np.float64)
+ assert_equal(((f + f) + d).dtype, d.dtype)
+ l = np.ones(100000, dtype=np.longdouble)
+ assert_equal(((d + d) + l).dtype, l.dtype)
+
+ # test unary abs with different output dtype
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ c = np.ones(100000, dtype=dt)
+ r = abs(c * 2.0)
+ assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+ def test_elide_broadcast(self):
+ # test no elision on broadcast to higher dimension
+ # only triggers elision code path in debug mode as triggering it in
+ # normal mode needs 256kb large matching dimension, so a lot of memory
+ d = np.ones((2000, 1), dtype=int)
+ b = np.ones((2000), dtype=bool)
+ r = (1 - d) + b
+ assert_equal(r, 1)
+ assert_equal(r.shape, (2000, 2000))
+
+ def test_elide_scalar(self):
+ # check inplace op does not create ndarray from scalars
+ a = np.bool_()
+ assert_(type(~(a & a)) is np.bool_)
+
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+ # elision which can be gotten for the imaginary part of a real
+ # array. Should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
+
+ def test_elide_readonly(self):
+ # don't try to elide readonly temporaries
+ r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+ assert_equal(r, 0)
+
+ def test_elide_updateifcopy(self):
+ a = np.ones(2**20)[::2]
+ b = a.flat.__array__() + 1
+ del b
+ assert_equal(a, 1)
+
+
+class TestCAPI(object):
def test_IsPythonScalar(self):
- from numpy.core.multiarray_tests import IsPythonScalar
+ from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
@@ -2915,82 +3656,167 @@ class TestCAPI(TestCase):
assert_(IsPythonScalar("a"))
-class TestSubscripting(TestCase):
+class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
- self.assertTrue(isinstance(x[0], np.int_))
+ assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
- self.assertTrue(isinstance(x[0], int))
- self.assertTrue(type(x[0, ...]) is np.ndarray)
+ assert_(isinstance(x[0], int))
+ assert_(type(x[0, ...]) is np.ndarray)
+
+class TestPickling(object):
+ def test_highest_available_pickle_protocol(self):
+ try:
+ import pickle5
+ except ImportError:
+ pickle5 = None
+
+ if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
+ assert pickle.HIGHEST_PROTOCOL >= 5
+ else:
+ assert pickle.HIGHEST_PROTOCOL < 5
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+ reason=('this tests the error messages when trying to'
+ 'protocol 5 although it is not available'))
+ def test_correct_protocol5_error_message(self):
+ array = np.arange(10)
+
+ if sys.version_info[:2] in ((3, 6), (3, 7)):
+ # For the specific case of python3.6 and 3.7, raise a clear import
+ # error about the pickle5 backport when trying to use protocol=5
+ # without the pickle5 package
+ with pytest.raises(ImportError):
+ array.__reduce_ex__(5)
+
+ elif sys.version_info[:2] < (3, 6):
+ # when calling __reduce_ex__ explicitly with protocol=5 on python
+ # raise a ValueError saying that protocol 5 is not available for
+ # this python version
+ with pytest.raises(ValueError):
+ array.__reduce_ex__(5)
+
+ def test_record_array_with_object_dtype(self):
+ my_object = object()
+
+ arr_with_object = np.array(
+ [(my_object, 1, 2.0)],
+ dtype=[('a', object), ('b', int), ('c', float)])
+ arr_without_object = np.array(
+ [('xxx', 1, 2.0)],
+ dtype=[('a', str), ('b', int), ('c', float)])
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_arr_with_object = pickle.loads(
+ pickle.dumps(arr_with_object, protocol=proto))
+ depickled_arr_without_object = pickle.loads(
+ pickle.dumps(arr_without_object, protocol=proto))
+
+ assert_equal(arr_with_object.dtype,
+ depickled_arr_with_object.dtype)
+ assert_equal(arr_without_object.dtype,
+ depickled_arr_without_object.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_f_contiguous_array(self):
+ f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ buffers = []
+
+ # When using pickle protocol 5, Fortran-contiguous arrays can be
+ # serialized using out-of-band buffers
+ bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+ buffer_callback=buffers.append)
+
+ assert len(buffers) > 0
+
+ depickled_f_contiguous_array = pickle.loads(bytes_string,
+ buffers=buffers)
+
+ assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+ def test_non_contiguous_array(self):
+ non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+ assert not non_contiguous_array.flags.c_contiguous
+ assert not non_contiguous_array.flags.f_contiguous
+
+ # make sure non-contiguous arrays can be pickled-depickled
+ # using any protocol
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_non_contiguous_array = pickle.loads(
+ pickle.dumps(non_contiguous_array, protocol=proto))
+
+ assert_equal(non_contiguous_array, depickled_non_contiguous_array)
-class TestPickling(TestCase):
def test_roundtrip(self):
- import pickle
- carray = np.array([[2, 9], [7, 0], [3, 8]])
- DATA = [
- carray,
- np.transpose(carray),
- np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
- ('c', float)])
- ]
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ carray = np.array([[2, 9], [7, 0], [3, 8]])
+ DATA = [
+ carray,
+ np.transpose(carray),
+ np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+ ('c', float)])
+ ]
- for a in DATA:
- assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
+ for a in DATA:
+ assert_equal(
+ a, pickle.loads(pickle.dumps(a, protocol=proto)),
+ err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
- return np.loads(obj, encoding='latin1')
+ return pickle.loads(obj, encoding='latin1')
else:
- return np.loads(obj)
+ return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
- s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
- s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
+ s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
- p = self._loads(asbytes(s))
+ p = self._loads(s)
assert_equal(a, p)
-class TestFancyIndexing(TestCase):
+class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
@@ -3044,7 +3870,7 @@ class TestFancyIndexing(TestCase):
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
-class TestStringCompare(TestCase):
+class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
@@ -3066,8 +3892,8 @@ class TestStringCompare(TestCase):
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
- g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
- g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
+ g1 = np.array([u"This", u"is", u"example"])
+ g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
@@ -3076,7 +3902,7 @@ class TestStringCompare(TestCase):
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
-class TestArgmax(TestCase):
+class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3152,8 +3978,13 @@ class TestArgmax(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ max_val = np.max(arr)
+
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
@@ -3205,7 +4036,7 @@ class TestArgmax(TestCase):
assert_equal(a.argmax(), 1)
-class TestArgmin(TestCase):
+class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3281,8 +4112,13 @@ class TestArgmin(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ min_val = np.min(arr)
+
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
@@ -3348,11 +4184,11 @@ class TestArgmin(TestCase):
assert_equal(a.argmin(), 1)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_scalar(self):
- assert_raises(ValueError, np.amax, 1, 1)
- assert_raises(ValueError, np.amin, 1, 1)
+ assert_raises(np.AxisError, np.amax, 1, 1)
+ assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
@@ -3360,7 +4196,7 @@ class TestMinMax(TestCase):
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
- assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
+ assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
@@ -3378,14 +4214,14 @@ class TestMinMax(TestCase):
assert_equal(np.amax(a), a[0])
-class TestNewaxis(TestCase):
+class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
-class TestClip(TestCase):
+class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
@@ -3452,8 +4288,14 @@ class TestClip(TestCase):
x = val.clip(max=4)
assert_(np.all(x <= 4))
+ def test_nan(self):
+ input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
+ result = input_arr.clip(-1, 1)
+ expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
+ assert_array_equal(result, expected)
+
-class TestCompress(TestCase):
+class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -3479,11 +4321,11 @@ class TestCompress(TestCase):
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
- assert_(np.all(x[mask] == T(val)))
- assert_(x.dtype == T)
+ assert_equal(x[mask], T(val))
+ assert_equal(x.dtype, T)
def test_ip_types(self):
- unchecked_types = [str, unicode, np.void, object]
+ unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
@@ -3492,20 +4334,17 @@ class TestPutmask(object):
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
- yield self.tst_basic, x.copy().astype(T), T, mask, val
+ self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
- def tst_byteorder(self, dtype):
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
- def test_ip_byteorder(self):
- for dtype in ('>i4', '<i4'):
- yield self.tst_byteorder, dtype
-
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
@@ -3526,14 +4365,14 @@ class TestTake(object):
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
- unchecked_types = [str, unicode, np.void, object]
+ unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
- yield self.tst_basic, x.copy().astype(T)
+ self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
@@ -3555,14 +4394,11 @@ class TestTake(object):
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
- def tst_byteorder(self, dtype):
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
- def test_ip_byteorder(self):
- for dtype in ('>i4', '<i4'):
- yield self.tst_byteorder, dtype
-
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
@@ -3571,7 +4407,7 @@ class TestTake(object):
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
-class TestLexsort(TestCase):
+class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
@@ -3614,20 +4450,23 @@ class TestLexsort(TestCase):
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
+ def test_invalid_axis(self): # gh-7528
+ x = np.linspace(0., 1., 42*3).reshape(42, 3)
+ assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
- def setUp(self):
+ def setup(self):
shape = (2, 4, 3)
rand = np.random.random
- self.x = rand(shape) + rand(shape).astype(np.complex)*1j
+ self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
- def tearDown(self):
+ def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
@@ -3684,11 +4523,11 @@ class TestIO(object):
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
- y = np.fromstring(s, dtype=self.dtype)
+ y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
- y = np.fromstring(s, dtype=self.dtype)
+ y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
@@ -3706,18 +4545,24 @@ class TestIO(object):
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
- def test_unbuffered_fromfile(self):
+ def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
- raise io.IOError('Can not tell or seek')
+ raise IOError('Can not tell or seek')
- f = io.open(self.filename, 'rb', buffering=0)
- f.seek = fail
- f.tell = fail
- y = np.fromfile(self.filename, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
+ with io.open(self.filename, 'rb', buffering=0) as f:
+ f.seek = fail
+ f.tell = fail
+ assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
+
+ def test_io_open_unbuffered_fromfile(self):
+ # gh-6632
+ self.x.tofile(self.filename)
+ with io.open(self.filename, 'rb', buffering=0) as f:
+ y = np.fromfile(f, dtype=self.dtype)
+ assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
@@ -3730,6 +4575,21 @@ class TestIO(object):
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+ # check append mode (gh-8329)
+ open(self.filename, "w").close() # delete file contents
+ with open(self.filename, "ab") as f:
+ d.tofile(f)
+ assert_array_equal(d, np.fromfile(self.filename))
+ with open(self.filename, "ab") as f:
+ d.tofile(f)
+ assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+
+ def test_io_open_buffered_fromfile(self):
+ # gh-6632
+ self.x.tofile(self.filename)
+ with io.open(self.filename, 'rb', buffering=-1) as f:
+ y = np.fromfile(f, dtype=self.dtype)
+ assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
@@ -3780,38 +4640,54 @@ class TestIO(object):
f.close()
assert_equal(pos, 10, err_msg=err_msg)
+ def test_load_object_array_fromfile(self):
+ # gh-12300
+ with open(self.filename, 'w') as f:
+ # Ensure we have a file with consistent contents
+ pass
+
+ with open(self.filename, 'rb') as f:
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, f, dtype=object)
+
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, self.filename, dtype=object)
+
def _check_from(self, s, value, **kw):
- y = np.fromstring(asbytes(s), **kw)
+ if 'sep' not in kw:
+ y = np.frombuffer(s, **kw)
+ else:
+ y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
- f.write(asbytes(s))
+ f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
- "nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+ b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
- "inf +inf -inf infinity -Infinity iNfInItY -inF",
+ b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
- self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
+ self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
- self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+ self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
- @dec.slow # takes > 1 minute on mechanical hard drive
+ @pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
@@ -3837,40 +4713,40 @@ class TestIO(object):
pass
def test_string(self):
- self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
- self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
- self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
- self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
- self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
+ self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
- self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
+ self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
- self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
- self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
+ self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
- self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
+ self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
- self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
+ self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
- self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
+ self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
- s = '1,0,-2.3,0'
+ s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
- f.write(asbytes(s))
+ f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
@@ -3899,34 +4775,32 @@ class TestIO(object):
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
- in_foreign_locale(self.test_numbers)()
- in_foreign_locale(self.test_nan)()
- in_foreign_locale(self.test_inf)()
- in_foreign_locale(self.test_counted_string)()
- in_foreign_locale(self.test_ascii)()
- in_foreign_locale(self.test_malformed)()
- in_foreign_locale(self.test_tofile_sep)()
- in_foreign_locale(self.test_tofile_format)()
+ with CommaDecimalPointLocale():
+ self.test_numbers()
+ self.test_nan()
+ self.test_inf()
+ self.test_counted_string()
+ self.test_ascii()
+ self.test_malformed()
+ self.test_tofile_sep()
+ self.test_tofile_format()
class TestFromBuffer(object):
- def tst_basic(self, buffer, expected, kwargs):
- assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
-
- def test_ip_basic(self):
- for byteorder in ['<', '>']:
- for dtype in [float, int, np.complex]:
- dt = np.dtype(dtype).newbyteorder(byteorder)
- x = (np.random.random((4, 7))*5).astype(dt)
- buf = x.tobytes()
- yield self.tst_basic, buf, x.flat, {'dtype':dt}
+ @pytest.mark.parametrize('byteorder', ['<', '>'])
+ @pytest.mark.parametrize('dtype', [float, int, complex])
+ def test_basic(self, byteorder, dtype):
+ dt = np.dtype(dtype).newbyteorder(byteorder)
+ x = (np.random.random((4, 7)) * 5).astype(dt)
+ buf = x.tobytes()
+ assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
- yield self.tst_basic, asbytes(''), np.array([]), {}
+ assert_array_equal(np.frombuffer(b''), np.array([]))
-class TestFlat(TestCase):
- def setUp(self):
+class TestFlat(object):
+ def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
@@ -3962,20 +4836,32 @@ class TestFlat(TestCase):
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
+ # for 1.14 all are set to non-writeable on the way to replacing the
+ # UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
- assert_(f.flags.writeable is True)
-
- assert_(c.flags.updateifcopy is False)
- assert_(d.flags.updateifcopy is False)
- assert_(e.flags.updateifcopy is False)
- assert_(f.flags.updateifcopy is True)
- assert_(f.base is self.b0)
-
-
-class TestResize(TestCase):
+ assert_(f.flags.writeable is False)
+ with assert_warns(DeprecationWarning):
+ assert_(c.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ assert_(d.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ assert_(e.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ # UPDATEIFCOPY is removed.
+ assert_(f.flags.updateifcopy is False)
+ assert_(c.flags.writebackifcopy is False)
+ assert_(d.flags.writebackifcopy is False)
+ assert_(e.flags.writebackifcopy is False)
+ assert_(f.flags.writebackifcopy is False)
+
+
+class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
- x.resize((5, 5))
+ if IS_PYPY:
+ x.resize((5, 5), refcheck=False)
+ else:
+ x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
@@ -3983,12 +4869,15 @@ class TestResize(TestCase):
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
- self.assertRaises(ValueError, x.resize, (5, 1))
+ assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
- x.resize(3)
+ if IS_PYPY:
+ x.resize(3, refcheck=False)
+ else:
+ x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
@@ -3998,89 +4887,144 @@ class TestResize(TestCase):
x.resize()
assert_array_equal(x, np.eye(3))
- def test_invalid_arguements(self):
- self.assertRaises(TypeError, np.eye(3).resize, 'hi')
- self.assertRaises(ValueError, np.eye(3).resize, -1)
- self.assertRaises(TypeError, np.eye(3).resize, order=1)
- self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
+ def test_0d_shape(self):
+ # do it multiple times to test it does not break alloc cache gh-9216
+ for i in range(10):
+ x = np.empty((1,))
+ x.resize(())
+ assert_equal(x.shape, ())
+ assert_equal(x.size, 1)
+ x = np.empty(())
+ x.resize((1,))
+ assert_equal(x.shape, (1,))
+ assert_equal(x.size, 1)
+
+ def test_invalid_arguments(self):
+ assert_raises(TypeError, np.eye(3).resize, 'hi')
+ assert_raises(ValueError, np.eye(3).resize, -1)
+ assert_raises(TypeError, np.eye(3).resize, order=1)
+ assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
- x.resize(3, 2, 1)
+ if IS_PYPY:
+ x.resize(3, 2, 1, refcheck=False)
+ else:
+ x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
- x.resize(2, 3, 3)
+ if IS_PYPY:
+ x.resize(2, 3, 3, refcheck=False)
+ else:
+ x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
- a.resize(15,)
+ if IS_PYPY:
+ a.resize(15, refcheck=False)
+ else:
+ a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
+ def test_empty_view(self):
+ # check that sizes containing a zero don't trigger a reallocate for
+ # already empty arrays
+ x = np.zeros((10, 0), int)
+ x_view = x[...]
+ x_view.resize((0, 10))
+ x_view.resize((0, 100))
-class TestRecord(TestCase):
+ def test_check_weakref(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ xref = weakref.ref(x)
+ assert_raises(ValueError, x.resize, (5, 1))
+ del xref # avoid pyflakes unused variable warning.
+
+
+class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
- def test_assign():
- dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+ def test_dtype_init():
+ np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
- assert_raises(ValueError, test_assign)
-
- if sys.version_info[0] >= 3:
- def test_bytes_fields(self):
- # Bytes are not allowed in field names and not recognized in titles
- # on Py3
- assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
- assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
-
- dt = np.dtype([((asbytes('a'), 'b'), int)])
- assert_raises(ValueError, dt.__getitem__, asbytes('a'))
-
- x = np.array([(1,), (2,), (3,)], dtype=dt)
- assert_raises(IndexError, x.__getitem__, asbytes('a'))
-
- y = x[0]
- assert_raises(IndexError, y.__getitem__, asbytes('a'))
-
- def test_multiple_field_name_unicode(self):
- def test_assign_unicode():
- dt = np.dtype([("\u20B9", "f8"),
- ("B", "f8"),
- ("\u20B9", "f8")])
-
- # Error raised when multiple fields have the same name(unicode included)
- assert_raises(ValueError, test_assign_unicode)
-
- else:
- def test_unicode_field_titles(self):
- # Unicode field titles are added to field dict on Py2
- title = unicode('b')
- dt = np.dtype([((title, 'a'), int)])
- dt[title]
- dt['a']
- x = np.array([(1,), (2,), (3,)], dtype=dt)
- x[title]
- x['a']
- y = x[0]
- y[title]
- y['a']
-
- def test_unicode_field_names(self):
- # Unicode field names are not allowed on Py2
- title = unicode('b')
- assert_raises(TypeError, np.dtype, [(title, int)])
- assert_raises(TypeError, np.dtype, [(('a', title), int)])
+ assert_raises(ValueError, test_dtype_init)
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_bytes_fields(self):
+ # Bytes are not allowed in field names and not recognized in titles
+ # on Py3
+ assert_raises(TypeError, np.dtype, [(b'a', int)])
+ assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+ dt = np.dtype([((b'a', 'b'), int)])
+ assert_raises(TypeError, dt.__getitem__, b'a')
+
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ assert_raises(IndexError, x.__getitem__, b'a')
+
+ y = x[0]
+ assert_raises(IndexError, y.__getitem__, b'a')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_multiple_field_name_unicode(self):
+ def test_dtype_unicode():
+ np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
+
+ # Error raised when multiple fields have the same name (unicode included)
+ assert_raises(ValueError, test_dtype_unicode)
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_titles(self):
+ # Unicode field titles are added to field dict on Py2
+ title = u'b'
+ dt = np.dtype([((title, 'a'), int)])
+ dt[title]
+ dt['a']
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ x[title]
+ x['a']
+ y = x[0]
+ y[title]
+ y['a']
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_names(self):
+ # Unicode field names are converted to ascii on Python 2:
+ encodable_name = u'b'
+ assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
+ assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
+
+ # But raises UnicodeEncodeError if it can't be encoded:
+ nonencodable_name = u'\uc3bc'
+ assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
+ assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+
+ def test_fromarrays_unicode(self):
+ # A single name string provided to fromarrays() is allowed to be unicode
+ # on both Python 2 and 3:
+ x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
+ assert_equal(x['a'][0], 0)
+ assert_equal(x['b'][0], 1)
+
+ def test_unicode_order(self):
+ # Test that we can sort with order as a unicode field name in both Python 2 and
+ # 3:
+ name = u'b'
+ x = np.array([1, 3, 2], dtype=[(name, int)])
+ x.sort(order=name)
+ assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
@@ -4091,10 +5035,10 @@ class TestRecord(TestCase):
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
- assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
- assert_raises(IndexError, a.__getitem__, asbytes('f1'))
- assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
- assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
+ assert_raises(IndexError, a.__setitem__, b'f1', 1)
+ assert_raises(IndexError, a.__getitem__, b'f1')
+ assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+ assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
@@ -4120,58 +5064,18 @@ class TestRecord(TestCase):
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
+
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
+
# non-ascii unicode field indexing is well behaved
if not is_py3:
- raise SkipTest('non ascii unicode field indexing skipped; '
- 'raises segfault on python 2.x')
+ pytest.skip('non ascii unicode field indexing skipped; '
+ 'raises segfault on python 2.x')
else:
- assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
- assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
-
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error, and
- # 'a' is not modified:
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- assert_equal(a, b)
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
+ assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
+ assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
@@ -4180,14 +5084,14 @@ class TestRecord(TestCase):
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
- self.assertTrue(hash(a[0]) == hash(a[1]))
- self.assertTrue(hash(a[0]) == hash(b[0]))
- self.assertTrue(hash(a[0]) != hash(b[1]))
- self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+ assert_(hash(a[0]) == hash(a[1]))
+ assert_(hash(a[0]) == hash(b[0]))
+ assert_(hash(a[0]) != hash(b[1]))
+ assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
- self.assertRaises(TypeError, hash, a[0])
+ assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
@@ -4196,7 +5100,17 @@ class TestRecord(TestCase):
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
-class TestView(TestCase):
+ def test_multifield_indexing_view(self):
+ a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+ v = a[['a', 'c']]
+ assert_(v.base is a)
+ assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+ 'formats': ['i4', 'u4'],
+ 'offsets': [0, 8]}))
+ v[:] = (4,5)
+ assert_equal(a[0].item(), (4, 1, 5))
+
+class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
@@ -4221,17 +5135,23 @@ def _std(a, **args):
return a.std(**args)
-class TestStats(TestCase):
+class TestStats(object):
funcs = [_mean, _var, _std]
- def setUp(self):
+ def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
+ def test_python_type(self):
+ for x in (np.float16(1.), 1, 1., 1+0j):
+ assert_equal(np.mean([x]), 1.)
+ assert_equal(np.std([x]), 0.)
+ assert_equal(np.var([x]), 0.)
+
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
@@ -4382,6 +5302,11 @@ class TestStats(TestCase):
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
+ def test_mean_float16(self):
+ # This fails if the sum inside mean is done in float16 instead
+ # of float32.
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
@@ -4417,7 +5342,7 @@ class TestStats(TestCase):
res = dat.var(1)
assert_(res.info == dat.info)
-class TestVdot(TestCase):
+class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
@@ -4439,7 +5364,7 @@ class TestVdot(TestCase):
assert_equal(np.vdot(b, b), 3)
# test boolean
- b = np.eye(3, dtype=np.bool)
+ b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
@@ -4477,8 +5402,8 @@ class TestVdot(TestCase):
np.vdot(a.flatten(), b.flatten()))
-class TestDot(TestCase):
- def setUp(self):
+class TestDot(object):
+ def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
@@ -4631,7 +5556,8 @@ class TestDot(TestCase):
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
- assert_equal(sys.getrefcount(r), 2)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
@@ -4682,13 +5608,6 @@ class TestDot(TestCase):
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
- def test_dot_scalar_and_matrix_of_objects(self):
- # Ticket #2469
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.dot(arr, 3), desired)
- assert_equal(np.dot(3, arr), desired)
-
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
@@ -4754,11 +5673,9 @@ class TestDot(TestCase):
assert_dot_close(A_f_12, X_f_2, desired)
-class MatmulCommon():
+class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
- Do not derive from TestCase to avoid nose running it.
-
"""
# Should work with these types. Will want to add
# "O" at some point
@@ -4816,15 +5733,38 @@ class MatmulCommon():
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
- def test_vector_vector_values(self):
- vec = np.array([1, 2])
- tgt = 5
+ def test_scalar_output(self):
+ vec1 = np.array([2])
+ vec2 = np.array([3, 4]).reshape(1, -1)
+ tgt = np.array([6, 8])
for dt in self.types[1:]:
- v1 = vec.astype(dt)
- res = self.matmul(v1, v1)
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt)
+ res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
+ vec = np.array([True, True], dtype='?').reshape(1, -1)
+ res = self.matmul(vec[:, 0], vec)
+ assert_equal(res, True)
+
+ def test_vector_vector_values(self):
+ vec1 = np.array([1, 2])
+ vec2 = np.array([3, 4]).reshape(-1, 1)
+ tgt1 = np.array([11])
+ tgt2 = np.array([[3, 6], [4, 8]])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt1)
+ # no broadcast, we must make v1 into a 2d ndarray
+ res = self.matmul(v2, v1.reshape(1, -1))
+ assert_equal(res, tgt2)
+
+ # boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
@@ -4948,78 +5888,105 @@ class MatmulCommon():
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
- def test_numpy_ufunc_override(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
- class A(np.ndarray):
- def __new__(cls, *args, **kwargs):
- return np.array(*args, **kwargs).view(cls)
-
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return "A"
-
- class B(np.ndarray):
- def __new__(cls, *args, **kwargs):
- return np.array(*args, **kwargs).view(cls)
-
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return NotImplemented
-
- a = A([1, 2])
- b = B([1, 2])
- c = np.ones(2)
- assert_equal(self.matmul(a, b), "A")
- assert_equal(self.matmul(b, a), "A")
- assert_raises(TypeError, self.matmul, b, c)
-
-class TestMatmul(MatmulCommon, TestCase):
+class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
- a = np.ones((2, 2), dtype=np.float)
- b = np.ones((2, 2), dtype=np.float)
- tgt = np.full((2,2), 2, dtype=np.float)
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
- # einsum and cblas raise different error types, so
- # use Exception.
- msg = "out argument with illegal cast"
- out = np.zeros((2, 2), dtype=np.int32)
- assert_raises(Exception, self.matmul, a, b, out=out)
-
- # skip following tests for now, cblas does not allow non-contiguous
- # outputs and consistency with dot would require same type,
- # dimensions, subtype, and c_contiguous.
-
- # test out with allowed type cast
- # msg = "out argument with allowed cast"
- # out = np.zeros((2, 2), dtype=np.complex128)
- # self.matmul(a, b, out=out)
- # assert_array_equal(out, tgt, err_msg=msg)
+ msg = "Cannot cast ufunc matmul output"
+ out = np.zeros((5, 2), dtype=np.int32)
+ assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+ # test out with type upcast to complex
+ out = np.zeros((5, 2), dtype=np.complex128)
+ c = self.matmul(a, b, out=out)
+ assert_(c is out)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, '')
+ c = c.astype(tgt.dtype)
+ assert_array_equal(c, tgt)
+
+ def test_out_contiguous(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ v = np.array([1, 3], dtype=float)
+ tgt = np.dot(a, b)
+ tgt_mv = np.dot(a, v)
# test out non-contiguous
- # msg = "out argument with non-contiguous layout"
- # c = np.zeros((2, 2, 2), dtype=np.float)
- # self.matmul(a, b, out=c[..., 0])
- # assert_array_equal(c, tgt, err_msg=msg)
+ out = np.ones((5, 2, 2), dtype=float)
+ c = self.matmul(a, b, out=out[..., 0])
+ assert c.base is out
+ assert_array_equal(c, tgt)
+ c = self.matmul(a, v, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+ c = self.matmul(v, a.T, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+
+ # test out contiguous in only last dim
+ out = np.ones((10, 2), dtype=float)
+ c = self.matmul(a, b, out=out[::2, :])
+ assert_array_equal(c, tgt)
+
+ # test transposes of out, args
+ out = np.ones((5, 2), dtype=float)
+ c = self.matmul(b.T, a.T, out=out.T)
+ assert_array_equal(out, tgt)
+
+ m1 = np.arange(15.).reshape(5, 3)
+ m2 = np.arange(21.).reshape(3, 7)
+ m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
+ vc = np.arange(10.)
+ vr = np.arange(6.)
+ m0 = np.zeros((3, 0))
+ @pytest.mark.parametrize('args', (
+ # matrix-matrix
+ (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+ # matrix-matrix-transpose, contiguous and non
+ (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+ (m3, m3.T), (m3.T, m3),
+ # matrix-matrix non-contiguous
+ (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+ # vector-matrix, matrix-vector, contiguous
+ (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+ # vector-matrix, matrix-vector, vector non-contiguous
+ (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+ # vector-matrix, matrix-vector, matrix non-contiguous
+ (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+ # vector-matrix, matrix-vector, both non-contiguous
+ (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+ # size == 0
+ (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+ ))
+ def test_dot_equivalent(self, args):
+ r1 = np.matmul(*args)
+ r2 = np.dot(*args)
+ assert_equal(r1, r2)
+
+ r3 = np.matmul(args[0].copy(), args[1].copy())
+ assert_equal(r1, r3)
+
if sys.version_info[:2] >= (3, 5):
- class TestMatmulOperator(MatmulCommon, TestCase):
+ class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
@@ -5039,6 +6006,11 @@ if sys.version_info[:2] >= (3, 5):
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
@@ -5053,8 +6025,19 @@ if sys.version_info[:2] >= (3, 5):
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
+ def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
+
-class TestInner(TestCase):
+class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
@@ -5071,21 +6054,6 @@ class TestInner(TestCase):
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
- def test_inner_scalar_and_matrix(self):
- for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
- sca = np.array(3, dtype=dt)[()]
- arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
- desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
- assert_equal(np.inner(arr, sca), desired)
- assert_equal(np.inner(sca, arr), desired)
-
- def test_inner_scalar_and_matrix_of_objects(self):
- # Ticket #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.inner(arr, 3), desired)
- assert_equal(np.inner(3, arr), desired)
-
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
@@ -5147,46 +6115,26 @@ class TestInner(TestCase):
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestSummarization(TestCase):
- def test_1d(self):
- A = np.arange(1001)
- strA = '[ 0 1 2 ..., 998 999 1000]'
- assert_(str(A) == strA)
-
- reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
- assert_(repr(A) == reprA)
-
- def test_2d(self):
- A = np.arange(1002).reshape(2, 501)
- strA = '[[ 0 1 2 ..., 498 499 500]\n' \
- ' [ 501 502 503 ..., 999 1000 1001]]'
- assert_(str(A) == strA)
-
- reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
- ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
- assert_(repr(A) == reprA)
-
-
-class TestAlen(TestCase):
+class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
m = [1, 2, 3]
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- self.assertEqual(np.alen(5), 1)
+ assert_equal(np.alen(5), 1)
-class TestChoose(TestCase):
- def setUp(self):
+class TestChoose(object):
+ def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
@@ -5206,8 +6154,8 @@ class TestChoose(TestCase):
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
-class TestRepeat(TestCase):
- def setUp(self):
+class TestRepeat(object):
+ def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
@@ -5247,110 +6195,87 @@ class TestRepeat(TestCase):
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
-class TestNeighborhoodIter(TestCase):
+@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
+class TestNeighborhoodIter(object):
# Simple, 2d tests
- def _test_simple2d(self, dt):
+ def test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
- l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
- NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
- l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
- NEIGH_MODE['one'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
- l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
- NEIGH_MODE['constant'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
- def test_simple2d(self):
- self._test_simple2d(np.float)
-
- def test_simple2d_object(self):
- self._test_simple2d(Decimal)
-
- def _test_mirror2d(self, dt):
+ def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
- l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
- NEIGH_MODE['mirror'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
- def test_mirror2d(self):
- self._test_mirror2d(np.float)
-
- def test_mirror2d_object(self):
- self._test_mirror2d(Decimal)
-
# Simple, 1d tests
- def _test_simple(self, dt):
+ def test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
- l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
- l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
- l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
- def test_simple_float(self):
- self._test_simple(np.float)
-
- def test_simple_object(self):
- self._test_simple(Decimal)
-
# Test mirror modes
- def _test_mirror(self, dt):
+ def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
- l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
- self.assertTrue([i.dtype == dt for i in l])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[1], NEIGH_MODE['mirror'])
+ assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
- def test_mirror(self):
- self._test_mirror(np.float)
-
- def test_mirror_object(self):
- self._test_mirror(Decimal)
-
# Circular mode
- def _test_circular(self, dt):
+ def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
- l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
- def test_circular(self):
- self._test_circular(np.float)
-
- def test_circular_object(self):
- self._test_circular(Decimal)
# Test stacking neighborhood iterators
-class TestStackedNeighborhoodIter(TestCase):
+class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
@@ -5363,8 +6288,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
- [0, 0], NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
@@ -5372,8 +6297,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [-1, 1], NEIGH_MODE['one'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
@@ -5387,8 +6312,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
- [-1, 1], NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
@@ -5398,8 +6323,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [-2, 0], NEIGH_MODE['mirror'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
@@ -5409,8 +6334,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [0, 2], NEIGH_MODE['mirror'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
@@ -5420,8 +6345,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [-2, 2], NEIGH_MODE['mirror'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
@@ -5435,8 +6360,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
- [-1, 1], NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
@@ -5446,8 +6371,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [-2, 0], NEIGH_MODE['circular'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
@@ -5457,8 +6382,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [0, 2], NEIGH_MODE['circular'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
@@ -5468,8 +6393,8 @@ class TestStackedNeighborhoodIter(TestCase):
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
- [-2, 2], NEIGH_MODE['circular'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
@@ -5480,24 +6405,24 @@ class TestStackedNeighborhoodIter(TestCase):
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
- [-1, 2], NEIGH_MODE['zero'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
- [-1, 2], NEIGH_MODE['mirror'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
- l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
- [-1, 2], NEIGH_MODE['circular'])
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
@@ -5540,21 +6465,14 @@ class TestMinScalarType(object):
assert_equal(wanted, dt)
-if sys.version_info[:2] == (2, 6):
- from numpy.core.multiarray import memorysimpleview as memoryview
-
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
- if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
- if wanted[-1][0] == '':
- names = list(dt.names)
- names[-1] = ''
- dt.names = tuple(names)
- assert_equal(_dtype_from_pep3118(spec), dt,
+ actual = _dtype_from_pep3118(spec)
+ assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
@@ -5578,21 +6496,24 @@ class TestPEP3118Dtype(object):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return align*(1 + (n-1)//align)
- def VV(n):
- return 'V%d' % (align*(1 + (n-1)//align))
+ base = dict(formats=['i'], names=['f0'])
- self._check('ix', [('f0', 'i'), ('', VV(1))])
- self._check('ixx', [('f0', 'i'), ('', VV(2))])
- self._check('ixxx', [('f0', 'i'), ('', VV(3))])
- self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
- self._check('i7x', [('f0', 'i'), ('', VV(7))])
+ self._check('ix', dict(itemsize=aligned(size + 1), **base))
+ self._check('ixx', dict(itemsize=aligned(size + 2), **base))
+ self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
+ self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
+ self._check('i7x', dict(itemsize=aligned(size + 7), **base))
- self._check('^ix', [('f0', 'i'), ('', 'V1')])
- self._check('^ixx', [('f0', 'i'), ('', 'V2')])
- self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
- self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
- self._check('^i7x', [('f0', 'i'), ('', 'V7')])
+ self._check('^ix', dict(itemsize=size + 1, **base))
+ self._check('^ixx', dict(itemsize=size + 2, **base))
+ self._check('^ixxx', dict(itemsize=size + 3, **base))
+ self._check('^ixxxx', dict(itemsize=size + 4, **base))
+ self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
@@ -5622,14 +6543,41 @@ class TestPEP3118Dtype(object):
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return (align*(1 + (n-1)//align))
- def VV(n):
- return 'V%d' % (align*(1 + (n-1)//align))
+ self._check('(3)T{ix}', (dict(
+ names=['f0'],
+ formats=['i'],
+ offsets=[0],
+ itemsize=aligned(size + 1)
+ ), (3,)))
- self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
+ def test_char_vs_string(self):
+ dt = np.dtype('c')
+ self._check('c', dt)
+
+ dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+ self._check('4c4s', dt)
+
+ def test_field_order(self):
+ # gh-9053 - previously, we relied on dictionary key order
+ self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+ self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+ def test_unnamed_fields(self):
+ self._check('ii', [('f0', 'i'), ('f1', 'i')])
+ self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+ self._check('i', 'i')
+ self._check('i:f0:', [('f0', 'i')])
class TestNewBufferProtocol(object):
+ """ Test PEP3118 buffers """
+
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
@@ -5680,7 +6628,7 @@ class TestNewBufferProtocol(object):
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
+ b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
@@ -5762,6 +6710,14 @@ class TestNewBufferProtocol(object):
# Issue #4015.
self._check_roundtrip(0)
+ def test_invalid_buffer_format(self):
+ # datetime64 cannot be used fully in a buffer yet
+ # Should be fixed in the next Numpy major release
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(3, dt)
+ assert_raises((ValueError, BufferError), memoryview, a)
+ assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
@@ -5817,7 +6773,7 @@ class TestNewBufferProtocol(object):
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
+ b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
@@ -5861,7 +6817,9 @@ class TestNewBufferProtocol(object):
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
- assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
+ assert_raises(ValueError,
+ _multiarray_tests.get_buffer_info,
+ np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
@@ -5869,12 +6827,14 @@ class TestNewBufferProtocol(object):
self._check_roundtrip(x)
def test_reference_leak(self):
- count_1 = sys.getrefcount(np.core._internal)
+ if HAS_REFCOUNT:
+ count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
- count_2 = sys.getrefcount(np.core._internal)
- assert_equal(count_1, count_2)
+ if HAS_REFCOUNT:
+ count_2 = sys.getrefcount(np.core._internal)
+ assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
@@ -5915,21 +6875,106 @@ class TestNewBufferProtocol(object):
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
- shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
+ shape, strides = _multiarray_tests.get_buffer_info(
+ arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
- shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
+ shape, strides = _multiarray_tests.get_buffer_info(
+ arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
+ def test_out_of_order_fields(self):
+ dt = np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['one', 'two'],
+ offsets=[4, 0],
+ itemsize=8
+ ))
+
+ # overlapping fields cannot be represented by PEP3118
+ arr = np.empty(1, dt)
+ with assert_raises(ValueError):
+ memoryview(arr)
+
+ def test_max_dims(self):
+ a = np.empty((1,) * 32)
+ self._check_roundtrip(a)
+
+ @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
+ def test_error_too_many_dims(self):
+ def make_ctype(shape, scalar_type):
+ t = scalar_type
+ for dim in shape[::-1]:
+ t = dim * t
+ return t
+
+ # construct a memoryview with 33 dimensions
+ c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
+ m = memoryview(c_u8_33d())
+ assert_equal(m.ndim, 33)
+
+ assert_raises_regex(
+ RuntimeError, "ndim",
+ np.array, m)
+
+ def test_error_pointer_type(self):
+ # gh-6741
+ m = memoryview(ctypes.pointer(ctypes.c_uint8()))
+ assert_('&' in m.format)
+
+ assert_raises_regex(
+ ValueError, "format string",
+ np.array, m)
+
+ def test_error_message_unsupported(self):
+ # wchar has no corresponding numpy type - if this changes in future, we
+ # need a better way to construct an invalid memoryview format.
+ t = ctypes.c_wchar * 4
+ with assert_raises(ValueError) as cm:
+ np.array(t())
+
+ exc = cm.exception
+ if sys.version_info.major > 2:
+ with assert_raises_regex(
+ NotImplementedError,
+ r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+ ):
+ raise exc.__cause__
+
+ def test_ctypes_integer_via_memoryview(self):
+ # gh-11150, due to bpo-10746
+ for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
+ value = c_integer(42)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ np.asarray(value)
+
+ def test_ctypes_struct_via_memoryview(self):
+ # gh-10528
+ class foo(ctypes.Structure):
+ _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+ f = foo(a=1, b=2)
+
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ arr = np.asarray(f)
+
+ assert_equal(arr['a'], 1)
+ assert_equal(arr['b'], 2)
+ f.a = 3
+ assert_equal(arr['a'], 3)
+
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
- """ticket #2046, should not seqfault, raise AttributeError"""
+ # ticket #2046, should not seqfault, raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
- for s in attr:
- assert_raises(AttributeError, delattr, a, s)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
@@ -5941,7 +6986,7 @@ class TestArrayAttributeDeletion(object):
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
- attr = ['updateifcopy', 'aligned', 'writeable']
+ attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
@@ -6000,6 +7045,32 @@ def test_array_interface_itemsize():
assert_equal(descr_t.itemsize, typestr_t.itemsize)
+def test_array_interface_empty_shape():
+ # See gh-7994
+ arr = np.array([1, 2, 3])
+ interface1 = dict(arr.__array_interface__)
+ interface1['shape'] = ()
+
+ class DummyArray1(object):
+ __array_interface__ = interface1
+
+ # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
+ # the interface data to bytes would invoke the bug this tests for, that
+ # __array_interface__ with shape=() is not allowed if the data is an object
+ # exposing the buffer interface
+ interface2 = dict(interface1)
+ interface2['data'] = arr[0].tobytes()
+
+ class DummyArray2(object):
+ __array_interface__ = interface2
+
+ arr1 = np.asarray(DummyArray1())
+ arr2 = np.asarray(DummyArray2())
+ arr3 = arr[:1].reshape(())
+ assert_equal(arr1, arr2)
+ assert_equal(arr1, arr3)
+
+
def test_flat_element_deletion():
it = np.ones(3).flat
try:
@@ -6007,7 +7078,7 @@ def test_flat_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
@@ -6016,28 +7087,29 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook(TestCase):
+class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
- # multiarray/multiarray_tests.c.src
- test_pydatamem_seteventhook_start()
+ # multiarray/_multiarray_tests.c.src
+ _multiarray_tests.test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger then limit of small memory cacher in ctors.c
a = np.zeros(1000)
del a
- test_pydatamem_seteventhook_end()
+ gc.collect()
+ _multiarray_tests.test_pydatamem_seteventhook_end()
-class TestMapIter(TestCase):
+class TestMapIter(object):
def test_mapiter(self):
# The actual tests are within the C code in
- # multiarray/multiarray_tests.c.src
+ # multiarray/_multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
- test_inplace_increment(a, index, vals)
+ _multiarray_tests.test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
@@ -6045,28 +7117,28 @@ class TestMapIter(TestCase):
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
- test_inplace_increment(b, index, vals)
+ _multiarray_tests.test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
-class TestAsCArray(TestCase):
+class TestAsCArray(object):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
- from_c = test_as_c_array(array, 3)
+ from_c = _multiarray_tests.test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
- from_c = test_as_c_array(array, 2, 4)
+ from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
- from_c = test_as_c_array(array, 1, 2, 3)
+ from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
-class TestConversion(TestCase):
+class TestConversion(object):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
@@ -6108,13 +7180,63 @@ class TestConversion(TestCase):
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
+ def test_to_bool_scalar(self):
+ assert_equal(bool(np.array([False])), False)
+ assert_equal(bool(np.array([True])), True)
+ assert_equal(bool(np.array([[42]])), True)
+ assert_raises(ValueError, bool, np.array([1, 2]))
+
+ class NotConvertible(object):
+ def __bool__(self):
+ raise NotImplementedError
+ __nonzero__ = __bool__ # python 2
-class TestWhere(TestCase):
+ assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
+ assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
+
+ self_containing = np.array([None])
+ self_containing[0] = self_containing
+ try:
+ Error = RecursionError
+ except NameError:
+ Error = RuntimeError # python < 3.5
+ assert_raises(Error, bool, self_containing) # previously stack overflow
+
+ def test_to_int_scalar(self):
+ # gh-9972 means that these aren't always the same
+ int_funcs = (int, lambda x: x.__int__())
+ for int_func in int_funcs:
+ assert_equal(int_func(np.array([1])), 1)
+ assert_equal(int_func(np.array([0])), 0)
+ assert_equal(int_func(np.array([[42]])), 42)
+ assert_raises(TypeError, int_func, np.array([1, 2]))
+
+ # gh-9972
+ assert_equal(4, int_func(np.array('4')))
+ assert_equal(5, int_func(np.bytes_(b'5')))
+ assert_equal(6, int_func(np.unicode_(u'6')))
+
+ class HasTrunc:
+ def __trunc__(self):
+ return 3
+ assert_equal(3, int_func(np.array(HasTrunc())))
+ assert_equal(3, int_func(np.array([HasTrunc()])))
+
+ class NotConvertible(object):
+ def __int__(self):
+ raise NotImplementedError
+ assert_raises(NotImplementedError,
+ int_func, np.array(NotConvertible()))
+ assert_raises(NotImplementedError,
+ int_func, np.array([NotConvertible()]))
+
+
+class TestWhere(object):
def test_basic(self):
- dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+ dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
- c = np.ones(53, dtype=np.bool)
+ c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
@@ -6206,7 +7328,7 @@ class TestWhere(TestCase):
assert_equal(np.where(c, a, b), r)
# non bool mask
- c = c.astype(np.int)
+ c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
@@ -6254,54 +7376,75 @@ class TestWhere(TestCase):
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
+ def test_empty_result(self):
+ # pass empty where result through an assignment which reads the data of
+ # empty arrays, error detectable with valgrind, see gh-8922
+ x = np.zeros((1, 1))
+ ibad = np.vstack(np.where(x == 99.))
+ assert_array_equal(ibad,
+ np.atleast_2d(np.array([[],[]], dtype=np.intp)))
-class TestSizeOf(TestCase):
+ def test_largedim(self):
+ # invalid read regression gh-9304
+ shape = [10, 2, 3, 4, 5, 6]
+ np.random.seed(2)
+ array = np.random.rand(*shape)
- def test_empty_array(self):
- x = np.array([])
- assert_(sys.getsizeof(x) > 0)
+ for i in range(10):
+ benchmark = array.nonzero()
+ result = array.nonzero()
+ assert_array_equal(benchmark, result)
- def check_array(self, dtype):
- elem_size = dtype(0).itemsize
- for length in [10, 50, 100, 500]:
- x = np.arange(length, dtype=dtype)
- assert_(sys.getsizeof(x) > length * elem_size)
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf(object):
- def test_array_int32(self):
- self.check_array(np.int32)
+ def test_empty_array(self):
+ x = np.array([])
+ assert_(sys.getsizeof(x) > 0)
- def test_array_int64(self):
- self.check_array(np.int64)
+ def check_array(self, dtype):
+ elem_size = dtype(0).itemsize
- def test_array_float32(self):
- self.check_array(np.float32)
+ for length in [10, 50, 100, 500]:
+ x = np.arange(length, dtype=dtype)
+ assert_(sys.getsizeof(x) > length * elem_size)
- def test_array_float64(self):
- self.check_array(np.float64)
+ def test_array_int32(self):
+ self.check_array(np.int32)
- def test_view(self):
- d = np.ones(100)
- assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
+ def test_array_int64(self):
+ self.check_array(np.int64)
- def test_reshape(self):
- d = np.ones(100)
- assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+ def test_array_float32(self):
+ self.check_array(np.float32)
- def test_resize(self):
- d = np.ones(100)
- old = sys.getsizeof(d)
- d.resize(50)
- assert_(old > sys.getsizeof(d))
- d.resize(150)
- assert_(old < sys.getsizeof(d))
+ def test_array_float64(self):
+ self.check_array(np.float64)
- def test_error(self):
- d = np.ones(100)
- assert_raises(TypeError, d.__sizeof__, "a")
+ def test_view(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
+
+ def test_reshape(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+
+ def test_resize(self):
+ d = np.ones(100)
+ old = sys.getsizeof(d)
+ d.resize(50)
+ assert_(old > sys.getsizeof(d))
+ d.resize(150)
+ assert_(old < sys.getsizeof(d))
+ def test_error(self):
+ d = np.ones(100)
+ assert_raises(TypeError, d.__sizeof__, "a")
-class TestHashing(TestCase):
+
+class TestHashing(object):
def test_arrays_not_hashable(self):
x = np.ones(3)
@@ -6309,10 +7452,10 @@ class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
- self.assertFalse(isinstance(x, collections.Hashable))
+ assert_(not isinstance(x, collections_abc.Hashable))
-class TestArrayPriority(TestCase):
+class TestArrayPriority(object):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
@@ -6322,7 +7465,9 @@ class TestArrayPriority(TestCase):
op.ge, op.lt, op.le, op.ne, op.eq
]
- if sys.version_info[0] < 3:
+ # See #7949. Don't use "/" operator With -3 switch, since python reports it
+ # as a DeprecationWarning
+ if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
@@ -6396,47 +7541,400 @@ class TestArrayPriority(TestCase):
assert_(isinstance(f(b, a), self.Other), msg)
-class TestBytestringArrayNonzero(TestCase):
+class TestBytestringArrayNonzero(object):
def test_empty_bstring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.str))
+ assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
-class TestUnicodeArrayNonzero(TestCase):
+class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
+
+class TestFormat(object):
-if __name__ == "__main__":
- run_module_suite()
+ def test_0d(self):
+ a = np.array(np.pi)
+ assert_equal('{:0.3g}'.format(a), '3.14')
+ assert_equal('{:0.3g}'.format(a[()]), '3.14')
+
+ def test_1d_no_format(self):
+ a = np.array([np.pi])
+ assert_equal('{}'.format(a), str(a))
+
+ def test_1d_format(self):
+ # until gh-5543, ensure that the behaviour matches what it used to be
+ a = np.array([np.pi])
+ if sys.version_info[:2] >= (3, 4):
+ assert_raises(TypeError, '{:30}'.format, a)
+ else:
+ with suppress_warnings() as sup:
+ sup.filter(PendingDeprecationWarning)
+ res = '{:30}'.format(a)
+ dst = object.__format__(a, '30')
+ assert_equal(res, dst)
+
+class TestCTypes(object):
+
+ def test_ctypes_is_available(self):
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_equal(ctypes, test_arr.ctypes._ctypes)
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+ def test_ctypes_is_not_available(self):
+ from numpy.core import _internal
+ _internal.ctypes = None
+ try:
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_(isinstance(test_arr.ctypes._ctypes,
+ _internal._missing_ctypes))
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+ finally:
+ _internal.ctypes = ctypes
+
+
+class TestWritebackIfCopy(object):
+ # all these tests use the WRITEBACKIFCOPY mechanism
+ def test_argmax_with_out(self):
+ mat = np.eye(5)
+ out = np.empty(5, dtype='i2')
+ res = np.argmax(mat, 0, out=out)
+ assert_equal(res, range(5))
+
+ def test_argmin_with_out(self):
+ mat = -np.eye(5)
+ out = np.empty(5, dtype='i2')
+ res = np.argmin(mat, 0, out=out)
+ assert_equal(res, range(5))
+
+ def test_clip_with_out(self):
+ mat = np.eye(5)
+ out = np.eye(5, dtype='i2')
+ res = np.clip(mat, a_min=-10, a_max=0, out=out)
+ assert_(res is out)
+ assert_equal(np.sum(out), 0)
+
+ def test_insert_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ # uses arr_insert
+ np.place(a, a>2, [44, 55])
+ assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+
+ def test_put_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ np.put(a, [0, 2], [44, 55])
+ assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+ def test_putmask_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ # uses arr_putmask
+ np.putmask(a, a>2, a**2)
+ assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+ def test_take_mode_raise(self):
+ a = np.arange(6, dtype='int')
+ out = np.empty(2, dtype='int')
+ np.take(a, [0, 2], out=out, mode='raise')
+ assert_equal(out, np.array([0, 2]))
+
+ def test_choose_mod_raise(self):
+ a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+ out = np.empty((3,3), dtype='int')
+ choices = [-10, 10]
+ np.choose(a, choices, out=out, mode='raise')
+ assert_equal(out, np.array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]]))
+
+ def test_flatiter__array__(self):
+ a = np.arange(9).reshape(3,3)
+ b = a.T.flat
+ c = b.__array__()
+ # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+ del c
+
+ def test_dot_out(self):
+ # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+ a = np.arange(9, dtype=float).reshape(3,3)
+ b = np.dot(a, a, out=a)
+ assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+ def test_view_assign(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
+ arr = np.arange(9).reshape(3, 3).T
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
+ assert_equal(arr, -100)
+ # after resolve, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, -100)
+
+ def test_dealloc_warning(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ _multiarray_tests.npy_abuse_writebackifcopy(v)
+ assert len(sup.log) == 1
+
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
+
+class TestArange(object):
+ def test_infinite(self):
+ assert_raises_regex(
+ ValueError, "size exceeded",
+ np.arange, 0, np.inf
+ )
+
+ def test_nan_step(self):
+ assert_raises_regex(
+ ValueError, "cannot compute length",
+ np.arange, 0, 1, np.nan
+ )
+
+ def test_zero_step(self):
+ assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+ # empty range
+ assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+
+class TestArrayFinalize(object):
+ """ Tests __array_finalize__ """
+
+ def test_receives_base(self):
+ # gh-11237
+ class SavesBase(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_base = self.base
+
+ a = np.array(1).view(SavesBase)
+ assert_(a.saved_base is a.base)
+
+ def test_lifetime_on_error(self):
+ # gh-11237
+ class RaisesInFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ # crash, but keep this object alive
+ raise Exception(self)
+
+ # a plain object can't be weakref'd
+ class Dummy(object): pass
+
+ # get a weak reference to an object within an array
+ obj_arr = np.array(Dummy())
+ obj_ref = weakref.ref(obj_arr[()])
+
+ # get an array that crashed in __array_finalize__
+ with assert_raises(Exception) as e:
+ obj_arr.view(RaisesInFinalize)
+ if sys.version_info.major == 2:
+ # prevent an extra reference being kept
+ sys.exc_clear()
+
+ obj_subarray = e.exception.args[0]
+ del e
+ assert_(isinstance(obj_subarray, RaisesInFinalize))
+
+ # reference should still be held by obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "object should not already be dead")
+
+ del obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
+
+ del obj_subarray
+ gc.collect()
+ assert_(obj_ref() is None, "no references should remain")
+
+
+def test_orderconverter_with_nonASCII_unicode_ordering():
+ # gh-7475
+ a = np.arange(5)
+ assert_raises(ValueError, a.flatten, order=u'\xe2')
+
+
+def test_equal_override():
+ # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
+ # did not respect overrides with __array_priority__ or __array_ufunc__.
+ # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
+ class MyAlwaysEqual(object):
+ def __eq__(self, other):
+ return "eq"
+
+ def __ne__(self, other):
+ return "ne"
+
+ class MyAlwaysEqualOld(MyAlwaysEqual):
+ __array_priority__ = 10000
+
+ class MyAlwaysEqualNew(MyAlwaysEqual):
+ __array_ufunc__ = None
+
+ array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
+ for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
+ my_always_equal = my_always_equal_cls()
+ assert_equal(my_always_equal == array, 'eq')
+ assert_equal(array == my_always_equal, 'eq')
+ assert_equal(my_always_equal != array, 'ne')
+ assert_equal(array != my_always_equal, 'ne')
+
+
+def test_npymath_complex():
+ # Smoketest npymath functions
+ from numpy.core._multiarray_tests import (
+ npy_cabs, npy_carg)
+
+ funcs = {npy_cabs: np.absolute,
+ npy_carg: np.angle}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.complex64, np.complex128, np.clongdouble)
+
+ for fun, npfun in funcs.items():
+ for x, y in itertools.product(vals, vals):
+ for t in types:
+ z = t(complex(x, y))
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
+def test_npymath_real():
+ # Smoketest npymath functions
+ from numpy.core._multiarray_tests import (
+ npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+ funcs = {npy_log10: np.log10,
+ npy_cosh: np.cosh,
+ npy_sinh: np.sinh,
+ npy_tan: np.tan,
+ npy_tanh: np.tanh}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.float32, np.float64, np.longdouble)
+
+ with np.errstate(all='ignore'):
+ for fun, npfun in funcs.items():
+ for x, t in itertools.product(vals, types):
+ z = t(x)
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+ # alignment code needs to satisfy these requrements:
+ # 1. numpy structs match C struct layout
+ # 2. ufuncs/casting is safe wrt to aligned access
+ # 3. copy code is safe wrt to "uint alidned" access
+ #
+ # Complex types are the main problem, whose alignment may not be the same
+ # as their "uint alignment".
+ #
+ # This test might only fail on certain platforms, where uint64 alignment is
+ # not equal to complex64 alignment. The second 2 tests will only fail
+ # for DEBUG=1.
+
+ d1 = np.dtype('u1,c8', align=True)
+ d2 = np.dtype('u4,c8', align=True)
+ d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
+
+ assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+ # check that C struct matches numpy struct size
+ s = _multiarray_tests.get_struct_alignments()
+ for d, (alignment, size) in zip([d1,d2,d3], s):
+ assert_equal(d.alignment, alignment)
+ assert_equal(d.itemsize, size)
+
+ # check that ufuncs don't complain in debug mode
+ # (this is probably OK if the aligned flag is true above)
+ src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
+ np.exp(src) # assert fails?
+
+ # check that copy code doesn't complain in debug mode
+ dst = np.zeros((2,2), dtype='c8')
+ dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
+
+def test_getfield():
+ a = np.arange(32, dtype='uint16')
+ if sys.byteorder == 'little':
+ i = 0
+ j = 1
+ else:
+ i = 1
+ j = 0
+ b = a.getfield('int8', i)
+ assert_equal(b, a)
+ b = a.getfield('int8', j)
+ assert_equal(b, 0)
+ pytest.raises(ValueError, a.getfield, 'uint8', -1)
+ pytest.raises(ValueError, a.getfield, 'uint8', 16)
+ pytest.raises(ValueError, a.getfield, 'uint64', 0)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index ed0197991..26fd9c346 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1,15 +1,14 @@
from __future__ import division, absolute_import, print_function
import sys
-import warnings
+import pytest
import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
-from numpy.compat import asbytes, sixu
-from numpy.core.multiarray_tests import test_nditer_too_large
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_raises, dec
+ assert_, assert_equal, assert_array_equal, assert_raises,
+ HAS_REFCOUNT, suppress_warnings
)
@@ -34,6 +33,7 @@ def iter_iterindices(i):
i.iternext()
return ret
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
# Make sure the iterator doesn't leak
@@ -42,13 +42,14 @@ def test_iter_refcount():
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
- it = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[dt])
- assert_(not it.iterationneedsapi)
- assert_(sys.getrefcount(a) > rc_a)
- assert_(sys.getrefcount(dt) > rc_dt)
+ op_dtypes=[dt]) as it:
+ assert_(not it.iterationneedsapi)
+ assert_(sys.getrefcount(a) > rc_a)
+ assert_(sys.getrefcount(dt) > rc_dt)
+ # del 'it'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
@@ -765,12 +766,32 @@ def test_iter_flags_errors():
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
- i[0:2] = (3, 3)
- assert_equal(a, [3, 1, 2])
- assert_equal(b, [3, 1, 2])
- assert_equal(c, [0, 1, 2])
- i[1] = 12
- assert_equal(i[0:2], [3, 12])
+ with i:
+ i[0:2] = (3, 3)
+ assert_equal(a, [3, 1, 2])
+ assert_equal(b, [3, 1, 2])
+ assert_equal(c, [0, 1, 2])
+ i[1] = 12
+ assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][...] = 3
+ it.operands[0][...] = 14
+ assert_equal(a, 14)
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0][-1:1]
+ x[...] = 14
+ it.operands[0][...] = -1234
+ assert_equal(a, -1234)
+ # check for no warnings on dealloc
+ x = None
+ it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
@@ -782,23 +803,26 @@ def test_iter_nbo_align_contig():
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
-
+ del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
- i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv')
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+ casting='equiv') as i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 12345
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
@@ -811,11 +835,11 @@ def test_iter_nbo_align_contig():
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
- i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']])
- assert_(i.operands[0].flags.aligned)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 3
- i = None
+ with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+ assert_(i.operands[0].flags.aligned)
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
@@ -837,16 +861,17 @@ def test_iter_array_cast():
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ with i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
- i = nditer(a, [], [['readwrite', 'updateifcopy']],
+ with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
- op_dtypes=[np.dtype('>f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+ op_dtypes=[np.dtype('>f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
@@ -868,30 +893,28 @@ def test_iter_array_cast():
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
- i = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- assert_equal(i.operands[0].strides, (4, 16, 48))
- # Check that UPDATEIFCOPY is activated
- i.operands[0][2, 1, 1] = -12.5
- assert_(a[2, 1, 1] != -12.5)
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ assert_equal(i.operands[0].strides, (4, 16, 48))
+ # Check that WRITEBACKIFCOPY is activated at exit
+ i.operands[0][2, 1, 1] = -12.5
+ assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
- i = nditer(a, [],
+ with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- # Even though the stride was negative in 'a', it
- # becomes positive in the temporary
- assert_equal(i.operands[0].strides, (4,))
- i.operands[0][:] = [1, 2, 3]
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ # Even though the stride was negative in 'a', it
+ # becomes positive in the temporary
+ assert_equal(i.operands[0].strides, (4,))
+ i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
def test_iter_array_cast_errors():
@@ -1000,17 +1023,20 @@ def test_iter_object_arrays_basic():
obj = {'a':3,'b':'d'}
a = np.array([[1, 2, 3], None, obj, None], dtype='O')
- rc = sys.getrefcount(obj)
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(obj)
# Need to allow references for object arrays
assert_raises(TypeError, nditer, a)
- assert_equal(sys.getrefcount(obj), rc)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
i = nditer(a, ['refs_ok'], ['readonly'])
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a)
vals, i, x = [None]*3
- assert_equal(sys.getrefcount(obj), rc)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readonly'], order='C')
@@ -1018,14 +1044,17 @@ def test_iter_object_arrays_basic():
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
vals, i, x = [None]*3
- assert_equal(sys.getrefcount(obj), rc)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
- for x in i:
- x[...] = None
- vals, i, x = [None]*3
- assert_equal(sys.getrefcount(obj), rc-1)
+ with i:
+ for x in i:
+ x[...] = None
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
def test_iter_object_arrays_conversions():
@@ -1033,15 +1062,17 @@ def test_iter_object_arrays_conversions():
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
@@ -1050,8 +1081,9 @@ def test_iter_object_arrays_conversions():
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
#Non-contiguous value array
@@ -1060,11 +1092,14 @@ def test_iter_object_arrays_conversions():
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- ob = i[0][()]
- rc = sys.getrefcount(ob)
- for x in i:
- x[...] += 1
- assert_equal(sys.getrefcount(ob), rc-1)
+ with i:
+ ob = i[0][()]
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(ob)
+ for x in i:
+ x[...] += 1
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
def test_iter_common_dtype():
@@ -1131,6 +1166,96 @@ def test_iter_common_dtype():
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
+def test_iter_copy_if_overlap():
+ # Ensure the iterator makes copies on read/write overlap, if requested
+
+ # Copy not needed, 1 op
+ for flag in ['readonly', 'writeonly', 'readwrite']:
+ a = arange(10)
+ i = nditer([a], ['copy_if_overlap'], [[flag]])
+ with i:
+ assert_(i.operands[0] is a)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(10)
+ a = x[1:]
+ b = x[:-1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+ # Copy not needed with elementwise, 2 ops, exactly same arrays
+ x = arange(10)
+ a = x
+ b = x
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
+ ['readwrite', 'overlap_assume_elementwise']])
+ with i:
+ assert_(i.operands[0] is a and i.operands[1] is b)
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+
+ # Copy not needed, 2 ops, no overlap
+ x = arange(10)
+ a = x[::2]
+ b = x[1::2]
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
+ assert_(i.operands[0] is a and i.operands[1] is b)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(4, dtype=np.int8)
+ a = x[3:]
+ b = x.view(np.int32)[:1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+ # Copy needed, 3 ops, read-write overlap
+ for flag in ['writeonly', 'readwrite']:
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ with nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], [flag]]) as i:
+ a2, b2, c2 = i.operands
+ assert_(not np.shares_memory(a2, c2))
+ assert_(not np.shares_memory(b2, c2))
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = np.ones([10, 10])
+ c = x.T
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, write-only overlap
+ x = np.arange(7)
+ a = x[:3]
+ b = x[3:6]
+ c = x[4:7]
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['writeonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
def test_iter_op_axes():
# Check that custom axes work
@@ -1228,17 +1353,15 @@ def test_iter_copy():
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='>f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='>f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
def test_iter_allocate_output_simple():
@@ -1257,11 +1380,12 @@ def test_iter_allocate_output_buffered_readwrite():
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
- i.operands[1][:] = 1
- i.reset()
- for x in i:
- x[1][...] += x[0][...]
- assert_equal(i.operands[1], a+1)
+ with i:
+ i.operands[1][:] = 1
+ i.reset()
+ for x in i:
+ x[1][...] += x[0][...]
+ assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
@@ -1291,7 +1415,7 @@ def test_iter_allocate_output_itorder():
assert_equal(i.operands[1].dtype, np.dtype('f4'))
def test_iter_allocate_output_opaxes():
- # Specifing op_axes should work
+ # Specifying op_axes should work
a = arange(24, dtype='i4').reshape(2, 3, 4)
i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
@@ -1344,26 +1468,25 @@ def test_iter_allocate_output_types_scalar():
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 15
- # matrix vs ndarray
- a = np.matrix([[1, 2], [3, 4]])
+ # subclass vs ndarray
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
- assert_(type(b) != type(i.operands[2]))
+ assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
- # matrix always wants things to be 2D
- b = np.arange(4).reshape(1, 2, 2)
- assert_raises(RuntimeError, nditer, [a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- # but if subtypes are disabled, the result can still work
+ # If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
- assert_(type(a) != type(i.operands[2]))
- assert_equal(i.operands[2].shape, (1, 2, 2))
+ assert_(type(a) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
@@ -1556,10 +1679,11 @@ def test_iter_write_buffering():
order='C',
buffersize=16)
x = 0
- while not i.finished:
- i[0] = x
- x += 1
- i.iternext()
+ with i:
+ while not i.finished:
+ i[0] = x
+ x += 1
+ i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
@@ -1583,10 +1707,11 @@ def test_iter_buffering_delayed_alloc():
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
- assert_equal(i[0], 0)
- i[1] = 1
- assert_equal(i[0:2], [0, 1])
- assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
+ with i:
+ assert_equal(i[0], 0)
+ i[1] = 1
+ assert_equal(i[0:2], [0, 1])
+ assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
@@ -1597,8 +1722,9 @@ def test_iter_buffered_cast_simple():
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
@@ -1611,13 +1737,14 @@ def test_iter_buffered_cast_byteswapped():
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
- try:
- warnings.simplefilter("ignore", np.ComplexWarning)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
a = np.arange(10, dtype='f8').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
@@ -1625,12 +1752,11 @@ def test_iter_buffered_cast_byteswapped():
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
- finally:
- warnings.simplefilter("default", np.ComplexWarning)
def test_iter_buffered_cast_byteswapped_complex():
# Test that buffering can handle a cast which requires swap->cast->copy
@@ -1642,8 +1768,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
@@ -1653,8 +1780,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
@@ -1664,8 +1792,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
@@ -1674,8 +1803,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
@@ -1704,7 +1834,8 @@ def test_iter_buffered_cast_structured_type():
a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
- rc = sys.getrefcount(a[0])
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(a[0])
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
@@ -1719,16 +1850,25 @@ def test_iter_buffered_cast_structured_type():
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
vals, i, x = [None]*3
- assert_equal(sys.getrefcount(a[0]), rc)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(a[0]), rc)
- # struct type -> simple (takes the first value)
- sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(ValueError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
@@ -1738,73 +1878,20 @@ def test_iter_buffered_cast_structured_type():
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
- [np.array((3, 1, 2), dtype=sdt2),
- np.array((6, 4, 5), dtype=sdt2)])
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
- # struct type -> struct type (field gets discarded)
+ # make sure struct type -> struct type with different
+ # number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1))
-
- # struct type -> struct type (structured field gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1))
-
- # struct type -> struct type (structured field w/ ref gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1))
-
- # struct type -> struct type back (structured field w/ ref gets discarded)
- sdt1 = [('b', 'O'), ('a', 'f8')]
- sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- a = np.array([(1, 2), (4, 5)], dtype=sdt1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')]))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2),
- np.array((5, 4, (0, None)), dtype=sdt2)])
- assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1))
+
+ assert_raises(ValueError, lambda : (
+ nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)))
+
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
@@ -1829,12 +1916,13 @@ def test_iter_buffered_cast_subarray():
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_(np.all(x['a'] == count))
- x['a'][0] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_(np.all(x['a'] == count))
+ x['a'][0] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
@@ -1845,12 +1933,13 @@ def test_iter_buffered_cast_subarray():
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- x['a'] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ x['a'] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
@@ -2012,7 +2101,7 @@ def test_iter_buffering_string():
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='S2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
- assert_equal(i[0], asbytes('abc'))
+ assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
@@ -2020,7 +2109,7 @@ def test_iter_buffering_string():
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
- assert_equal(i[0], sixu('abc'))
+ assert_equal(i[0], u'abc')
assert_equal(i[0].dtype, np.dtype('U6'))
def test_iter_buffering_growinner():
@@ -2032,7 +2121,7 @@ def test_iter_buffering_growinner():
assert_equal(i[0].size, a.size)
-@dec.slow
+@pytest.mark.slow
def test_iter_buffered_reduce_reuse():
# large enough array for all views, including negative strides.
a = np.arange(2*3**5)[3**5:3**5+1]
@@ -2040,7 +2129,7 @@ def test_iter_buffered_reduce_reuse():
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
- op_dtypes = [np.float, a.dtype]
+ op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
@@ -2058,27 +2147,29 @@ def test_iter_buffered_reduce_reuse():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
- nditer2.operands[-1][...] = 0
- nditer2.reset()
- nditer2.iterindex = skip
+ with nditer2:
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
- for (a2_in, b2_in) in nditer2:
- b2_in += a2_in.astype(np.int_)
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
- comp_res = nditer2.operands[-1]
+ comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
- nditer1.operands[-1][...] = 0
- nditer1.reset()
- nditer1.iterindex = skip
+ with nditer1:
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
- for (a1_in, b1_in) in nditer1:
- b1_in += a1_in.astype(np.int_)
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
- res = nditer1.operands[-1]
+ res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
@@ -2096,172 +2187,187 @@ def test_iter_no_broadcast():
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
-def test_iter_nested_iters_basic():
- # Test nested iteration basic usage
- a = arange(12).reshape(2, 3, 2)
-
- i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
-
- i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
-
- i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
-def test_iter_nested_iters_reorder():
- # Test nested iteration basic usage
- a = arange(12).reshape(2, 3, 2)
-
- # In 'K' order (default), it gets reordered
- i, j = np.nested_iters(a, [[0], [2, 1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
-
- i, j = np.nested_iters(a, [[1, 0], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
-
- i, j = np.nested_iters(a, [[2, 0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
-
- # In 'C' order, it doesn't
- i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
-
- i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
-
- i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
-
-def test_iter_nested_iters_flip_axes():
- # Test nested iteration with negative axes
- a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
-
- # In 'K' order (default), the axes all get flipped
- i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
-
- i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
-
- i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
-
- # In 'C' order, flipping axes is disabled
- i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
-
- i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
-
- i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
-
-def test_iter_nested_iters_broadcast():
- # Test nested iteration with broadcasting
- a = arange(2).reshape(2, 1)
- b = arange(3).reshape(1, 3)
-
- i, j = np.nested_iters([a, b], [[0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
-
- i, j = np.nested_iters([a, b], [[1], [0]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
-
-def test_iter_nested_iters_dtype_copy():
- # Test nested iteration with a copy to change dtype
-
- # copy
- a = arange(6, dtype='i4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- op_flags=['readonly', 'copy'],
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
- vals = None
-
- # updateifcopy
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- op_flags=['readwrite', 'updateifcopy'],
- casting='same_kind',
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[0, 1, 2], [3, 4, 5]])
- i, j, x, y = (None,)*4 # force the updateifcopy
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
-
-def test_iter_nested_iters_dtype_buffered():
- # Test nested iteration with buffering to change dtype
-
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- flags=['buffered'],
- op_flags=['readwrite'],
- casting='same_kind',
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+class TestIterNested(object):
+
+ def test_basic(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_reorder(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ # In 'K' order (default), it gets reordered
+ i, j = np.nested_iters(a, [[0], [2, 1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, it doesn't
+ i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
+
+ def test_flip_axes(self):
+ # Test nested iteration with negative axes
+ a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
+
+ # In 'K' order (default), the axes all get flipped
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, flipping axes is disabled
+ i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
+
+ def test_broadcast(self):
+ # Test nested iteration with broadcasting
+ a = arange(2).reshape(2, 1)
+ b = arange(3).reshape(1, 3)
+
+ i, j = np.nested_iters([a, b], [[0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
+
+ i, j = np.nested_iters([a, b], [[1], [0]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
+
+ def test_dtype_copy(self):
+ # Test nested iteration with a copy to change dtype
+
+ # copy
+ a = arange(6, dtype='i4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readonly', 'copy'],
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
+ vals = None
+
+        # writebackifcopy - using context manager
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ # writebackifcopy - using close()
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ i.close()
+ j.close()
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_0d(self):
+ a = np.arange(12).reshape(2, 3, 2)
+ i, j = np.nested_iters(a, [[], [1, 0, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0, 2], []])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
+
+ i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
+ vals = []
+ for x in i:
+ for y in j:
+ vals.append([z for z in k])
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_iter_nested_iters_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_iter_reduction_error():
@@ -2283,29 +2389,35 @@ def test_iter_reduction():
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Reduction shape/strides for the output
- assert_equal(i[1].shape, (6,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Reduction shape/strides for the output
+ assert_equal(i[1].shape, (6,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
@@ -2317,15 +2429,16 @@ def test_iter_reduction():
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
- it1.operands[1].fill(0)
- it2.operands[1].fill(0)
- it2.reset()
- for x in it1:
- x[1][...] += x[0]
- for x in it2:
- x[1][...] += x[0]
- assert_equal(it1.operands[1], it2.operands[1])
- assert_equal(it2.operands[1].sum(), a.size)
+ with it1, it2:
+ it1.operands[1].fill(0)
+ it2.operands[1].fill(0)
+ it2.reset()
+ for x in it1:
+ x[1][...] += x[0]
+ for x in it2:
+ x[1][...] += x[0]
+ assert_equal(it1.operands[1], it2.operands[1])
+ assert_equal(it2.operands[1].sum(), a.size)
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
@@ -2335,11 +2448,12 @@ def test_iter_buffering_reduction():
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
- assert_equal(i[1].dtype, np.dtype('f8'))
- assert_(i[1].dtype != b.dtype)
- # Do the reduction
- for x, y in i:
- y[...] += x
+ with i:
+ assert_equal(i[1].dtype, np.dtype('f8'))
+ assert_(i[1].dtype != b.dtype)
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(b, np.sum(a))
@@ -2349,11 +2463,16 @@ def test_iter_buffering_reduction():
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
- assert_equal(i[1].shape, (3,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- y[...] += x
+ with i:
+ assert_equal(i[1].shape, (3,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
@@ -2363,9 +2482,27 @@ def test_iter_buffering_reduction():
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
- it.operands[1].fill(0)
- it.reset()
- assert_equal(it[0], [1, 2, 1, 2])
+ with it:
+ it.operands[1].fill(0)
+ it.reset()
+ assert_equal(it[0], [1, 2, 1, 2])
+
+ # Iterator inner loop should take argument contiguity into account
+ x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
+ x[...] = np.arange(x.size).reshape(x.shape)
+ y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
+ y_base_copy = y_base.copy()
+ y = y_base[::2,:,None]
+
+ it = np.nditer([y, x],
+ ['buffered', 'external_loop', 'reduce_ok'],
+ [['readwrite'], ['readonly']])
+ with it:
+ for a, b in it:
+ a.fill(2)
+
+ assert_equal(y_base[1::2], y_base_copy[1::2])
+ assert_equal(y_base[::2], 2)
def test_iter_buffering_reduction_reuse_reduce_loops():
# There was a bug triggering reuse of the reduce loop inappropriately,
@@ -2378,9 +2515,8 @@ def test_iter_buffering_reduction_reuse_reduce_loops():
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
- bufsizes = []
- for x, y in it:
- bufsizes.append(x.shape[0])
+ with it:
+ bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
@@ -2459,8 +2595,9 @@ def test_iter_writemasked():
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 1
+ with it:
+ for x, m in it:
+ x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, [1, 1, 1])
@@ -2469,8 +2606,9 @@ def test_iter_writemasked():
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 2.5
+ with it:
+ for x, m in it:
+ x[...] = 2.5
# Because we violated the semantics, all the values became 2.5
assert_equal(a, [2.5, 2.5, 2.5])
@@ -2482,8 +2620,9 @@ def test_iter_writemasked():
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
- for x, m in it:
- x[...] = 3
+ with it:
+ for x, m in it:
+ x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, [3, 3, 2.5])
@@ -2512,7 +2651,7 @@ def test_iter_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
@@ -2565,28 +2704,6 @@ def test_0d_iter():
assert_equal(vals['d'], 0.5)
-def test_0d_nested_iter():
- a = np.arange(12).reshape(2, 3, 2)
- i, j = np.nested_iters(a, [[], [1, 0, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
-
- i, j = np.nested_iters(a, [[1, 0, 2], []])
- vals = []
- for x in i:
- vals.append([y for y in j])
- assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
-
- i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
- vals = []
- for x in i:
- for y in j:
- vals.append([z for z in k])
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
-
-
def test_iter_too_large():
# The total size of the iterator must not exceed the maximum intp due
# to broadcasting. Dividing by 1024 will keep it small enough to
@@ -2620,19 +2737,125 @@ def test_iter_too_large_with_multiindex():
# arrays are now too large to be broadcast. The different modes test
# different nditer functionality with or without GIL.
for mode in range(6):
- assert_raises(ValueError, test_nditer_too_large, arrays, -1, mode)
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, -1, mode)
# but if we do nothing with the nditer, it can be constructed:
- test_nditer_too_large(arrays, -1, 7)
+ _multiarray_tests.test_nditer_too_large(arrays, -1, 7)
# When an axis is removed, things should work again (half the time):
for i in range(num):
for mode in range(6):
# an axis with size 1024 is removed:
- test_nditer_too_large(arrays, i*2, mode)
+ _multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
# an axis with size 1 is removed:
- assert_raises(ValueError, test_nditer_too_large,
- arrays, i*2 + 1, mode)
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
-
-if __name__ == "__main__":
- run_module_suite()
+def test_writebacks():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][:] = 100
+ assert_equal(au, 100)
+ # do it again, this time raise an error,
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ try:
+ with it:
+ assert_equal(au.flags.writeable, False)
+ it.operands[0][:] = 0
+ raise ValueError('exit context manager on exception')
+ except:
+ pass
+ assert_equal(au, 0)
+ assert_equal(au.flags.writeable, True)
+ # cannot reuse i outside context manager
+ assert_raises(ValueError, getattr, it, 'operands')
+
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0]
+ x[:] = 6
+ assert_(x.flags.writebackifcopy)
+ assert_equal(au, 6)
+ assert_(not x.flags.writebackifcopy)
+ x[:] = 123 # x.data still valid
+ assert_equal(au, 6) # but not connected to au
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
+ # do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
+ with it:
+ for x in it:
+ x[...] = 123
+ # make sure we cannot reenter the closed iterator
+ enter = it.__enter__
+ assert_raises(RuntimeError, enter)
+
+def test_close_equivalent():
+    ''' using a context manager and using nditer.close are equivalent
+    '''
+ def add_close(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ ret = it.operands[2]
+ it.close()
+ return ret
+
+ def add_context(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+ z = add_close(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+ z = add_context(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+ assert_equal (next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_warn_noclose():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del it
+ assert len(sup.log) == 1
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 0040f3a25..37534720a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -4,19 +4,20 @@ import sys
import warnings
import itertools
import platform
+import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_raises_regex, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, dec
-)
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ HAS_REFCOUNT
+ )
-class TestResize(TestCase):
+class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
@@ -34,6 +35,12 @@ class TestResize(TestCase):
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
+ Ar = np.resize(A, (0, 2))
+ assert_equal(Ar.shape, (0, 2))
+
+ Ar = np.resize(A, (2, 0))
+ assert_equal(Ar.shape, (2, 0))
+
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32, 1)])
@@ -42,7 +49,7 @@ class TestResize(TestCase):
assert_equal(A.dtype, Ar.dtype)
-class TestNonarrayArgs(TestCase):
+class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
@@ -67,6 +74,13 @@ class TestNonarrayArgs(TestCase):
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
+ def test_count_nonzero(self):
+ arr = [[0, 1, 7, 0, 0],
+ [3, 0, 0, 2, 19]]
+ tgt = np.array([2, 3])
+ out = np.count_nonzero(arr, axis=1)
+ assert_equal(out, tgt)
+
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
@@ -195,45 +209,61 @@ class TestNonarrayArgs(TestCase):
assert_(w[0].category is RuntimeWarning)
-class TestBoolScalar(TestCase):
+class TestIsscalar(object):
+ def test_isscalar(self):
+ assert_(np.isscalar(3.1))
+ assert_(np.isscalar(np.int16(12345)))
+ assert_(np.isscalar(False))
+ assert_(np.isscalar('numpy'))
+ assert_(not np.isscalar([3.1]))
+ assert_(not np.isscalar(None))
+
+ # PEP 3141
+ from fractions import Fraction
+ assert_(np.isscalar(Fraction(5, 17)))
+ from numbers import Number
+ assert_(np.isscalar(Number()))
+
+
+class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
- self.assertTrue((t and s) is s)
- self.assertTrue((f and s) is f)
+ assert_((t and s) is s)
+ assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
- self.assertTrue((t | t) is t)
- self.assertTrue((f | t) is t)
- self.assertTrue((t | f) is t)
- self.assertTrue((f | f) is f)
+ assert_((t | t) is t)
+ assert_((f | t) is t)
+ assert_((t | f) is t)
+ assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
- self.assertTrue((t & t) is t)
- self.assertTrue((f & t) is f)
- self.assertTrue((t & f) is f)
- self.assertTrue((f & f) is f)
+ assert_((t & t) is t)
+ assert_((f & t) is f)
+ assert_((t & f) is f)
+ assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
- self.assertTrue((t ^ t) is f)
- self.assertTrue((f ^ t) is t)
- self.assertTrue((t ^ f) is t)
- self.assertTrue((f ^ f) is f)
+ assert_((t ^ t) is f)
+ assert_((f ^ t) is t)
+ assert_((t ^ f) is t)
+ assert_((f ^ f) is f)
-class TestBoolArray(TestCase):
- def setUp(self):
+class TestBoolArray(object):
+ def setup(self):
# offset for simd tests
- self.t = np.array([True] * 41, dtype=np.bool)[1::]
- self.f = np.array([False] * 41, dtype=np.bool)[1::]
- self.o = np.array([False] * 42, dtype=np.bool)[2::]
+ self.t = np.array([True] * 41, dtype=bool)[1::]
+ self.f = np.array([False] * 41, dtype=bool)[1::]
+ self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
@@ -242,31 +272,31 @@ class TestBoolArray(TestCase):
self.im[-2] = False
def test_all_any(self):
- self.assertTrue(self.t.all())
- self.assertTrue(self.t.any())
- self.assertFalse(self.f.all())
- self.assertFalse(self.f.any())
- self.assertTrue(self.nm.any())
- self.assertTrue(self.im.any())
- self.assertFalse(self.nm.all())
- self.assertFalse(self.im.all())
+ assert_(self.t.all())
+ assert_(self.t.any())
+ assert_(not self.f.all())
+ assert_(not self.f.any())
+ assert_(self.nm.any())
+ assert_(self.im.any())
+ assert_(not self.nm.all())
+ assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
- d = np.array([False] * 256, dtype=np.bool)[7::]
+ d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
- self.assertTrue(np.any(d))
- e = np.array([True] * 256, dtype=np.bool)[7::]
+ assert_(np.any(d))
+ e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
- self.assertFalse(np.all(e))
+ assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
- d = np.array([False] * 100043, dtype=np.bool)
+ d = np.array([False] * 100043, dtype=bool)
d[i] = True
- self.assertTrue(np.any(d), msg="%r" % i)
- e = np.array([True] * 100043, dtype=np.bool)
+ assert_(np.any(d), msg="%r" % i)
+ e = np.array([True] * 100043, dtype=bool)
e[i] = False
- self.assertFalse(np.all(e), msg="%r" % i)
+ assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
@@ -315,12 +345,12 @@ class TestBoolArray(TestCase):
assert_array_equal(self.im ^ False, self.im)
-class TestBoolCmp(TestCase):
- def setUp(self):
+class TestBoolCmp(object):
+ def setup(self):
self.f = np.ones(256, dtype=np.float32)
- self.ef = np.ones(self.f.size, dtype=np.bool)
+ self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
- self.ed = np.ones(self.d.size, dtype=np.bool)
+ self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
@@ -415,38 +445,35 @@ class TestBoolCmp(TestCase):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
-class TestSeterr(TestCase):
+class TestSeterr(object):
def test_default(self):
err = np.geterr()
- self.assertEqual(err, dict(
- divide='warn',
- invalid='warn',
- over='warn',
- under='ignore',
- ))
+ assert_equal(err,
+ dict(divide='warn',
+ invalid='warn',
+ over='warn',
+ under='ignore')
+ )
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
- self.assertTrue(err == old)
+ assert_(err == old)
new = np.seterr()
- self.assertTrue(new['divide'] == 'print')
+ assert_(new['divide'] == 'print')
np.seterr(over='raise')
- self.assertTrue(np.geterr()['over'] == 'raise')
- self.assertTrue(new['divide'] == 'print')
+ assert_(np.geterr()['over'] == 'raise')
+ assert_(new['divide'] == 'print')
np.seterr(**old)
- self.assertTrue(np.geterr() == old)
+ assert_(np.geterr() == old)
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
- try:
+ with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
- except FloatingPointError:
- pass
- else:
- self.fail()
+
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
@@ -459,7 +486,7 @@ class TestSeterr(TestCase):
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
- self.assertEqual(len(w), 1)
+ assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
@@ -470,12 +497,12 @@ class TestSeterr(TestCase):
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
- self.assertEqual(self.called, 1)
+ assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
- self.assertEqual(self.called, 2)
+ assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
@@ -499,7 +526,7 @@ class TestSeterr(TestCase):
np.seterrobj(olderrobj)
-class TestFloatExceptions(TestCase):
+class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
@@ -522,7 +549,6 @@ class TestFloatExceptions(TestCase):
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
- @dec.knownfailureif(True, "See ticket #2350")
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
@@ -583,20 +609,20 @@ class TestFloatExceptions(TestCase):
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
- self.assertEqual(len(w), 1)
- self.assertTrue("divide by zero" in str(w[0].message))
+ assert_equal(len(w), 1)
+ assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
- self.assertEqual(len(w), 2)
- self.assertTrue("overflow" in str(w[-1].message))
+ assert_equal(len(w), 2)
+ assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
- self.assertEqual(len(w), 3)
- self.assertTrue("invalid value" in str(w[-1].message))
+ assert_equal(len(w), 3)
+ assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
- self.assertEqual(len(w), 4)
- self.assertTrue("underflow" in str(w[-1].message))
+ assert_equal(len(w), 4)
+ assert_("underflow" in str(w[-1].message))
-class TestTypes(TestCase):
+class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
@@ -787,8 +813,8 @@ class TestTypes(TestCase):
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
- assert_(np.can_cast(np.float64, np.complex))
- assert_(not np.can_cast(np.complex, np.float))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
@@ -859,13 +885,30 @@ class TestTypes(TestCase):
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+ def test_can_cast_values(self):
+ # gh-5917
+ for dt in np.sctypes['int'] + np.sctypes['uint']:
+ ii = np.iinfo(dt)
+ assert_(np.can_cast(ii.min, dt))
+ assert_(np.can_cast(ii.max, dt))
+ assert_(not np.can_cast(ii.min - 1, dt))
+ assert_(not np.can_cast(ii.max + 1, dt))
+
+ for dt in np.sctypes['float']:
+ fi = np.finfo(dt)
+ assert_(np.can_cast(fi.min, dt))
+ assert_(np.can_cast(fi.max, dt))
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
-class TestFromiter(TestCase):
+class TestFromiter(object):
def makegen(self):
for x in range(24):
yield x**2
@@ -874,25 +917,25 @@ class TestFromiter(TestCase):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
- self.assertTrue(ai32.dtype == np.dtype(np.int32))
- self.assertTrue(ai64.dtype == np.dtype(np.int64))
- self.assertTrue(af.dtype == np.dtype(float))
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(len(a) == len(expected))
- self.assertTrue(len(a20) == 20)
- self.assertRaises(ValueError, np.fromiter,
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(np.alltrue(a == expected, axis=0))
- self.assertTrue(np.alltrue(a20 == expected[:20], axis=0))
+ assert_(np.alltrue(a == expected, axis=0))
+ assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
@@ -905,18 +948,18 @@ class TestFromiter(TestCase):
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
-class TestNonzero(TestCase):
+class TestNonzero(object):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
@@ -968,11 +1011,11 @@ class TestNonzero(TestCase):
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
- c = np.zeros(200, dtype=np.bool)
+ c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
- c = np.zeros(400, dtype=np.bool)
+ c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
@@ -991,16 +1034,138 @@ class TestNonzero(TestCase):
assert_(type(nzx_i) is np.ndarray)
assert_(nzx_i.flags.writeable)
- # Tests that the array method
- # call works
+ def test_count_nonzero_axis(self):
+ # Basic check of functionality
+ m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
+
+ expected = np.array([1, 1, 1, 1, 1])
+ assert_equal(np.count_nonzero(m, axis=0), expected)
+
+ expected = np.array([2, 3])
+ assert_equal(np.count_nonzero(m, axis=1), expected)
+
+ assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
+ assert_raises(TypeError, np.count_nonzero, m, axis='foo')
+ assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
+ assert_raises(TypeError, np.count_nonzero,
+ m, axis=np.array([[1], [2]]))
+
+ def test_count_nonzero_axis_all_dtypes(self):
+ # More thorough test that the axis argument is respected
+ # for all dtypes and responds correctly when presented with
+ # either integer or tuple arguments for axis
+ msg = "Mismatch for dtype: %s"
+
+ def assert_equal_w_dt(a, b, err_msg):
+ assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+ assert_equal(a, b, err_msg=err_msg)
+
+ for dt in np.typecodes['All']:
+ err_msg = msg % (np.dtype(dt).name,)
+
+ if dt != 'V':
+ if dt != 'M':
+ m = np.zeros((3, 3), dtype=dt)
+ n = np.ones(1, dtype=dt)
+
+ m[0, 0] = n[0]
+ m[1, 0] = n[0]
+
+ else: # np.zeros doesn't work for np.datetime64
+ m = np.array(['1970-01-01'] * 9)
+ m = m.reshape((3, 3))
+
+ m[0, 0] = '1970-01-12'
+ m[1, 0] = '1970-01-12'
+ m = m.astype(dt)
+
+ expected = np.array([2, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([1, 1, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(2)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ if dt == 'V':
+ # There are no 'nonzero' objects for np.void, so the testing
+ # setup is slightly different for this dtype
+ m = np.array([np.void(1)] * 6).reshape((2, 3))
+
+ expected = np.array([0, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(0)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ def test_count_nonzero_axis_consistent(self):
+ # Check that the axis behaviour for valid axes in
+ # non-special cases is consistent (and therefore
+ # correct) by checking it against an integer array
+ # that is then casted to the generic object dtype
+ from itertools import combinations, permutations
+
+ axis = (0, 1, 2, 3)
+ size = (5, 5, 5, 5)
+ msg = "Mismatch for axis: %s"
+
+ rng = np.random.RandomState(1234)
+ m = rng.randint(-100, 100, size=size)
+ n = m.astype(object)
+
+ for length in range(len(axis)):
+ for combo in combinations(axis, length):
+ for perm in permutations(combo):
+ assert_equal(
+ np.count_nonzero(m, axis=perm),
+ np.count_nonzero(n, axis=perm),
+ err_msg=msg % (perm,))
+
+ def test_countnonzero_axis_empty(self):
+ a = np.array([[0, 0, 1], [1, 0, 1]])
+ assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
+
def test_array_method(self):
+ # Tests that the array method
+ # call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
+ def test_nonzero_invalid_object(self):
+ # gh-9295
+ a = np.array([np.array([1, 2]), 3])
+ assert_raises(ValueError, np.nonzero, a)
+
+ class BoolErrors:
+ def __bool__(self):
+ raise ValueError("Not allowed")
+ def __nonzero__(self):
+ raise ValueError("Not allowed")
+
+ assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+
-class TestIndex(TestCase):
+class TestIndex(object):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
@@ -1017,7 +1182,7 @@ class TestIndex(TestCase):
assert_equal(c.dtype, np.dtype('int32'))
-class TestBinaryRepr(TestCase):
+class TestBinaryRepr(object):
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
@@ -1041,8 +1206,20 @@ class TestBinaryRepr(TestCase):
assert_equal(np.binary_repr(10, width=7), '0001010')
assert_equal(np.binary_repr(-5, width=7), '1111011')
+ def test_neg_width_boundaries(self):
+ # see gh-8670
+
+ # Ensure that the example in the issue does not
+ # break before proceeding to a more thorough test.
+ assert_equal(np.binary_repr(-128, width=8), '10000000')
+
+ for width in range(1, 11):
+ num = -2**(width - 1)
+ exp = '1' + (width - 1) * '0'
+ assert_equal(np.binary_repr(num, width=width), exp)
+
-class TestBaseRepr(TestCase):
+class TestBaseRepr(object):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
@@ -1058,13 +1235,13 @@ class TestBaseRepr(TestCase):
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 1)
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 37)
-class TestArrayComparisons(TestCase):
+class TestArrayComparisons(object):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1086,6 +1263,15 @@ class TestArrayComparisons(TestCase):
assert_(res)
assert_(type(res) is bool)
+ def test_none_compares_elementwise(self):
+ a = np.array([None, 1, None], dtype=object)
+ assert_equal(a == None, [True, False, True])
+ assert_equal(a != None, [False, True, False])
+
+ a = np.ones(3)
+ assert_equal(a == None, [False, False, False])
+ assert_equal(a != None, [True, True, True])
+
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1129,13 +1315,13 @@ def assert_array_strict_equal(x, y):
assert_(x.flags.writeable == y.flags.writeable)
assert_(x.flags.c_contiguous == y.flags.c_contiguous)
assert_(x.flags.f_contiguous == y.flags.f_contiguous)
- assert_(x.flags.updateifcopy == y.flags.updateifcopy)
+ assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
-class TestClip(TestCase):
- def setUp(self):
+class TestClip(object):
+ def setup(self):
self.nr = 5
self.nc = 3
@@ -1250,7 +1436,7 @@ class TestClip(TestCase):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
- a = np.ones(10, dtype=np.complex)
+ a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
@@ -1340,7 +1526,7 @@ class TestClip(TestCase):
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
- self.clip(a, m, M, ac)
+ self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
@@ -1353,7 +1539,7 @@ class TestClip(TestCase):
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
- self.clip(a, m, M, ac)
+ self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
@@ -1532,6 +1718,22 @@ class TestClip(TestCase):
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
+ def test_clip_with_out_transposed(self):
+ # Test that the out argument works when tranposed
+ a = np.arange(16).reshape(4, 4)
+ out = np.empty_like(a).T
+ a.clip(4, 10, out=out)
+ expected = self.clip(a, 4, 10)
+ assert_array_equal(out, expected)
+
+ def test_clip_with_out_memory_overlap(self):
+ # Test that the out argument works when it has memory overlap
+ a = np.arange(16).reshape(4, 4)
+ ac = a.copy()
+ a[:-1].clip(4, 10, out=a[1:])
+ expected = self.clip(ac[:-1], 4, 10)
+ assert_array_equal(a[1:], expected)
+
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
@@ -1561,7 +1763,7 @@ class TestClip(TestCase):
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
- self.assertTrue(a2 is a)
+ assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
@@ -1576,10 +1778,10 @@ class TestAllclose(object):
rtol = 1e-5
atol = 1e-8
- def setUp(self):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
@@ -1606,7 +1808,7 @@ class TestAllclose(object):
(np.inf, [np.inf])]
for (x, y) in data:
- yield (self.tst_allclose, x, y)
+ self.tst_allclose(x, y)
def test_ip_not_allclose(self):
# Parametric test factory.
@@ -1627,7 +1829,7 @@ class TestAllclose(object):
(np.array([np.inf, 1]), np.array([0, np.inf]))]
for (x, y) in data:
- yield (self.tst_not_allclose, x, y)
+ self.tst_not_allclose(x, y)
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
@@ -1711,7 +1913,7 @@ class TestIsclose(object):
tests = self.some_close_tests
results = self.some_close_results
for (x, y), result in zip(tests, results):
- yield (assert_array_equal, np.isclose(x, y), result)
+ assert_array_equal(np.isclose(x, y), result)
def tst_all_isclose(self, x, y):
assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
@@ -1731,19 +1933,19 @@ class TestIsclose(object):
def test_ip_all_isclose(self):
self.setup()
for (x, y) in self.all_close_tests:
- yield (self.tst_all_isclose, x, y)
+ self.tst_all_isclose(x, y)
def test_ip_none_isclose(self):
self.setup()
for (x, y) in self.none_close_tests:
- yield (self.tst_none_isclose, x, y)
+ self.tst_none_isclose(x, y)
def test_ip_isclose_allclose(self):
self.setup()
tests = (self.all_close_tests + self.none_close_tests +
self.some_close_tests)
for (x, y) in tests:
- yield (self.tst_isclose_allclose, x, y)
+ self.tst_isclose_allclose(x, y)
def test_equal_nan(self):
assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
@@ -1790,13 +1992,13 @@ class TestIsclose(object):
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
- assert_(np.isclose(np.inf, -np.inf) is False)
- assert_(np.isclose(0, np.inf) is False)
- assert_(type(np.isclose(0, np.inf)) is bool)
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
-class TestStdVar(TestCase):
- def setUp(self):
+class TestStdVar(object):
+ def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
@@ -1834,7 +2036,7 @@ class TestStdVar(TestCase):
assert_array_equal(r, out)
-class TestStdVarComplex(TestCase):
+class TestStdVarComplex(object):
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
@@ -1846,10 +2048,10 @@ class TestStdVarComplex(TestCase):
assert_equal(np.std(1j), 0)
-class TestCreationFuncs(TestCase):
+class TestCreationFuncs(object):
# Test ones, zeros, empty and full.
- def setUp(self):
+ def setup(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
@@ -1868,27 +2070,26 @@ class TestCreationFuncs(TestCase):
fill_kwarg = {}
if fill_value is not None:
fill_kwarg = {'fill_value': fill_value}
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- for size, ndims, order, dtype in itertools.product(*par):
- shape = ndims * [size]
- # do not fill void type
- if fill_kwarg and dtype.str.startswith('|V'):
- continue
+ for size, ndims, order, dtype in itertools.product(*par):
+ shape = ndims * [size]
- arr = func(shape, order=order, dtype=dtype,
- **fill_kwarg)
+ # do not fill void type
+ if fill_kwarg and dtype.str.startswith('|V'):
+ continue
- assert_equal(arr.dtype, dtype)
- assert_(getattr(arr.flags, self.orders[order]))
+ arr = func(shape, order=order, dtype=dtype,
+ **fill_kwarg)
- if fill_value is not None:
- if dtype.str.startswith('|S'):
- val = str(fill_value)
- else:
- val = fill_value
- assert_equal(arr, dtype.type(val))
+ assert_equal(arr.dtype, dtype)
+ assert_(getattr(arr.flags, self.orders[order]))
+
+ if fill_value is not None:
+ if dtype.str.startswith('|S'):
+ val = str(fill_value)
+ else:
+ val = fill_value
+ assert_equal(arr, dtype.type(val))
def test_zeros(self):
self.check_function(np.zeros)
@@ -1903,6 +2104,7 @@ class TestCreationFuncs(TestCase):
self.check_function(np.full, 0)
self.check_function(np.full, 1)
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_for_reference_leak(self):
# Make sure we have an object for reference
dim = 1
@@ -1917,10 +2119,10 @@ class TestCreationFuncs(TestCase):
assert_(sys.getrefcount(dim) == beg)
-class TestLikeFuncs(TestCase):
+class TestLikeFuncs(object):
'''Test ones_like, zeros_like, empty_like and full_like'''
- def setUp(self):
+ def setup(self):
self.data = [
# Array scalars
(np.array(3.), None),
@@ -2010,13 +2212,16 @@ class TestLikeFuncs(TestCase):
self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
- a = np.matrix([[1, 2], [3, 4]])
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
- assert_(type(b) is np.matrix)
+ assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
@@ -2035,7 +2240,7 @@ class TestLikeFuncs(TestCase):
self.check_like_function(np.full_like, np.inf, True)
-class TestCorrelate(TestCase):
+class TestCorrelate(object):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
@@ -2049,7 +2254,7 @@ class TestCorrelate(TestCase):
-102., -54., -19.], dtype=dt)
def test_float(self):
- self._setup(np.float)
+ self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
@@ -2078,15 +2283,15 @@ class TestCorrelate(TestCase):
assert_array_equal(k, np.ones(3))
def test_complex(self):
- x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
- y = np.array([-1, -2j, 3+1j], dtype=np.complex)
- r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
+ x = np.array([1, 2, 3, 4+1j], dtype=complex)
+ y = np.array([-1, -2j, 3+1j], dtype=complex)
+ r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
-class TestConvolve(TestCase):
+class TestConvolve(object):
def test_object(self):
d = [1.] * 100
k = [1.] * 3
@@ -2128,7 +2333,7 @@ class TestStringFunction(object):
assert_equal(str(a), "[1]")
-class TestRoll(TestCase):
+class TestRoll(object):
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
@@ -2145,12 +2350,48 @@ class TestRoll(TestCase):
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+ # Roll multiple axes at once.
+ x2r = np.roll(x2, 1, axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (-1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (0, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, (0, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
+
+ x2r = np.roll(x2, (1, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (-1, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
+
+ # Roll the same axis multiple times.
+ x2r = np.roll(x2, 1, axis=(0, 0))
+ assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
+
+ x2r = np.roll(x2, 1, axis=(1, 1))
+ assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
+
+ # Roll more than one turn in either direction.
+ x2r = np.roll(x2, 6, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, -4, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
-class TestRollaxis(TestCase):
+class TestRollaxis(object):
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
@@ -2169,10 +2410,10 @@ class TestRollaxis(TestCase):
def test_exceptions(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
- assert_raises(ValueError, np.rollaxis, a, -5, 0)
- assert_raises(ValueError, np.rollaxis, a, 0, -5)
- assert_raises(ValueError, np.rollaxis, a, 4, 0)
- assert_raises(ValueError, np.rollaxis, a, 0, 5)
+ assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+ assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
def test_results(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
@@ -2212,7 +2453,7 @@ class TestRollaxis(TestCase):
assert_(not res.flags['OWNDATA'])
-class TestMoveaxis(TestCase):
+class TestMoveaxis(object):
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
@@ -2259,11 +2500,11 @@ class TestMoveaxis(TestCase):
def test_errors(self):
x = np.random.randn(1, 2, 3)
- assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, 3, 0)
- assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, -4, 0)
- assert_raises_regex(ValueError, 'invalid axis .* `destination`',
+ assert_raises_regex(np.AxisError, 'destination.*out of bounds',
np.moveaxis, x, 0, 5)
assert_raises_regex(ValueError, 'repeated axis in `source`',
np.moveaxis, x, [0, 0], [0, 1])
@@ -2286,7 +2527,7 @@ class TestMoveaxis(TestCase):
assert_(isinstance(result, np.ndarray))
-class TestCross(TestCase):
+class TestCross(object):
def test_2x2(self):
u = [1, 2]
v = [3, 4]
@@ -2351,13 +2592,13 @@ class TestCross(TestCase):
u = np.ones((10, 3, 5))
v = np.ones((2, 5))
assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
- assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2)
- assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
u = np.ones((10, 3, 5, 7))
v = np.ones((5, 7, 2))
assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
- assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2)
- assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
# gh-5885
u = np.ones((3, 4, 2))
for axisc in range(-2, 2):
@@ -2414,7 +2655,7 @@ class TestRequire(object):
fd = [None, 'f8', 'c16']
for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
a = self.generate_all_false(idtype)
- yield self.set_and_check_flag, flag, fdtype, a
+ self.set_and_check_flag(flag, fdtype, a)
def test_unknown_requirement(self):
a = self.generate_all_false('f8')
@@ -2446,10 +2687,10 @@ class TestRequire(object):
for flag in self.flag_names:
a = ArraySubclass((2, 2))
- yield self.set_and_check_flag, flag, None, a
+ self.set_and_check_flag(flag, None, a)
-class TestBroadcast(TestCase):
+class TestBroadcast(object):
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
@@ -2459,6 +2700,7 @@ class TestBroadcast(TestCase):
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
@@ -2469,6 +2711,7 @@ class TestBroadcast(TestCase):
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
@@ -2484,7 +2727,7 @@ class TestBroadcast(TestCase):
assert_equal(mit.numiter, j)
-class TestKeepdims(TestCase):
+class TestKeepdims(object):
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
@@ -2496,5 +2739,12 @@ class TestKeepdims(TestCase):
assert_raises(TypeError, np.sum, x, keepdims=True)
-if __name__ == "__main__":
- run_module_suite()
+class TestTensordot(object):
+
+ def test_zero_dimension(self):
+ # Test resolution to issue #5663
+ a = np.ndarray((3,0))
+ b = np.ndarray((0,4))
+ td = np.tensordot(a, b, (1, 0))
+ assert_array_equal(td, np.dot(a, b))
+ assert_array_equal(td, np.einsum('ij,jk', a, b))
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index a7bbe0192..71f7b7150 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -1,12 +1,11 @@
from __future__ import division, absolute_import, print_function
import sys
+import itertools
+import pytest
import numpy as np
-from numpy.compat import asbytes, asunicode
-from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
-)
+from numpy.testing import assert_, assert_equal, assert_raises
# This is the structure of the table used for plain objects:
#
@@ -63,8 +62,8 @@ NbufferT = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
- ([3, 2], (6j, 6., (asbytes('nn'), [6j, 4j], [6., 4.], [1, 2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6., 4.], [6., 4.]], 8),
- ([4, 3], (7j, 7., (asbytes('oo'), [7j, 5j], [7., 5.], [2, 1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7., 5.], [7., 5.]], 9),
+ ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9),
]
@@ -88,10 +87,8 @@ def normalize_descr(descr):
else:
nitem = (item[0], dtype)
out.append(nitem)
- elif isinstance(item[1], list):
- l = []
- for j in normalize_descr(item[1]):
- l.append(j)
+ elif isinstance(dtype, list):
+ l = normalize_descr(dtype)
out.append((item[0], l))
else:
raise ValueError("Expected a str or list and got %s" %
@@ -103,99 +100,99 @@ def normalize_descr(descr):
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
h = np.zeros((), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void')
- self.assertTrue(h.dtype.fields['x'][0].char == 'V')
- self.assertTrue(h.dtype.fields['x'][0].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+ assert_(h.dtype.fields['x'][0].char == 'V')
+ assert_(h.dtype.fields['x'][0].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((), dtype='u1'))
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
h = np.zeros((2,), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['y'].name[:4] == 'void')
- self.assertTrue(h.dtype['y'].char == 'V')
- self.assertTrue(h.dtype['y'].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['y'].name[:4] == 'void')
+ assert_(h.dtype['y'].char == 'V')
+ assert_(h.dtype['y'].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
h = np.zeros((2, 3), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['z'].name == 'uint8')
- self.assertTrue(h.dtype['z'].char == 'B')
- self.assertTrue(h.dtype['z'].type == np.uint8)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['z'].name == 'uint8')
+ assert_(h.dtype['z'].char == 'B')
+ assert_(h.dtype['z'].type == np.uint8)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
-class test_create_zeros_plain(create_zeros, TestCase):
+class TestCreateZerosPlain(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
_descr = Pdescr
-class test_create_zeros_nested(create_zeros, TestCase):
+class TestCreateZerosNested(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
-class create_values(object):
+class CreateValues(object):
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
"""Check creation from tuples"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (2,))
+ assert_(h.shape == (2,))
else:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
def test_list_of_tuple(self):
"""Check creation from list of tuples"""
h = np.array([self._buffer], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 2))
+ assert_(h.shape == (1, 2))
else:
- self.assertTrue(h.shape == (1,))
+ assert_(h.shape == (1,))
def test_list_of_list_of_tuple(self):
"""Check creation from list of list of tuples"""
h = np.array([[self._buffer]], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 1, 2))
+ assert_(h.shape == (1, 1, 2))
else:
- self.assertTrue(h.shape == (1, 1))
+ assert_(h.shape == (1, 1))
-class test_create_values_plain_single(create_values, TestCase):
+class TestCreateValuesPlainSingle(CreateValues):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_create_values_plain_multiple(create_values, TestCase):
+class TestCreateValuesPlainMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class test_create_values_nested_single(create_values, TestCase):
+class TestCreateValuesNestedSingle(CreateValues):
"""Check the creation of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = 0
_buffer = NbufferT[0]
-class test_create_values_nested_multiple(create_values, TestCase):
+class TestCreateValuesNestedMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
@@ -206,18 +203,18 @@ class test_create_values_nested_multiple(create_values, TestCase):
# Reading tests
############################################################
-class read_values_plain(object):
+class ReadValuesPlain(object):
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][1],
@@ -226,31 +223,31 @@ class read_values_plain(object):
self._buffer[1][2]], dtype='u1'))
-class test_read_values_plain_single(read_values_plain, TestCase):
+class TestReadValuesPlainSingle(ReadValuesPlain):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_read_values_plain_multiple(read_values_plain, TestCase):
+class TestReadValuesPlainMultiple(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class read_values_nested(object):
+class ReadValuesNested(object):
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
"""Check reading the top fields of a nested array"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][4],
@@ -309,41 +306,41 @@ class read_values_nested(object):
def test_nested1_descriptor(self):
"""Check access nested descriptors of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['value'].name == 'complex128')
- self.assertTrue(h.dtype['Info']['y2'].name == 'float64')
+ assert_(h.dtype['Info']['value'].name == 'complex128')
+ assert_(h.dtype['Info']['y2'].name == 'float64')
if sys.version_info[0] >= 3:
- self.assertTrue(h.dtype['info']['Name'].name == 'str256')
+ assert_(h.dtype['info']['Name'].name == 'str256')
else:
- self.assertTrue(h.dtype['info']['Name'].name == 'unicode256')
- self.assertTrue(h.dtype['info']['Value'].name == 'complex128')
+ assert_(h.dtype['info']['Name'].name == 'unicode256')
+ assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
"""Check access nested descriptors of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256')
- self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64')
+ assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+ assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
-class test_read_values_nested_single(read_values_nested, TestCase):
+class TestReadValuesNestedSingle(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
-class test_read_values_nested_multiple(read_values_nested, TestCase):
+class TestReadValuesNestedMultiple(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = True
_buffer = NbufferT
-class TestEmptyField(TestCase):
+class TestEmptyField(object):
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
@@ -364,19 +361,139 @@ class TestCommonType(TestCase):
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
-class TestMultipleFields(TestCase):
- def setUp(self):
+class TestMultipleFields(object):
+ def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary['f0', 'f1']
def test_no_tuple(self):
- self.assertRaises(IndexError, self._bad_call)
+ assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
-if __name__ == "__main__":
- run_module_suite()
+
+class TestIsSubDType(object):
+ # scalar types can be promoted into dtypes
+ wrappers = [np.dtype, lambda x: x]
+
+ def test_both_abstract(self):
+ assert_(np.issubdtype(np.floating, np.inexact))
+ assert_(not np.issubdtype(np.inexact, np.floating))
+
+ def test_same(self):
+ for cls in (np.float32, np.int32):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(np.issubdtype(w1(cls), w2(cls)))
+
+ def test_subclass(self):
+ # note we cannot promote floating to a dtype, as it would turn into a
+ # concrete type
+ for w in self.wrappers:
+ assert_(np.issubdtype(w(np.float32), np.floating))
+ assert_(np.issubdtype(w(np.float64), np.floating))
+
+ def test_subclass_backwards(self):
+ for w in self.wrappers:
+ assert_(not np.issubdtype(np.floating, w(np.float32)))
+ assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+ def test_sibling_class(self):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+ assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
+
+class TestSctypeDict(object):
+ def test_longdouble(self):
+ assert_(np.sctypeDict['f8'] is not np.longdouble)
+ assert_(np.sctypeDict['c16'] is not np.clongdouble)
+
+
+class TestBitName(object):
+ def test_abstract(self):
+ assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
+
+
+class TestMaximumSctype(object):
+
+ # note that parametrizing with sctype['int'] and similar would skip types
+ # with the same size (gh-11923)
+
+ @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
+ def test_int(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
+
+ @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
+ def test_uint(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
+
+ @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
+ def test_float(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
+
+ @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
+ def test_complex(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
+
+ @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
+ def test_other(self, t):
+ assert_equal(np.maximum_sctype(t), t)
+
+
+class Test_sctype2char(object):
+ # This function is old enough that we're really just documenting the quirks
+ # at this point.
+
+ def test_scalar_type(self):
+ assert_equal(np.sctype2char(np.double), 'd')
+ assert_equal(np.sctype2char(np.int_), 'l')
+ assert_equal(np.sctype2char(np.unicode_), 'U')
+ assert_equal(np.sctype2char(np.bytes_), 'S')
+
+ def test_other_type(self):
+ assert_equal(np.sctype2char(float), 'd')
+ assert_equal(np.sctype2char(list), 'O')
+ assert_equal(np.sctype2char(np.ndarray), 'O')
+
+ def test_third_party_scalar_type(self):
+ from numpy.core._rational_tests import rational
+ assert_raises(KeyError, np.sctype2char, rational)
+ assert_raises(KeyError, np.sctype2char, rational(1))
+
+ def test_array_instance(self):
+ assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+ def test_abstract_type(self):
+ assert_raises(KeyError, np.sctype2char, np.floating)
+
+ def test_non_type(self):
+ assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+ (np.int32, True),
+ (list, False),
+ (1.1, False),
+ (str, True),
+ (np.dtype(np.float64), True),
+ (np.dtype((np.int16, (3, 4))), True),
+ (np.dtype([('a', np.int8)]), True),
+ ])
+def test_issctype(rep, expected):
+ # ensure proper identification of scalar
+ # data-types by issctype()
+ actual = np.issctype(rep)
+ assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+ reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+class TestDocStrings(object):
+ def test_platform_dependent_aliases(self):
+ if np.int64 is np.int_:
+ assert_('int64' in np.int_.__doc__)
+ elif np.int64 is np.longlong:
+ assert_('int64' in np.longlong.__doc__)
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
new file mode 100644
index 000000000..62b2a3e53
--- /dev/null
+++ b/numpy/core/tests/test_overrides.py
@@ -0,0 +1,388 @@
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+ get_overloaded_types_and_args, array_function_dispatch,
+ verify_matching_signatures, ENABLE_ARRAY_FUNCTION)
+from numpy.core.numeric import pickle
+import pytest
+
+
+requires_array_function = pytest.mark.skipif(
+ not ENABLE_ARRAY_FUNCTION,
+ reason="__array_function__ dispatch not enabled.")
+
+
+def _get_overloaded_args(relevant_args):
+ types, args = get_overloaded_types_and_args(relevant_args)
+ return args
+
+
+def _return_not_implemented(self, *args, **kwargs):
+ return NotImplemented
+
+
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+ """Docstring."""
+ return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+ """Docstring."""
+ return 'original'
+
+
+@requires_array_function
+class TestGetOverloadedTypesAndArgs(object):
+
+ def test_ndarray(self):
+ array = np.array(1)
+
+ types, args = get_overloaded_types_and_args([array])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([array, array])
+ assert_equal(len(types), 1)
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([array, 1])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([1, array])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ def test_ndarray_subclasses(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ array = np.array(1).view(np.ndarray)
+ override_sub = np.array(1).view(OverrideSub)
+ no_override_sub = np.array(1).view(NoOverrideSub)
+
+ types, args = get_overloaded_types_and_args([array, override_sub])
+ assert_equal(set(types), {np.ndarray, OverrideSub})
+ assert_equal(list(args), [override_sub, array])
+
+ types, args = get_overloaded_types_and_args([array, no_override_sub])
+ assert_equal(set(types), {np.ndarray, NoOverrideSub})
+ assert_equal(list(args), [no_override_sub, array])
+
+ types, args = get_overloaded_types_and_args(
+ [override_sub, no_override_sub])
+ assert_equal(set(types), {OverrideSub, NoOverrideSub})
+ assert_equal(list(args), [override_sub, no_override_sub])
+
+ def test_ndarray_and_duck_array(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ other = Other()
+
+ types, args = get_overloaded_types_and_args([other, array])
+ assert_equal(set(types), {np.ndarray, Other})
+ assert_equal(list(args), [other, array])
+
+ types, args = get_overloaded_types_and_args([array, other])
+ assert_equal(set(types), {np.ndarray, Other})
+ assert_equal(list(args), [array, other])
+
+ def test_ndarray_subclass_and_duck_array(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ subarray = np.array(1).view(OverrideSub)
+ other = Other()
+
+ assert_equal(_get_overloaded_args([array, subarray, other]),
+ [subarray, array, other])
+ assert_equal(_get_overloaded_args([array, other, subarray]),
+ [subarray, array, other])
+
+ def test_many_duck_arrays(self):
+
+ class A(object):
+ __array_function__ = _return_not_implemented
+
+ class B(A):
+ __array_function__ = _return_not_implemented
+
+ class C(A):
+ __array_function__ = _return_not_implemented
+
+ class D(object):
+ __array_function__ = _return_not_implemented
+
+ a = A()
+ b = B()
+ c = C()
+ d = D()
+
+ assert_equal(_get_overloaded_args([1]), [])
+ assert_equal(_get_overloaded_args([a]), [a])
+ assert_equal(_get_overloaded_args([a, 1]), [a])
+ assert_equal(_get_overloaded_args([a, a, a]), [a])
+ assert_equal(_get_overloaded_args([a, d, a]), [a, d])
+ assert_equal(_get_overloaded_args([a, b]), [b, a])
+ assert_equal(_get_overloaded_args([b, a]), [b, a])
+ assert_equal(_get_overloaded_args([a, b, c]), [b, c, a])
+ assert_equal(_get_overloaded_args([a, c, b]), [c, b, a])
+
+
+@requires_array_function
+class TestNDArrayArrayFunction(object):
+
+ def test_method(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ array = np.array([1])
+ other = Other()
+ no_override_sub = array.view(NoOverrideSub)
+ override_sub = array.view(OverrideSub)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray,),
+ args=(array, 1.), kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, Other),
+ args=(array, other), kwargs={})
+ assert_(result is NotImplemented)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, NoOverrideSub),
+ args=(array, no_override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, OverrideSub),
+ args=(array, override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ np.concatenate((array, other))
+
+ expected = np.concatenate((array, array))
+ result = np.concatenate((array, no_override_sub))
+ assert_equal(result, expected.view(NoOverrideSub))
+ result = np.concatenate((array, override_sub))
+ assert_equal(result, expected.view(OverrideSub))
+
+
+@requires_array_function
+class TestArrayFunctionDispatch(object):
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ roundtripped = pickle.loads(
+ pickle.dumps(dispatched_one_arg, protocol=proto))
+ assert_(roundtripped is dispatched_one_arg)
+
+ def test_name_and_docstring(self):
+ assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
+ if sys.flags.optimize < 2:
+ assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
+
+ def test_interface(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return (self, func, types, args, kwargs)
+
+ original = MyArray()
+ (obj, func, types, args, kwargs) = dispatched_one_arg(original)
+ assert_(obj is original)
+ assert_(func is dispatched_one_arg)
+ assert_equal(set(types), {MyArray})
+ # assert_equal uses the overloaded np.iscomplexobj() internally
+ assert_(args == (original,))
+ assert_equal(kwargs, {})
+
+ def test_not_implemented(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return NotImplemented
+
+ array = MyArray()
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ dispatched_one_arg(array)
+
+
+@requires_array_function
+class TestVerifyMatchingSignatures(object):
+
+ def test_verify_matching_signatures(self):
+
+ verify_matching_signatures(lambda x: 0, lambda x: 0)
+ verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
+ verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
+
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda a: 0, lambda b: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x: 0, lambda x=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
+
+ def test_array_function_dispatch(self):
+
+ with assert_raises(RuntimeError):
+ @array_function_dispatch(lambda x: (x,))
+ def f(y):
+ pass
+
+ # should not raise
+ @array_function_dispatch(lambda x: (x,), verify=False)
+ def f(y):
+ pass
+
+
+def _new_duck_type_and_implements():
+ """Create a duck array type and implements functions."""
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementations."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ return (MyArray, implements)
+
+
+@requires_array_function
+class TestArrayFunctionImplementation(object):
+
+ def test_one_arg(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(dispatched_one_arg)
+ def _(array):
+ return 'myarray'
+
+ assert_equal(dispatched_one_arg(1), 'original')
+ assert_equal(dispatched_one_arg(MyArray()), 'myarray')
+
+ def test_optional_args(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array, option=None: (array,))
+ def func_with_option(array, option='default'):
+ return option
+
+ @implements(func_with_option)
+ def my_array_func_with_option(array, new_option='myarray'):
+ return new_option
+
+ # we don't need to implement every option on __array_function__
+ # implementations
+ assert_equal(func_with_option(1), 'default')
+ assert_equal(func_with_option(1, option='extra'), 'extra')
+ assert_equal(func_with_option(MyArray()), 'myarray')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), option='extra')
+
+ # but new options on implementations can't be used
+ result = my_array_func_with_option(MyArray(), new_option='yes')
+ assert_equal(result, 'yes')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), new_option='no')
+
+ def test_not_implemented(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array: (array,), module='my')
+ def func(array):
+ return array
+
+ array = np.array(1)
+ assert_(func(array) is array)
+ assert_equal(func.__module__, 'my')
+
+ with assert_raises_regex(
+ TypeError, "no implementation found for 'my.func'"):
+ func(MyArray())
+
+
+class TestNDArrayMethods(object):
+
+ def test_repr(self):
+ # gh-12162: should still be defined even if __array_function__ doesn't
+ # implement np.array_repr()
+
+ class MyArray(np.ndarray):
+ def __array_function__(*args, **kwargs):
+ return NotImplemented
+
+ array = np.array(1).view(MyArray)
+ assert_equal(repr(array), 'MyArray(1)')
+ assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions(object):
+
+ def test_set_module(self):
+ assert_equal(np.sum.__module__, 'numpy')
+ assert_equal(np.char.equal.__module__, 'numpy.char')
+ assert_equal(np.fft.fft.__module__, 'numpy.fft')
+ assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 3 only")
+ def test_inspect_sum(self):
+ signature = inspect.signature(np.sum)
+ assert_('axis' in signature.parameters)
+
+ @requires_array_function
+ def test_override_sum(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(np.sum)
+ def _(array):
+ return 'yes'
+
+ assert_equal(np.sum(MyArray()), 'yes')
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 6234b641e..c5c091e13 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -1,13 +1,12 @@
from __future__ import division, absolute_import, print_function
import sys
-import locale
-import nose
+
+import pytest
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, SkipTest
-)
+from numpy.testing import assert_, assert_equal
+from numpy.core.tests._locales import CommaDecimalPointLocale
if sys.version_info[0] >= 3:
@@ -18,47 +17,51 @@ else:
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
-def check_float_type(tp):
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_types(tp):
+ """ Check formatting.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble aren't the same as the
+ python float precision.
+
+ """
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
- if tp(1e10).itemsize > 4:
- assert_equal(str(tp(1e10)), str(float('1e10')),
+ if tp(1e16).itemsize > 4:
+ assert_equal(str(tp(1e16)), str(float('1e16')),
err_msg='Failed str formatting for type %s' % tp)
else:
- ref = '1e+10'
- assert_equal(str(tp(1e10)), ref,
+ ref = '1e+16'
+ assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
-def test_float_types():
- """ Check formatting.
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_nan_inf_float(tp):
+ """ Check formatting of nan & inf.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
- for t in [np.float32, np.double, np.longdouble]:
- yield check_float_type, t
-
-def check_nan_inf_float(tp):
for x in [np.inf, -np.inf, np.nan]:
assert_equal(str(tp(x)), _REF[x],
err_msg='Failed str formatting for type %s' % tp)
-def test_nan_inf_float():
- """ Check formatting of nan & inf.
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_types(tp):
+ """Check formatting of complex types.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
- for t in [np.float32, np.double, np.longdouble]:
- yield check_nan_inf_float, t
-
-def check_complex_type(tp):
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(complex(x)),
err_msg='Failed str formatting for type %s' % tp)
@@ -67,51 +70,39 @@ def check_complex_type(tp):
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
err_msg='Failed str formatting for type %s' % tp)
- if tp(1e10).itemsize > 8:
- assert_equal(str(tp(1e10)), str(complex(1e10)),
+ if tp(1e16).itemsize > 8:
+ assert_equal(str(tp(1e16)), str(complex(1e16)),
err_msg='Failed str formatting for type %s' % tp)
else:
- ref = '(1e+10+0j)'
- assert_equal(str(tp(1e10)), ref,
+ ref = '(1e+16+0j)'
+ assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
-def test_complex_types():
- """Check formatting of complex types.
-
- This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
- python float precision.
- """
- for t in [np.complex64, np.cdouble, np.clongdouble]:
- yield check_complex_type, t
-
-def test_complex_inf_nan():
+@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_inf_nan(dtype):
"""Check inf/nan formatting of complex types."""
TESTS = {
complex(np.inf, 0): "(inf+0j)",
- complex(0, np.inf): "inf*j",
+ complex(0, np.inf): "infj",
complex(-np.inf, 0): "(-inf+0j)",
- complex(0, -np.inf): "-inf*j",
+ complex(0, -np.inf): "-infj",
complex(np.inf, 1): "(inf+1j)",
- complex(1, np.inf): "(1+inf*j)",
+ complex(1, np.inf): "(1+infj)",
complex(-np.inf, 1): "(-inf+1j)",
- complex(1, -np.inf): "(1-inf*j)",
+ complex(1, -np.inf): "(1-infj)",
complex(np.nan, 0): "(nan+0j)",
- complex(0, np.nan): "nan*j",
+ complex(0, np.nan): "nanj",
complex(-np.nan, 0): "(nan+0j)",
- complex(0, -np.nan): "nan*j",
+ complex(0, -np.nan): "nanj",
complex(np.nan, 1): "(nan+1j)",
- complex(1, np.nan): "(1+nan*j)",
+ complex(1, np.nan): "(1+nanj)",
complex(-np.nan, 1): "(nan+1j)",
- complex(1, -np.nan): "(1+nan*j)",
+ complex(1, -np.nan): "(1+nanj)",
}
- for tp in [np.complex64, np.cdouble, np.clongdouble]:
- for c, s in TESTS.items():
- yield _check_complex_inf_nan, c, s, tp
+ for c, s in TESTS.items():
+ assert_equal(str(dtype(c)), s)
-def _check_complex_inf_nan(c, s, dtype):
- assert_equal(str(dtype(c)), s)
# print tests
def _test_redirected_print(x, tp, ref=None):
@@ -132,44 +123,41 @@ def _test_redirected_print(x, tp, ref=None):
assert_equal(file.getvalue(), file_tp.getvalue(),
err_msg='print failed for type%s' % tp)
-def check_float_type_print(tp):
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_type_print(tp):
+ """Check formatting when using print """
for x in [0, 1, -1, 1e20]:
_test_redirected_print(float(x), tp)
for x in [np.inf, -np.inf, np.nan]:
_test_redirected_print(float(x), tp, _REF[x])
- if tp(1e10).itemsize > 4:
- _test_redirected_print(float(1e10), tp)
+ if tp(1e16).itemsize > 4:
+ _test_redirected_print(float(1e16), tp)
else:
- ref = '1e+10'
- _test_redirected_print(float(1e10), tp, ref)
+ ref = '1e+16'
+ _test_redirected_print(float(1e16), tp, ref)
+
-def check_complex_type_print(tp):
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_type_print(tp):
+ """Check formatting when using print """
# We do not create complex with inf/nan directly because the feature is
# missing in python < 2.6
for x in [0, 1, -1, 1e20]:
_test_redirected_print(complex(x), tp)
- if tp(1e10).itemsize > 8:
- _test_redirected_print(complex(1e10), tp)
+ if tp(1e16).itemsize > 8:
+ _test_redirected_print(complex(1e16), tp)
else:
- ref = '(1e+10+0j)'
- _test_redirected_print(complex(1e10), tp, ref)
+ ref = '(1e+16+0j)'
+ _test_redirected_print(complex(1e16), tp, ref)
_test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
_test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
_test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
-def test_float_type_print():
- """Check formatting when using print """
- for t in [np.float32, np.double, np.longdouble]:
- yield check_float_type_print, t
-
-def test_complex_type_print():
- """Check formatting when using print """
- for t in [np.complex64, np.cdouble, np.clongdouble]:
- yield check_complex_type_print, t
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
@@ -186,12 +174,10 @@ def test_scalar_format():
('{0:g}', 1.5, np.float16),
('{0:g}', 1.5, np.float32),
('{0:g}', 1.5, np.float64),
- ('{0:g}', 1.5, np.longdouble)]
- # Python 2.6 doesn't implement complex.__format__
- if sys.version_info[:2] > (2, 6):
- tests += [('{0:g}', 1.5+0.5j, np.complex64),
- ('{0:g}', 1.5+0.5j, np.complex128),
- ('{0:g}', 1.5+0.5j, np.clongdouble)]
+ ('{0:g}', 1.5, np.longdouble),
+ ('{0:g}', 1.5+0.5j, np.complex64),
+ ('{0:g}', 1.5+0.5j, np.complex128),
+ ('{0:g}', 1.5+0.5j, np.clongdouble)]
for (fmat, val, valtype) in tests:
try:
@@ -203,46 +189,17 @@ def test_scalar_format():
(fmat, repr(val), repr(valtype), str(e)))
+#
# Locale tests: scalar types formatting should be independent of the locale
-def in_foreign_locale(func):
- """
- Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.'
- If not possible, raise SkipTest
+#
- """
- if sys.platform == 'win32':
- locales = ['FRENCH']
- else:
- locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
- def wrapper(*args, **kwargs):
- curloc = locale.getlocale(locale.LC_NUMERIC)
- try:
- for loc in locales:
- try:
- locale.setlocale(locale.LC_NUMERIC, loc)
- break
- except locale.Error:
- pass
- else:
- raise SkipTest("Skipping locale test, because "
- "French locale not found")
- return func(*args, **kwargs)
- finally:
- locale.setlocale(locale.LC_NUMERIC, locale=curloc)
- return nose.tools.make_decorator(func)(wrapper)
-
-@in_foreign_locale
-def test_locale_single():
- assert_equal(str(np.float32(1.2)), str(float(1.2)))
-
-@in_foreign_locale
-def test_locale_double():
- assert_equal(str(np.double(1.2)), str(float(1.2)))
-
-@in_foreign_locale
-def test_locale_longdouble():
- assert_equal(str(np.longdouble(1.2)), str(float(1.2)))
-
-if __name__ == "__main__":
- run_module_suite()
+ def test_locale_single(self):
+ assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+ def test_locale_double(self):
+ assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+ def test_locale_longdouble(self):
+ assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 2c85546a7..c059ef510 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -1,19 +1,26 @@
from __future__ import division, absolute_import, print_function
import sys
-import collections
-import pickle
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+import textwrap
from os import path
+import pytest
import numpy as np
-from numpy.compat import asbytes
+from numpy.compat import Path
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, temppath
)
+from numpy.core.numeric import pickle
-class TestFromrecords(TestCase):
+class TestFromrecords(object):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
@@ -27,19 +34,45 @@ class TestFromrecords(TestCase):
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
+ def test_fromrecords_0len(self):
+ """ Verify fromrecords works with a 0-length input """
+ dtype = [('a', float), ('b', float)]
+ r = np.rec.fromrecords([], dtype=dtype)
+ assert_equal(r.shape, (0,))
+
+ def test_fromrecords_2d(self):
+ data = [
+ [(1, 2), (3, 4), (5, 6)],
+ [(6, 5), (4, 3), (2, 1)]
+ ]
+ expected_a = [[1, 3, 5], [6, 4, 2]]
+ expected_b = [[2, 4, 6], [5, 3, 1]]
+
+ # try with dtype
+ r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+ assert_equal(r1['a'], expected_a)
+ assert_equal(r1['b'], expected_b)
+
+ # try with names
+ r2 = np.rec.fromrecords(data, names=['a', 'b'])
+ assert_equal(r2['a'], expected_a)
+ assert_equal(r2['b'], expected_b)
+
+ assert_equal(r1, r2)
+
def test_method_array(self):
- r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
- assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
+ r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
+ assert_equal(r[1].item(), (25444, b'efg', 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
- assert_equal(r[1].item(), (2, 22.0, asbytes('b')))
+ assert_equal(r[1].item(), (2, 22.0, b'b'))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
- assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))
+ assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
@@ -75,6 +108,42 @@ class TestFromrecords(TestCase):
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
+ def test_recarray_repr(self):
+ a = np.array([(1, 0.1), (2, 0.2)],
+ dtype=[('foo', '<i4'), ('bar', '<f8')])
+ a = np.rec.array(a)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ rec.array([(1, 0.1), (2, 0.2)],
+ dtype=[('foo', '<i4'), ('bar', '<f8')])""")
+ )
+
+ # make sure non-structured dtypes also show up as rec.array
+ a = np.array(np.ones(4, dtype='f8'))
+ assert_(repr(np.rec.array(a)).startswith('rec.array'))
+
+ # check that the 'np.record' part of the dtype isn't shown
+ a = np.rec.array(np.ones(3, dtype='i4,i4'))
+ assert_equal(repr(a).find('numpy.record'), -1)
+ a = np.rec.array(np.ones(3, dtype='i4'))
+ assert_(repr(a).find('dtype=int32') != -1)
+
+ def test_0d_recarray_repr(self):
+ arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
+ assert_equal(repr(arr_0d), textwrap.dedent("""\
+ rec.array((1, 2., '2003'),
+ dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
+
+ record = arr_0d[()]
+ assert_equal(repr(record), "(1, 2., '2003')")
+ # 1.13 converted to python scalars before the repr
+ try:
+ np.set_printoptions(legacy='1.13')
+ assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
+ finally:
+ np.set_printoptions(legacy=False)
+
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
@@ -126,8 +195,6 @@ class TestFromrecords(TestCase):
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
- assert_equal(r[['a', 'b']].dtype.type, np.record)
- assert_equal(type(r[['a', 'b']]), np.recarray)
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
@@ -162,17 +229,6 @@ class TestFromrecords(TestCase):
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
- def test_recarray_repr(self):
- # make sure non-structured dtypes also show up as rec.array
- a = np.array(np.ones(4, dtype='f8'))
- assert_(repr(np.rec.array(a)).startswith('rec.array'))
-
- # check that the 'np.record' part of the dtype isn't shown
- a = np.rec.array(np.ones(3, dtype='i4,i4'))
- assert_equal(repr(a).find('numpy.record'), -1)
- a = np.rec.array(np.ones(3, dtype='i4'))
- assert_(repr(a).find('dtype=int32') != -1)
-
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
@@ -202,17 +258,17 @@ class TestFromrecords(TestCase):
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
- assert_(isinstance(ra.field, collections.Callable))
+ assert_(isinstance(ra.field, collections_abc.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
- dtype=[('a', int), ('b', np.object)])
+ dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
- ndtype = np.dtype([('a', int), ('b', np.object)])
+ ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
@@ -249,14 +305,45 @@ class TestFromrecords(TestCase):
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
- assert_equal(a[0].qux.D, asbytes('fgehi'))
- assert_equal(a[0].qux['D'], asbytes('fgehi'))
- assert_equal(a[0]['qux'].D, asbytes('fgehi'))
- assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
-
-
-class TestRecord(TestCase):
- def setUp(self):
+ assert_equal(a[0].qux.D, b'fgehi')
+ assert_equal(a[0].qux['D'], b'fgehi')
+ assert_equal(a[0]['qux'].D, b'fgehi')
+ assert_equal(a[0]['qux']['D'], b'fgehi')
+
+ def test_zero_width_strings(self):
+ # Test for #6430, based on the test case from #1901
+
+ cols = [['test'] * 3, [''] * 3]
+ rec = np.rec.fromarrays(cols)
+ assert_equal(rec['f0'], ['test', 'test', 'test'])
+ assert_equal(rec['f1'], ['', '', ''])
+
+ dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
+ rec = np.rec.fromarrays(cols, dtype=dt)
+ assert_equal(rec.itemsize, 4)
+ assert_equal(rec['f0'], [b'test', b'test', b'test'])
+ assert_equal(rec['f1'], [b'', b'', b''])
+
+
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+class TestPathUsage(object):
+ # Test that pathlib.Path can be used
+ def test_tofile_fromfile(self):
+ with temppath(suffix='.bin') as path:
+ path = Path(path)
+ np.random.seed(123)
+ a = np.random.rand(10).astype('f8,i4,a5')
+ a[5] = (0.5,10,'abcde')
+ with path.open("wb") as fd:
+ a.tofile(fd)
+ x = np.core.records.fromfile(path,
+ formats='f8,i4,a5',
+ shape=10)
+ assert_array_equal(x, a)
+
+
+class TestRecord(object):
+ def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
@@ -280,33 +367,54 @@ class TestRecord(TestCase):
def assign_invalid_column(x):
x[0].col5 = 1
- self.assertRaises(AttributeError, assign_invalid_column, a)
+ assert_raises(AttributeError, assign_invalid_column, a)
+
+ def test_nonwriteable_setfield(self):
+ # gh-8171
+ r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
+ r.flags.writeable = False
+ with assert_raises(ValueError):
+ r.f = [2, 3]
+ with assert_raises(ValueError):
+ r.setfield([2,3], *r.dtype.fields['f'])
def test_out_of_order_fields(self):
- """Ticket #1431."""
+ # names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
+ assert_equal(x.dtype.names, ('col1', 'col2'))
+ assert_equal(x.dtype.descr,
+ [('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
+
+ # names change order to match indexing, as of 1.14 - descr can't
+ # represent that
y = self.data[['col2', 'col1']]
- assert_equal(x[0][0], y[0][1])
+ assert_equal(y.dtype.names, ('col2', 'col1'))
+ assert_raises(ValueError, lambda: y.dtype.descr)
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
- assert_equal(a, pickle.loads(pickle.dumps(a)))
- assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
def test_pickle_2(self):
a = self.data
- assert_equal(a, pickle.loads(pickle.dumps(a)))
- assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
def test_pickle_3(self):
# Issue #7140
a = self.data
- pa = pickle.loads(pickle.dumps(a[0]))
- assert_(pa.flags.c_contiguous)
- assert_(pa.flags.f_contiguous)
- assert_(pa.flags.writeable)
- assert_(pa.flags.aligned)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
+ assert_(pa.flags.c_contiguous)
+ assert_(pa.flags.f_contiguous)
+ assert_(pa.flags.writeable)
+ assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
@@ -329,6 +437,7 @@ class TestRecord(TestCase):
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
+
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
@@ -341,6 +450,3 @@ def test_find_duplicate():
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index ace2c1814..2421a1161 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1,85 +1,90 @@
from __future__ import division, absolute_import, print_function
import copy
-import pickle
import sys
-import platform
import gc
-import warnings
import tempfile
+import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal,
- assert_almost_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns, dec
+ assert_, assert_equal, IS_PYPY, assert_almost_equal,
+ assert_array_equal, assert_array_almost_equal, assert_raises,
+ assert_raises_regex, assert_warns, suppress_warnings,
+ _assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.testing.utils import _assert_valid_refcount
-from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
+from numpy.compat import asbytes, asunicode, long
+from numpy.core.numeric import pickle
-rlevel = 1
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError # python < 3.5
-class TestRegression(TestCase):
- def test_invalid_round(self,level=rlevel):
+class TestRegression(object):
+ def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
- def test_mem_empty(self,level=rlevel):
+ def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
- def test_pickle_transposed(self,level=rlevel):
+ def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
- f = BytesIO()
- pickle.dump(a, f)
- f.seek(0)
- b = pickle.load(f)
- f.close()
- assert_array_equal(a, b)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(a, f, protocol=proto)
+ f.seek(0)
+ b = pickle.load(f)
+ f.close()
+ assert_array_equal(a, b)
- def test_typeNA(self,level=rlevel):
- # Ticket #31
- assert_equal(np.typeNA[np.int64], 'Int64')
- assert_equal(np.typeNA[np.uint64], 'UInt64')
+ def test_typeNA(self):
+ # Issue gh-515
+ with suppress_warnings() as sup:
+ sup.filter(np.VisibleDeprecationWarning)
+ assert_equal(np.typeNA[np.int64], 'Int64')
+ assert_equal(np.typeNA[np.uint64], 'UInt64')
- def test_dtype_names(self,level=rlevel):
+ def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
- def test_reduce(self,level=rlevel):
+ def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
- def test_zeros_order(self,level=rlevel):
+ def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
- def test_asarray_with_order(self,level=rlevel):
+ def test_asarray_with_order(self):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
- def test_ravel_with_order(self,level=rlevel):
+ def test_ravel_with_order(self):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
- def test_sort_bigendian(self,level=rlevel):
+ def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
- def test_negative_nd_indexing(self,level=rlevel):
+ def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
@@ -87,16 +92,17 @@ class TestRegression(TestCase):
c[idx]
assert_array_equal(idx, origidx)
- def test_char_dump(self,level=rlevel):
+ def test_char_dump(self):
# Ticket #50
- f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
- ca.dump(f)
- f.seek(0)
- ca = np.load(f)
- f.close()
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(ca, f, protocol=proto)
+ f.seek(0)
+ ca = np.load(f)
+ f.close()
- def test_noncontiguous_fill(self,level=rlevel):
+ def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
@@ -104,77 +110,63 @@ class TestRegression(TestCase):
def rs():
b.shape = (10,)
- self.assertRaises(AttributeError, rs)
+ assert_raises(AttributeError, rs)
- def test_bool(self,level=rlevel):
+ def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
- def test_indexing1(self,level=rlevel):
+ def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
- def test_indexing2(self,level=rlevel):
+ def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
- def test_round(self,level=rlevel):
+ def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
- def test_scalar_compare(self,level=rlevel):
+ def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
- self.assertTrue(a[1] == 'auto')
- self.assertTrue(a[0] != 'auto')
+ assert_(a[1] == 'auto')
+ assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", category=DeprecationWarning)
- self.assertTrue(b != 'auto')
- self.assertTrue(b[0] != 'auto')
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning)
+ assert_(b != 'auto')
+ assert_(b[0] != 'auto')
- def test_unicode_swapping(self,level=rlevel):
+ def test_unicode_swapping(self):
# Ticket #79
ulen = 1
- ucs_value = sixu('\U0010FFFF')
+ ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
- def test_object_array_fill(self,level=rlevel):
+ def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
- def test_mem_dtype_align(self,level=rlevel):
+ def test_mem_dtype_align(self):
# Ticket #93
- self.assertRaises(TypeError, np.dtype,
- {'names':['a'],'formats':['foo']}, align=1)
-
- @dec.knownfailureif((sys.version_info[0] >= 3) or
- (sys.platform == "win32" and
- platform.architecture()[0] == "64bit"),
- "numpy.intp('0xff', 16) not supported on Py3, "
- "as it does not inherit from Python int")
- def test_intp(self,level=rlevel):
- # Ticket #99
- i_width = np.int_(0).nbytes*2 - 1
- np.intp('0x' + 'f'*i_width, 16)
- self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
- self.assertRaises(ValueError, np.intp, '0x1', 32)
- assert_equal(255, np.intp('0xFF', 16))
- assert_equal(1024, np.intp(1024))
-
- def test_endian_bool_indexing(self,level=rlevel):
+ assert_raises(TypeError, np.dtype,
+ {'names':['a'], 'formats':['foo']}, align=1)
+
+ def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
@@ -187,7 +179,7 @@ class TestRegression(TestCase):
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
- def test_endian_where(self,level=rlevel):
+ def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
@@ -197,7 +189,7 @@ class TestRegression(TestCase):
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
- def test_endian_recarray(self,level=rlevel):
+ def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
@@ -213,7 +205,7 @@ class TestRegression(TestCase):
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
- def test_mem_dot(self,level=rlevel):
+ def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
@@ -227,7 +219,7 @@ class TestRegression(TestCase):
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
- def test_arange_endian(self,level=rlevel):
+ def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
@@ -235,31 +227,67 @@ class TestRegression(TestCase):
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
- def test_argmax(self,level=rlevel):
+ def test_arange_inf_step(self):
+ ref = np.arange(0, 1, 10)
+ x = np.arange(0, 1, np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, 1, -10)
+ x = np.arange(0, 1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, -10)
+ x = np.arange(0, -1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, 10)
+ x = np.arange(0, -1, np.inf)
+ assert_array_equal(ref, x)
+
+ def test_arange_underflow_stop_and_step(self):
+ finfo = np.finfo(np.float64)
+
+ ref = np.arange(0, finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
+ def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
- def test_mem_divmod(self,level=rlevel):
+ def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
- def test_hstack_invalid_dims(self,level=rlevel):
+ def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
- self.assertRaises(ValueError, np.hstack, (x, y))
+ assert_raises(ValueError, np.hstack, (x, y))
- def test_squeeze_type(self,level=rlevel):
+ def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
- def test_add_identity(self,level=rlevel):
+ def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
@@ -268,11 +296,11 @@ class TestRegression(TestCase):
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
- def test_binary_repr_0(self,level=rlevel):
+ def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
- def test_rec_iterate(self,level=rlevel):
+ def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
@@ -280,19 +308,19 @@ class TestRegression(TestCase):
x[0].tolist()
[i for i in x[0]]
- def test_unicode_string_comparison(self,level=rlevel):
+ def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
- def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
+ def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
- assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
+ assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))
- def test_flat_assignment(self,level=rlevel):
+ def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
@@ -300,7 +328,7 @@ class TestRegression(TestCase):
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
- def test_broadcast_flat_assignment(self,level=rlevel):
+ def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
@@ -310,8 +338,8 @@ class TestRegression(TestCase):
def bfb():
x[:] = np.arange(3, dtype=float)
- self.assertRaises(ValueError, bfa)
- self.assertRaises(ValueError, bfb)
+ assert_raises(ValueError, bfa)
+ assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
@@ -328,25 +356,26 @@ class TestRegression(TestCase):
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
- def test_unpickle_dtype_with_object(self,level=rlevel):
+ def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
- f = BytesIO()
- pickle.dump(dt, f)
- f.seek(0)
- dt_ = pickle.load(f)
- f.close()
- assert_equal(dt, dt_)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(dt, f, protocol=proto)
+ f.seek(0)
+ dt_ = pickle.load(f)
+ f.close()
+ assert_equal(dt, dt_)
- def test_mem_array_creation_invalid_specification(self,level=rlevel):
+ def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
- self.assertRaises(ValueError, np.array, [1, 'object'], dt)
+ assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
- def test_recarray_single_element(self,level=rlevel):
+ def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
@@ -354,23 +383,23 @@ class TestRegression(TestCase):
assert_array_equal(a, b)
assert_equal(a, r[0][0])
- def test_zero_sized_array_indexing(self,level=rlevel):
+ def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
- self.assertRaises(IndexError, index_tmp)
+ assert_raises(IndexError, index_tmp)
- def test_chararray_rstrip(self,level=rlevel):
+ def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
- x[0] = asbytes('a ')
+ x[0] = b'a '
x = x.rstrip()
- assert_equal(x[0], asbytes('a'))
+ assert_equal(x[0], b'a')
- def test_object_array_shape(self,level=rlevel):
+ def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
@@ -379,20 +408,20 @@ class TestRegression(TestCase):
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
- def test_mem_around(self,level=rlevel):
+ def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
- def test_character_array_strip(self,level=rlevel):
+ def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
- def test_lexsort(self,level=rlevel):
+ def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
@@ -415,23 +444,23 @@ class TestRegression(TestCase):
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
- asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
- "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
- "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
+ b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+ b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
+ b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),
(np.array([9e123], dtype=np.float64),
- asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
- "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
- "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
- "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
+ b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
+ b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
+ b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
+ b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),
(np.array([(9e123,)], dtype=[('name', float)]),
- asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
- "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
- "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
- "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
- "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
- "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
+ b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
+ b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
+ b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
+ b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
+ b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
+ b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
]
if sys.version_info[:2] >= (3, 4):
@@ -444,36 +473,37 @@ class TestRegression(TestCase):
for name in result.dtype.names:
assert_(isinstance(name, str))
- def test_pickle_dtype(self,level=rlevel):
+ def test_pickle_dtype(self):
# Ticket #251
- pickle.dumps(np.float)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pickle.dumps(float, protocol=proto)
- def test_swap_real(self, level=rlevel):
+ def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
- def test_object_array_from_list(self, level=rlevel):
+ def test_object_array_from_list(self):
# Ticket #270
- np.array([1, 'A', None]) # Should succeed
+ assert_(np.array([1, 'A', None]).shape == (3,))
- def test_multiple_assign(self, level=rlevel):
+ def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
- def test_empty_array_type(self, level=rlevel):
+ def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
- def test_void_copyswap(self, level=rlevel):
+ def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
- def test_method_args(self, level=rlevel):
+ def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
@@ -515,17 +545,17 @@ class TestRegression(TestCase):
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
- def test_mem_lexsort_strings(self, level=rlevel):
+ def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
- def test_fancy_index(self, level=rlevel):
+ def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
- def test_recarray_copy(self, level=rlevel):
+ def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
@@ -533,64 +563,64 @@ class TestRegression(TestCase):
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
- def test_rec_fromarray(self, level=rlevel):
+ def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
- def test_object_array_assign(self, level=rlevel):
+ def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
- def test_ndmin_float64(self, level=rlevel):
+ def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
- def test_ndmin_order(self, level=rlevel):
+ def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
- def test_mem_axis_minimization(self, level=rlevel):
+ def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
- def test_mem_float_imag(self, level=rlevel):
+ def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
- def test_dtype_tuple(self, level=rlevel):
+ def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
- def test_dtype_posttuple(self, level=rlevel):
+ def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
- def test_numeric_carray_compare(self, level=rlevel):
+ def test_numeric_carray_compare(self):
# Ticket #341
- assert_equal(np.array(['X'], 'c'), asbytes('X'))
+ assert_equal(np.array(['X'], 'c'), b'X')
- def test_string_array_size(self, level=rlevel):
+ def test_string_array_size(self):
# Ticket #342
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
- def test_dtype_repr(self, level=rlevel):
+ def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
- def test_reshape_order(self, level=rlevel):
+ def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
@@ -598,20 +628,21 @@ class TestRegression(TestCase):
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
- def test_reshape_zero_strides(self, level=rlevel):
+ def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
- def test_reshape_zero_size(self, level=rlevel):
+ def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
@@ -621,22 +652,22 @@ class TestRegression(TestCase):
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
- def test_repeat_discont(self, level=rlevel):
+ def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
- def test_array_index(self, level=rlevel):
+ def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
- def test_object_argmax(self, level=rlevel):
+ def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
- def test_recarray_fields(self, level=rlevel):
+ def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
@@ -647,22 +678,22 @@ class TestRegression(TestCase):
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
- def test_random_shuffle(self, level=rlevel):
+ def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
- def test_refcount_vdot(self, level=rlevel):
+ def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
- def test_startswith(self, level=rlevel):
+ def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
- def test_noncommutative_reduce_accumulate(self, level=rlevel):
+ def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
@@ -673,28 +704,28 @@ class TestRegression(TestCase):
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
- def test_convolve_empty(self, level=rlevel):
+ def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
- self.assertRaises(ValueError, np.convolve, [], [1])
- self.assertRaises(ValueError, np.convolve, [1], [])
+ assert_raises(ValueError, np.convolve, [], [1])
+ assert_raises(ValueError, np.convolve, [1], [])
- def test_multidim_byteswap(self, level=rlevel):
+ def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
- def test_string_NULL(self, level=rlevel):
+ def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
- def test_junk_in_string_fields_of_recarray(self, level=rlevel):
+ def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
- r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
- assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
+ r = np.array([[b'abc']], dtype=[('var1', '|S20')])
+ assert_(asbytes(r['var1'][0][0]) == b'abc')
- def test_take_output(self, level=rlevel):
+ def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
@@ -706,20 +737,22 @@ class TestRegression(TestCase):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
- ref_d = sys.getrefcount(d)
+ if HAS_REFCOUNT:
+ ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
- assert_(ref_d == sys.getrefcount(d))
+ if HAS_REFCOUNT:
+ assert_(ref_d == sys.getrefcount(d))
- def test_array_str_64bit(self, level=rlevel):
+ def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
- def test_frompyfunc_endian(self, level=rlevel):
+ def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
@@ -728,33 +761,33 @@ class TestRegression(TestCase):
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
- def test_mem_string_arr(self, level=rlevel):
+ def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
- def test_arr_transpose(self, level=rlevel):
+ def test_arr_transpose(self):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
- def test_string_mergesort(self, level=rlevel):
+ def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
- def test_argmax_byteorder(self, level=rlevel):
+ def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
- def test_rand_seed(self, level=rlevel):
+ def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
- def test_mem_deallocation_leak(self, level=rlevel):
+ def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
@@ -762,9 +795,9 @@ class TestRegression(TestCase):
def test_mem_on_invalid_dtype(self):
"Ticket #583"
- self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
+ assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
- def test_dot_negative_stride(self, level=rlevel):
+ def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
@@ -772,7 +805,7 @@ class TestRegression(TestCase):
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
- def test_object_casting(self, level=rlevel):
+ def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
@@ -781,16 +814,17 @@ class TestRegression(TestCase):
y = np.zeros([484, 286])
x |= y
- self.assertRaises(TypeError, rs)
+ assert_raises(TypeError, rs)
- def test_unicode_scalar(self, level=rlevel):
+ def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
- new = pickle.loads(pickle.dumps(el))
- assert_equal(new, el)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ new = pickle.loads(pickle.dumps(el, protocol=proto))
+ assert_equal(new, el)
- def test_arange_non_native_dtype(self, level=rlevel):
+ def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
@@ -798,93 +832,85 @@ class TestRegression(TestCase):
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
- def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
+ def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
- # After removing deprecation, the following are ValueErrors.
- # This might seem odd as compared to the value error below. This
- # is due to the fact that the new code always uses "nonzero" logic
- # and the boolean special case is not taken.
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
- self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
- self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
# Old special case (different code path):
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
- def test_mem_scalar_indexing(self, level=rlevel):
+ def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
- def test_binary_repr_0_width(self, level=rlevel):
+ def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
- def test_fromstring(self, level=rlevel):
+ def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
- def test_searchsorted_variable_length(self, level=rlevel):
+ def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
- def test_string_argsort_with_zeros(self, level=rlevel):
+ def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
- x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
- def test_string_sort_with_zeros(self, level=rlevel):
+ def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
- x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
- y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+ y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
- def test_copy_detection_zero_dim(self, level=rlevel):
+ def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
- def test_flat_byteorder(self, level=rlevel):
+ def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
- def test_uint64_from_negative(self, level=rlevel):
- assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
-
- def test_sign_bit(self, level=rlevel):
+ def test_sign_bit(self):
x = np.array([0, -0.0, 0])
- assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
+ assert_equal(str(np.abs(x)), '[0. 0. 0.]')
- def test_flat_index_byteswap(self, level=rlevel):
+ def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
- def test_copy_detection_corner_case(self, level=rlevel):
+ def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
- def test_copy_detection_corner_case2(self, level=rlevel):
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
+ def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
- def test_object_array_refcounting(self, level=rlevel):
+ def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
@@ -928,7 +954,7 @@ class TestRegression(TestCase):
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
- arr[:,:] = arr0
+ arr[:, :] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
@@ -987,7 +1013,7 @@ class TestRegression(TestCase):
del tmp # Avoid pyflakes unused variable warning
- def test_mem_custom_float_to_array(self, level=rlevel):
+ def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat(object):
def __float__(self):
@@ -996,7 +1022,7 @@ class TestRegression(TestCase):
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
- def test_object_array_refcount_self_assign(self, level=rlevel):
+ def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject(object):
deleted = False
@@ -1013,32 +1039,23 @@ class TestRegression(TestCase):
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
- def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
+ def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
- def test_reduce_big_object_array(self, level=rlevel):
+ def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
- def test_mem_0d_array_index(self, level=rlevel):
+ def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
- def test_floats_from_string(self, level=rlevel):
- # Ticket #640, floats from string
- fsingle = np.single('1.234')
- fdouble = np.double('1.234')
- flongdouble = np.longdouble('1.234')
- assert_almost_equal(fsingle, 1.234)
- assert_almost_equal(fdouble, 1.234)
- assert_almost_equal(flongdouble, 1.234)
-
- def test_nonnative_endian_fill(self, level=rlevel):
+ def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
@@ -1049,16 +1066,17 @@ class TestRegression(TestCase):
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
- def test_dot_alignment_sse2(self, level=rlevel):
+ def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
- y = pickle.loads(pickle.dumps(x))
- # y is now typically not aligned on a 8-byte boundary
- z = np.ones((1, y.shape[0]))
- # This shouldn't cause a segmentation fault:
- np.dot(z, y)
-
- def test_astype_copy(self, level=rlevel):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ y = pickle.loads(pickle.dumps(x, protocol=proto))
+ # y is now typically not aligned on a 8-byte boundary
+ z = np.ones((1, y.shape[0]))
+ # This shouldn't cause a segmentation fault:
+ np.dot(z, y)
+
+ def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
@@ -1076,7 +1094,7 @@ class TestRegression(TestCase):
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
- def test_compress_small_type(self, level=rlevel):
+ def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
@@ -1090,7 +1108,7 @@ class TestRegression(TestCase):
except TypeError:
pass
- def test_attributes(self, level=rlevel):
+ def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
@@ -1162,7 +1180,7 @@ class TestRegression(TestCase):
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
- def test_recarray_tolist(self, level=rlevel):
+ def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
@@ -1177,23 +1195,23 @@ class TestRegression(TestCase):
a = np.arange(5)
assert_raises(ValueError, a.item)
- def test_char_array_creation(self, level=rlevel):
+ def test_char_array_creation(self):
a = np.array('123', dtype='c')
- b = np.array(asbytes_nested(['1', '2', '3']))
+ b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
- def test_unaligned_unicode_access(self, level=rlevel):
+ def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
- x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
+ x = np.array([(b'a', u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
- def test_sign_for_complex_nan(self, level=rlevel):
+ def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
@@ -1201,7 +1219,7 @@ class TestRegression(TestCase):
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
- def test_for_equal_names(self, level=rlevel):
+ def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
@@ -1211,7 +1229,7 @@ class TestRegression(TestCase):
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
- def test_for_object_scalar_creation(self, level=rlevel):
+ def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
@@ -1228,31 +1246,18 @@ class TestRegression(TestCase):
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
- self.assertRaises(TypeError, x.resize, (2, 2), order='C')
+ assert_raises(TypeError, x.resize, (2, 2), order='C')
- def test_for_zero_length_in_choose(self, level=rlevel):
+ def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
- self.assertRaises(ValueError, lambda x: x.choose([]), a)
+ assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
- self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
+ assert_raises(ValueError, lambda: np.array([1], ndmin=33))
- def test_errobj_reference_leak(self, level=rlevel):
- # Ticket #955
- with np.errstate(all="ignore"):
- z = int(0)
- p = np.int32(-1)
-
- gc.collect()
- n_before = len(gc.get_objects())
- z**p # this shouldn't leak a reference to errobj
- gc.collect()
- n_after = len(gc.get_objects())
- assert_(n_before >= n_after, (n_before, n_after))
-
- def test_void_scalar_with_titles(self, level=rlevel):
+ def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
@@ -1279,9 +1284,12 @@ class TestRegression(TestCase):
assert_(test_record_void_scalar == test_record)
- #Test pickle and unpickle of void and record scalars
- assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
- assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
+ # Test pickle and unpickle of void and record scalars
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)) == test_string)
+ assert_(pickle.loads(
+ pickle.dumps(test_record, protocol=proto)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
@@ -1290,11 +1298,17 @@ class TestRegression(TestCase):
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
- x.resize((m, 0))
+ if IS_PYPY:
+ x.resize((m, 0), refcheck=False)
+ else:
+ x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
- y.resize((0, n))
+ if IS_PYPY:
+ y.resize((0, n), refcheck=False)
+ else:
+ y.resize((0, n))
- # `dot` should just return zero (m,n) matrix
+ # `dot` should just return zero (m, n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
@@ -1303,28 +1317,18 @@ class TestRegression(TestCase):
# Regression test for #1061.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
- good = 'Maximum allowed dimension exceeded'
- try:
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed dimension exceeded'):
np.empty(sz)
- except ValueError as e:
- if not str(e) == good:
- self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception as e:
- self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
- good = 'Maximum allowed size exceeded'
- try:
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed size exceeded'):
np.arange(sz)
- self.assertTrue(np.size == sz)
- except ValueError as e:
- if not str(e) == good:
- self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception as e:
- self.fail("Got exception of type %s instead of ValueError" % type(e))
+ assert_(np.size == sz)
def test_fromiter_bytes(self):
# Ticket #1058
@@ -1364,14 +1368,14 @@ class TestRegression(TestCase):
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
- assert_raises(ValueError, dt.__getitem__, 0.0)
+ assert_raises(TypeError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
- assert_equal(i, np.arange(100, dtype=np.int))
+ assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
@@ -1388,21 +1392,21 @@ class TestRegression(TestCase):
def test_unicode_to_string_cast(self):
# Ticket #1240.
- a = np.array([[sixu('abc'), sixu('\u03a3')],
- [sixu('asdf'), sixu('erw')]],
+ a = np.array([[u'abc', u'\u03a3'],
+ [u'asdf', u'erw']],
dtype='U')
- self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
- a = np.array(['1234', sixu('123')])
+ a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
- a = np.array([sixu('123'), '1234'])
+ a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
- a = np.array(['1234', sixu('123'), '12345'])
+ a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
- a = np.array([sixu('123'), '1234', sixu('12345')])
+ a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
- a = np.array([sixu('123'), '1234', sixu('1234')])
+ a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
@@ -1442,10 +1446,10 @@ class TestRegression(TestCase):
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
- assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
- assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
@@ -1458,6 +1462,7 @@ class TestRegression(TestCase):
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
@@ -1472,7 +1477,7 @@ class TestRegression(TestCase):
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
- self.assertRaises(ValueError, np.dtype, dtspec)
+ assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
@@ -1481,7 +1486,7 @@ class TestRegression(TestCase):
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
test_type(t)
def test_buffer_hashlib(self):
@@ -1501,7 +1506,7 @@ class TestRegression(TestCase):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
- def test_fromiter_comparison(self, level=rlevel):
+ def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
@@ -1509,25 +1514,23 @@ class TestRegression(TestCase):
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
- np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
+ np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
- a = np.array([], dtypes[0])
+ a = np.array([], np.bool_) # not x[0] because it is unordered
failures = []
- # ignore complex warnings
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', np.ComplexWarning)
- for x in dtypes:
- b = a.astype(x)
- for y in dtypes:
- c = a.astype(y)
- try:
- np.dot(b, c)
- except TypeError:
- failures.append((x, y))
+
+ for x in dtypes:
+ b = a.astype(x)
+ for y in dtypes:
+ c = a.astype(y)
+ try:
+ np.dot(b, c)
+ except TypeError:
+ failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
@@ -1572,11 +1575,12 @@ class TestRegression(TestCase):
y = np.add(x, x, x)
assert_equal(id(x), id(y))
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
# ticket #939
- a = np.arange(16, dtype=np.float)
+ a = np.arange(16, dtype=float)
a.shape = (4, 4)
- lut = np.ones((5 + 3, 4), np.float)
+ lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
@@ -1604,7 +1608,7 @@ class TestRegression(TestCase):
f.seek(40)
data = f.read(3)
- assert_equal(data, asbytes("\x01\x02\x03"))
+ assert_equal(data, b"\x01\x02\x03")
f.seek(80)
f.read(4)
@@ -1617,8 +1621,8 @@ class TestRegression(TestCase):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
@@ -1704,11 +1708,72 @@ class TestRegression(TestCase):
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
- b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
+ b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
+ def test_squeeze_axis_handling(self):
+ # Issue #10779
+ # Ensure proper handling of objects
+ # that don't support axis specification
+ # when squeezing
+
+ class OldSqueeze(np.ndarray):
+
+ def __new__(cls,
+ input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+ # it is perfectly reasonable that prior
+ # to numpy version 1.7.0 a subclass of ndarray
+ # might have been created that did not expect
+ # squeeze to have an axis argument
+ # NOTE: this example is somewhat artificial;
+ # it is designed to simulate an old API
+ # expectation to guard against regression
+ def squeeze(self):
+ return super(OldSqueeze, self).squeeze()
+
+ oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
+
+ # if no axis argument is specified the old API
+ # expectation should give the correct result
+ assert_equal(np.squeeze(oldsqueeze),
+ np.array([1,2,3]))
+
+ # likewise, axis=None should work perfectly well
+ # with the old API expectation
+ assert_equal(np.squeeze(oldsqueeze, axis=None),
+ np.array([1,2,3]))
+
+ # however, specification of any particular axis
+ # should raise a TypeError in the context of the
+ # old API specification, even when using a valid
+ # axis specification like 1 for this array
+ with assert_raises(TypeError):
+ # this would silently succeed for array
+ # subclasses / objects that did not support
+ # squeeze axis argument handling before fixing
+ # Issue #10779
+ np.squeeze(oldsqueeze, axis=1)
+
+ # check for the same behavior when using an invalid
+ # axis specification -- in this case axis=0 does not
+ # have size 1, but the priority should be to raise
+ # a TypeError for the axis argument and NOT a
+ # ValueError for squeezing a non-empty dimension
+ with assert_raises(TypeError):
+ np.squeeze(oldsqueeze, axis=0)
+
+ # the new API knows how to handle the axis
+ # argument and will return a ValueError if
+ # attempting to squeeze an axis that is not
+ # of length 1
+ with assert_raises(ValueError):
+ np.squeeze(np.array([[1],[2],[3]]), axis=0)
+
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
@@ -1721,31 +1786,53 @@ class TestRegression(TestCase):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
- assert_raises(TypeError, int, a)
- assert_raises(TypeError, long, a)
- assert_raises(TypeError, float, a)
- assert_raises(TypeError, oct, a)
- assert_raises(TypeError, hex, a)
-
+ assert_raises(RecursionError, int, a)
+ assert_raises(RecursionError, long, a)
+ assert_raises(RecursionError, float, a)
+ if sys.version_info.major == 2:
+ # in python 3, this falls back on operator.index, which fails on
+ # on dtype=object
+ assert_raises(RecursionError, oct, a)
+ assert_raises(RecursionError, hex, a)
+ a[()] = None
+
+ def test_object_array_circular_reference(self):
# Test the same for a circular reference.
- b = np.array(a, dtype=object)
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
a[()] = b
- assert_raises(TypeError, int, a)
- # Numpy has no tp_traverse currently, so circular references
+ b[()] = a
+ assert_raises(RecursionError, int, a)
+ # NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
- a[()] = 0
+ a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
+ def test_object_array_nested(self):
+ # but is fine with a reference to a different array
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ assert_equal(int(a), int(0))
+ assert_equal(long(a), long(0))
+ assert_equal(float(a), float(0))
+ if sys.version_info.major == 2:
+ # in python 3, this falls back on operator.index, which fails on
+ # on dtype=object
+ assert_equal(oct(a), oct(0))
+ assert_equal(hex(a), hex(0))
+
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
- assert_equal(sys.getrefcount(a[()]), 2)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
@@ -1760,8 +1847,8 @@ class TestRegression(TestCase):
def test_setting_rank0_string(self):
"Ticket #1736"
- s1 = asbytes("hello1")
- s2 = asbytes("hello2")
+ s1 = b"hello1"
+ s2 = b"hello2"
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
@@ -1776,9 +1863,9 @@ class TestRegression(TestCase):
def test_string_astype(self):
"Ticket #1748"
- s1 = asbytes('black')
- s2 = asbytes('white')
- s3 = asbytes('other')
+ s1 = b'black'
+ s2 = b'white'
+ s3 = b'other'
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
@@ -1786,7 +1873,7 @@ class TestRegression(TestCase):
def test_ticket_1756(self):
# Ticket #1756
- s = asbytes('0123456789abcdef')
+ s = b'0123456789abcdef'
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
@@ -1794,8 +1881,8 @@ class TestRegression(TestCase):
assert_equal(a1, a2)
def test_fields_strides(self):
- "Ticket #1760"
- r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
+ "gh-2355"
+ r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
@@ -1817,7 +1904,7 @@ class TestRegression(TestCase):
a['f2'] = 1
except ValueError:
pass
- except:
+ except Exception:
raise AssertionError
def test_ticket_1608(self):
@@ -1842,21 +1929,22 @@ class TestRegression(TestCase):
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
- data = np.array([1], dtype='b')
- data = pickle.loads(pickle.dumps(data))
- data[0] = 0xdd
- bytestring = "\x01 ".encode('ascii')
- assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ data = np.array([1], dtype='b')
+ data = pickle.loads(pickle.dumps(data, protocol=proto))
+ data[0] = 0xdd
+ bytestring = "\x01 ".encode('ascii')
+ assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
- data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
- "tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
- "I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
- "p13\ntp14\nb.")
+ data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
+ b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
+ b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
+ b"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
@@ -1872,21 +1960,21 @@ class TestRegression(TestCase):
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
- asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
- "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
- "tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+ b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
+ b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
- asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
- "p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
- "bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
+ b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
+ b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
- (np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
- asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
- "I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
- "tp8\nRp9\n."),
+ (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
+ b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
+ b"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
@@ -1948,6 +2036,7 @@ class TestRegression(TestCase):
a = np.empty((100000000,), dtype='i1')
del a
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
@@ -1978,11 +2067,11 @@ class TestRegression(TestCase):
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
- # adjusted for Numpy's four byte unicode.
+ # adjusted for NumPy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
- a = np.array([sixu('abcd')])
+ a = np.array([u'abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
@@ -2051,8 +2140,8 @@ class TestRegression(TestCase):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
- arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
- [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
+ arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
+ [u'F', u'o', u'o', u'b', u'']]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
@@ -2077,8 +2166,31 @@ class TestRegression(TestCase):
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
- self.assertTrue(arr is not arr_cp)
- self.assertTrue(isinstance(arr_cp, type(arr)))
+ assert_(arr is not arr_cp)
+ assert_(isinstance(arr_cp, type(arr)))
+
+ def test_deepcopy_F_order_object_array(self):
+ # Ticket #6456.
+ a = {'a': 1}
+ b = {'b': 2}
+ arr = np.array([[a, b], [a, b]], order='F')
+ arr_cp = copy.deepcopy(arr)
+
+ assert_equal(arr, arr_cp)
+ assert_(arr is not arr_cp)
+ # Ensure that we have actually copied the item.
+ assert_(arr[0, 1] is not arr_cp[1, 1])
+ # Ensure we are allowed to have references to the same object.
+ assert_(arr[0, 1] is arr[1, 1])
+ # Check the references hold for the copied objects.
+ assert_(arr_cp[0, 1] is arr_cp[1, 1])
+
+ def test_deepcopy_empty_object_array(self):
+ # Ticket #8536.
+ # Deepcopy should succeed
+ a = np.array([], dtype=object)
+ b = copy.deepcopy(a)
+ assert_(a.shape == b.shape)
def test_bool_subscript_crash(self):
# gh-4494
@@ -2096,7 +2208,7 @@ class TestRegression(TestCase):
class Foo(object):
__array_priority__ = 1002
- def __array__(self,*args,**kwargs):
+ def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
@@ -2104,7 +2216,9 @@ class TestRegression(TestCase):
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
- else:
+ elif not sys.py3kwarning:
+ # With -3 switch in python 2, DeprecationWarning is raised
+ # which we are not interested in
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
@@ -2115,17 +2229,17 @@ class TestRegression(TestCase):
def __eq__(self, other):
return "OK"
- x = np.array([1,2,3]).view(Foo)
+ x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
-
- import pickle
- test_string = np.string_('')
- assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test_string = np.string_('')
+ assert_equal(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
@@ -2152,11 +2266,12 @@ class TestRegression(TestCase):
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
- ('d', (np.str, 5))])
+ ('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
@@ -2197,6 +2312,106 @@ class TestRegression(TestCase):
new_shape = (2, 7, 7, 43826197)
assert_raises(ValueError, a.reshape, new_shape)
+ def test_invalid_structured_dtypes(self):
+ # gh-2865
+ # mapping python objects to other dtypes
+ assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
+ assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
+ assert_raises(ValueError, np.dtype,
+ ('i8', [('name', [('name', 'O')])]))
+ assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
+ assert_raises(ValueError, np.dtype, ('i8', 'O'))
+ # wrong number/type of tuple elements in dict
+ assert_raises(ValueError, np.dtype,
+ ('i', {'name': ('i', 0, 'title', 'oops')}))
+ assert_raises(ValueError, np.dtype,
+ ('i', {'name': ('i', 'wrongtype', 'title')}))
+ # disallowed as of 1.13
+ assert_raises(ValueError, np.dtype,
+ ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
+ # allowed as a special case due to existing use, see gh-2798
+ a = np.ones(1, dtype=('O', [('name', 'O')]))
+ assert_equal(a[0], 1)
+
+ def test_correct_hash_dict(self):
+ # gh-8887 - __hash__ would be None despite tp_hash being set
+ all_types = set(np.typeDict.values()) - {np.void}
+ for t in all_types:
+ val = t()
+
+ try:
+ hash(val)
+ except TypeError as e:
+ assert_equal(t.__hash__, None)
+ else:
+ assert_(t.__hash__ != None)
+
+ def test_scalar_copy(self):
+ scalar_types = set(np.sctypeDict.values())
+ values = {
+ np.void: b"a",
+ np.bytes_: b"a",
+ np.unicode_: "a",
+ np.datetime64: "2017-08-25",
+ }
+ for sctype in scalar_types:
+ item = sctype(values.get(sctype, 1))
+ item2 = copy.copy(item)
+ assert_equal(item, item2)
+
+ def test_void_item_memview(self):
+ va = np.zeros(10, 'V4')
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
+
+ def test_void_getitem(self):
+ # Test fix for gh-11668.
+ assert_(np.array([b'a'], 'V1').astype('O') == b'a')
+ assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
+ assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
+ assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
+
+ def test_structarray_title(self):
+ # The following used to segfault on pypy, due to NPY_TITLE_KEY
+ # not working properly and resulting to double-decref of the
+ # structured array field items:
+ # See: https://bitbucket.org/pypy/pypy/issues/2789
+ for j in range(5):
+ structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
+ structure[0]['x'] = np.array([2])
+ gc.collect()
-if __name__ == "__main__":
- run_module_suite()
+ def test_dtype_scalar_squeeze(self):
+ # gh-11384
+ values = {
+ 'S': b"a",
+ 'M': "2018-06-20",
+ }
+ for ch in np.typecodes['All']:
+ if ch in 'O':
+ continue
+ sctype = np.dtype(ch).type
+ scvalue = sctype(values.get(ch, 3))
+ for axis in [None, ()]:
+ squeezed = scvalue.squeeze(axis=axis)
+ assert_equal(squeezed, scvalue)
+ assert_equal(type(squeezed), type(scvalue))
+
+ def test_field_access_by_title(self):
+ # gh-11507
+ s = 'Some long field name'
+ if HAS_REFCOUNT:
+ base = sys.getrefcount(s)
+ t = np.dtype([((s, 'f1'), np.float64)])
+ data = np.zeros(10, t)
+ for i in range(10):
+ str(data[['f1']])
+ if HAS_REFCOUNT:
+ assert_(base <= sys.getrefcount(s))
+
+ def test_object_casting_errors(self):
+ # gh-11993
+ arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+ assert_raises(TypeError, arr.astype, 'c8')
diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py
new file mode 100644
index 000000000..b21bc9dad
--- /dev/null
+++ b/numpy/core/tests/test_scalar_ctors.py
@@ -0,0 +1,65 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_equal, assert_almost_equal, assert_raises, assert_warns,
+ )
+
+class TestFromString(object):
+ def test_floating(self):
+ # Ticket #640, floats from string
+ fsingle = np.single('1.234')
+ fdouble = np.double('1.234')
+ flongdouble = np.longdouble('1.234')
+ assert_almost_equal(fsingle, 1.234)
+ assert_almost_equal(fdouble, 1.234)
+ assert_almost_equal(flongdouble, 1.234)
+
+ def test_floating_overflow(self):
+ """ Strings containing an unrepresentable float overflow """
+ fhalf = np.half('1e10000')
+ assert_equal(fhalf, np.inf)
+ fsingle = np.single('1e10000')
+ assert_equal(fsingle, np.inf)
+ fdouble = np.double('1e10000')
+ assert_equal(fdouble, np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
+ assert_equal(flongdouble, np.inf)
+
+ fhalf = np.half('-1e10000')
+ assert_equal(fhalf, -np.inf)
+ fsingle = np.single('-1e10000')
+ assert_equal(fsingle, -np.inf)
+ fdouble = np.double('-1e10000')
+ assert_equal(fdouble, -np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
+ assert_equal(flongdouble, -np.inf)
+
+ @pytest.mark.skipif((sys.version_info[0] >= 3)
+ or (sys.platform == "win32"
+ and platform.architecture()[0] == "64bit"),
+ reason="numpy.intp('0xff', 16) not supported on Py3 "
+ "or 64 bit Windows")
+ def test_intp(self):
+ # Ticket #99
+ i_width = np.int_(0).nbytes*2 - 1
+ np.intp('0x' + 'f'*i_width, 16)
+ assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
+ assert_raises(ValueError, np.intp, '0x1', 32)
+ assert_equal(255, np.intp('0xFF', 16))
+
+
+class TestFromInt(object):
+ def test_intp(self):
+ # Ticket #99
+ assert_equal(1024, np.intp(1024))
+
+ def test_uint64_from_negative(self):
+ assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
new file mode 100644
index 000000000..cd520d99b
--- /dev/null
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -0,0 +1,105 @@
+"""
+Test scalar buffer interface adheres to PEP 3118
+"""
+import sys
+import numpy as np
+import pytest
+
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# PEP3118 format strings for native (standard alignment and byteorder) types
+scalars_and_codes = [
+ (np.bool_, '?'),
+ (np.byte, 'b'),
+ (np.short, 'h'),
+ (np.intc, 'i'),
+ (np.int_, 'l'),
+ (np.longlong, 'q'),
+ (np.ubyte, 'B'),
+ (np.ushort, 'H'),
+ (np.uintc, 'I'),
+ (np.uint, 'L'),
+ (np.ulonglong, 'Q'),
+ (np.half, 'e'),
+ (np.single, 'f'),
+ (np.double, 'd'),
+ (np.longdouble, 'g'),
+ (np.csingle, 'Zf'),
+ (np.cdouble, 'Zd'),
+ (np.clongdouble, 'Zg'),
+]
+scalars_only, codes_only = zip(*scalars_and_codes)
+
+
+@pytest.mark.skipif(sys.version_info.major < 3,
+ reason="Python 2 scalars lack a buffer interface")
+class TestScalarPEP3118(object):
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_match_array(self, scalar):
+ x = scalar()
+ a = np.array([], dtype=np.dtype(scalar))
+ mv_x = memoryview(x)
+ mv_a = memoryview(a)
+ assert_equal(mv_x.format, mv_a.format)
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_dim(self, scalar):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
+ def test_scalar_known_code(self, scalar, code):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.format, code)
+
+ def test_void_scalar_structured_data(self):
+ dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
+ x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
+ assert_(isinstance(x, np.void))
+ mv_x = memoryview(x)
+ expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
+ expected_size += 2 * np.dtype((np.float64, 1)).itemsize
+ assert_equal(mv_x.itemsize, expected_size)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ # check scalar format string against ndarray format string
+ a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_(isinstance(a, np.ndarray))
+ mv_a = memoryview(a)
+ assert_equal(mv_x.itemsize, mv_a.itemsize)
+ assert_equal(mv_x.format, mv_a.format)
+
+ def test_datetime_memoryview(self):
+ # gh-11656
+ # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+ def as_dict(m):
+ return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+ ndim=m.ndim, format=m.format)
+
+ dt1 = np.datetime64('2016-01-01')
+ dt2 = np.datetime64('2017-01-01')
+ expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1,
+ 'shape': (8,), 'format': 'B'}
+ v = memoryview(dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ v = memoryview(dt2 - dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(1, dt)
+ # Fails to create a PEP 3118 valid buffer
+ assert_raises((ValueError, BufferError), memoryview, a[0])
+
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index e8cf7fde0..9e32cf624 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -5,7 +5,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import assert_
class A(object):
@@ -23,7 +23,7 @@ class B0(np.float64, A):
class C0(B0):
pass
-class TestInherit(TestCase):
+class TestInherit(object):
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
@@ -38,5 +38,38 @@ class TestInherit(TestCase):
y = C0(2.0)
assert_(str(y) == '2.0')
-if __name__ == "__main__":
- run_module_suite()
+
+class TestCharacter(object):
+ def test_char_radd(self):
+ # GH issue 9620, reached gentype_add and raise TypeError
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ s = b'def'
+ u = u'def'
+ assert_(np_s.__radd__(np_s) is NotImplemented)
+ assert_(np_s.__radd__(np_u) is NotImplemented)
+ assert_(np_s.__radd__(s) is NotImplemented)
+ assert_(np_s.__radd__(u) is NotImplemented)
+ assert_(np_u.__radd__(np_s) is NotImplemented)
+ assert_(np_u.__radd__(np_u) is NotImplemented)
+ assert_(np_u.__radd__(s) is NotImplemented)
+ assert_(np_u.__radd__(u) is NotImplemented)
+ assert_(s + np_s == b'defabc')
+ assert_(u + np_u == u'defabc')
+
+
+ class Mystr(str, np.generic):
+ # would segfault
+ pass
+
+ ret = s + Mystr('abc')
+ assert_(type(ret) is type(s))
+
+ def test_char_repeat(self):
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ np_i = np.int(5)
+ res_s = b'abc' * 5
+ res_u = u'abc' * 5
+ assert_(np_s * np_i == res_s)
+ assert_(np_u * np_i == res_u)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 52c9d3bc6..a7bb4b3c0 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -1,16 +1,18 @@
from __future__ import division, absolute_import, print_function
import sys
-import itertools
import warnings
+import itertools
import operator
+import platform
+import pytest
import numpy as np
-from numpy.testing.utils import _gen_alignment_data
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_almost_equal, assert_allclose, assert_array_equal
-)
+ assert_, assert_equal, assert_raises, assert_almost_equal,
+ assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
+ assert_warns
+ )
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
@@ -18,17 +20,18 @@ types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
+complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
-class TestTypes(TestCase):
- def test_types(self, level=1):
+class TestTypes(object):
+ def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
- def test_type_add(self, level=1):
+ def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
@@ -48,7 +51,7 @@ class TestTypes(TestCase):
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
- def test_type_create(self, level=1):
+ def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
@@ -61,11 +64,11 @@ class TestTypes(TestCase):
np.add(1, 1)
-class TestBaseMath(TestCase):
+class TestBaseMath(object):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
- for dt, sz in [(np.float32, 11), (np.float64, 7)]:
+ for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
@@ -73,7 +76,7 @@ class TestBaseMath(TestCase):
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
- assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
+ assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
@@ -82,15 +85,17 @@ class TestBaseMath(TestCase):
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
- assert_almost_equal(np.reciprocal(inp2),
- np.divide(1, inp2), err_msg=msg)
+ # skip true divide for ints
+ if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
+ assert_almost_equal(np.reciprocal(inp2),
+ np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
- inp2[...] = np.zeros_like(inp2)
- np.add(inp1, 1, out=out)
- assert_almost_equal(out, exp1 + 1, err_msg=msg)
- np.add(1, inp2, out=out)
- assert_almost_equal(out, exp1, err_msg=msg)
+ np.add(inp1, 2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
+ inp2[...] = np.ones_like(inp2)
+ np.add(2, inp2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
@@ -105,7 +110,7 @@ class TestBaseMath(TestCase):
np.add(d, np.ones_like(d))
-class TestPower(TestCase):
+class TestPower(object):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
@@ -122,23 +127,41 @@ class TestPower(TestCase):
else:
assert_almost_equal(b, 6765201, err_msg=msg)
- def test_negative_power(self):
- typelist = [np.int8, np.int16, np.int32, np.int64]
- for t in typelist:
- a = t(2)
- b = t(-4)
- result = a**b
- msg = ("error with %r:"
- "got %r, expected %r") % (t, result, 0.0625)
- assert_(result == 0.0625, msg)
-
- c = t(4)
- d = t(-15)
- result = c**d
- expected = 4.0**-15.0
- msg = ("error with %r:"
- "got %r, expected %r") % (t, result, expected)
- assert_almost_equal(result, expected, err_msg=msg)
+ def test_integers_to_negative_integer_power(self):
+ # Note that the combination of uint64 with a signed integer
+ # has common type np.float64. The other combinations should all
+ # raise a ValueError for integer ** negative integer.
+ exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+ # 1 ** -1 possible special case
+ base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, 1.)
+
+ # -1 ** -1 possible special case
+ base = [np.array(-1, dt)[()] for dt in 'bhilq']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, -1.)
+
+ # 2 ** -1 perhaps generic
+ base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
@@ -156,31 +179,46 @@ class TestPower(TestCase):
else:
assert_almost_equal(result, 9, err_msg=msg)
+ def test_modular_power(self):
+ # modular power is not implemented, so ensure it errors
+ a = 5
+ b = 4
+ c = 10
+ expected = pow(a, b, c) # noqa: F841
+ for t in (np.int32, np.float32, np.complex64):
+ # note that 3-operand power only dispatches on the first argument
+ assert_raises(TypeError, operator.pow, t(a), b, c)
+ assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
+
-class TestModulus(TestCase):
+def floordiv_and_mod(x, y):
+ return (x // y, x % y)
- floordiv = operator.floordiv
- mod = operator.mod
+
+def _signs(dt):
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestModulus(object):
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
- for dt1, dt2 in itertools.product(dt, dt):
- for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
- if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']:
- continue
- if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']:
- continue
- fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
- msg = fmt % (dt1, dt2, sg1, sg2)
- a = np.array(sg1*71, dtype=dt1)[()]
- b = np.array(sg2*19, dtype=dt2)[()]
- div = self.floordiv(a, b)
- rem = self.mod(a, b)
- assert_equal(div*b + rem, a, err_msg=msg)
- if sg2 == -1:
- assert_(b < rem <= 0, msg)
- else:
- assert_(b > rem >= 0, msg)
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)[()]
+ b = np.array(sg2*19, dtype=dt2)[()]
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
@@ -199,65 +237,64 @@ class TestModulus(TestCase):
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
- for dt in np.typecodes['Float']:
- msg = 'dtype: %s' % (dt,)
- fa = a.astype(dt)
- fb = b.astype(dt)
- # use list comprehension so a_ and b_ are scalars
- div = [self.floordiv(a_, b_) for a_, b_ in zip(fa, fb)]
- rem = [self.mod(a_, b_) for a_, b_ in zip(fa, fb)]
- assert_equal(div, tgtdiv, err_msg=msg)
- assert_equal(rem, tgtrem, err_msg=msg)
+ for op in [floordiv_and_mod, divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ # use list comprehension so a_ and b_ are scalars
+ div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
- for dt1, dt2 in itertools.product(dt, dt):
- for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
- fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
- msg = fmt % (dt1, dt2, sg1, sg2)
- a = np.array(sg1*78*6e-8, dtype=dt1)[()]
- b = np.array(sg2*6e-8, dtype=dt2)[()]
- div = self.floordiv(a, b)
- rem = self.mod(a, b)
- # Equal assertion should hold when fmod is used
- assert_equal(div*b + rem, a, err_msg=msg)
- if sg2 == -1:
- assert_(b < rem <= 0, msg)
- else:
- assert_(b > rem >= 0, msg)
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)[()]
+ b = np.array(sg2*6e-8, dtype=dt2)[()]
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
- rem = self.mod(a, b)
+ rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
- rem = self.mod(-a, -b)
+ rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
- with warnings.catch_warnings():
- warnings.simplefilter('always')
- warnings.simplefilter('ignore', RuntimeWarning)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
- rem = self.mod(fone, fzer)
+ rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
- #rem = self.mod(fone, finf)
+ #rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
- rem = self.mod(fone, fnan)
+ rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
- rem = self.mod(finf, fone)
+ rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
-class TestComplexDivision(TestCase):
+class TestComplexDivision(object):
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
@@ -329,7 +366,7 @@ class TestComplexDivision(TestCase):
assert_equal(result.imag, ex[1])
-class TestConversion(TestCase):
+class TestConversion(object):
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
@@ -363,12 +400,44 @@ class TestConversion(TestCase):
for code in 'lLqQ':
assert_raises(OverflowError, overflow_error_func, code)
- def test_longdouble_int(self):
+ def test_int_from_infinite_longdouble(self):
# gh-627
x = np.longdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
+ def test_int_from_infinite_longdouble___int__(self):
+ x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
- x = np.clongdouble(np.inf)
- assert_raises(OverflowError, x.__int__)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+ @pytest.mark.skipif(platform.machine().startswith("ppc64"),
+ reason="IBM double double")
+ def test_int_from_huge_longdouble(self):
+ # Produce a longdouble that would overflow a double,
+ # use exponent that avoids bug in Darwin pow function.
+ exp = np.finfo(np.double).maxexp - 1
+ huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+ huge_i = 2 * 1234 * 2 ** exp
+ assert_(huge_ld != np.inf)
+ assert_equal(int(huge_ld), huge_i)
+
+ def test_int_from_longdouble(self):
+ x = np.longdouble(1.5)
+ assert_equal(int(x), 1)
+ x = np.longdouble(-10.5)
+ assert_equal(int(x), -10)
def test_numpy_scalar_relational_operators(self):
# All integer
@@ -411,8 +480,29 @@ class TestConversion(TestCase):
assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
-
-#class TestRepr(TestCase):
+ def test_scalar_comparison_to_none(self):
+ # Scalars should just return False and not give a warnings.
+ # The comparisons are flagged by pep8, ignore that.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_(not np.float32(1) == None)
+ assert_(not np.str_('test') == None)
+ # This is dubious (see below):
+ assert_(not np.datetime64('NaT') == None)
+
+ assert_(np.float32(1) != None)
+ assert_(np.str_('test') != None)
+ # This is dubious (see below):
+ assert_(np.datetime64('NaT') != None)
+ assert_(len(w) == 0)
+
+ # For documentation purposes, this is why the datetime is dubious.
+ # At the time of deprecation this was no behaviour change, but
+ # it has to be considered when the deprecations are done.
+ assert_(np.equal(np.datetime64('NaT'), None))
+
+
+#class TestRepr(object):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
@@ -429,7 +519,7 @@ class TestRepr(object):
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
- # Values from http://en.wikipedia.org/wiki/IEEE_754
+ # Values from https://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
@@ -451,37 +541,52 @@ class TestRepr(object):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
- yield self._test_type_repr, t
+ self._test_type_repr(t)
-class TestSizeOf(TestCase):
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf(object):
- def test_equal_nbytes(self):
- for type in types:
- x = type(0)
- assert_(sys.getsizeof(x) > x.nbytes)
+ def test_equal_nbytes(self):
+ for type in types:
+ x = type(0)
+ assert_(sys.getsizeof(x) > x.nbytes)
- def test_error(self):
- d = np.float32()
- assert_raises(TypeError, d.__sizeof__, "a")
+ def test_error(self):
+ d = np.float32()
+ assert_raises(TypeError, d.__sizeof__, "a")
-class TestMultiply(TestCase):
+class TestMultiply(object):
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
# Some of this behaviour may be controversial and could be open for
# change.
+ accepted_types = set(np.typecodes["AllInteger"])
+ deprecated_types = {'?'}
+ forbidden_types = (
+ set(np.typecodes["All"]) - accepted_types - deprecated_types)
+ forbidden_types -= {'V'} # can't default-construct void scalars
+
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
- for numpy_type in np.typecodes["AllInteger"]:
+ for numpy_type in accepted_types:
i = np.dtype(numpy_type).type(2)
assert_equal(seq * i, seq * int(i))
assert_equal(i * seq, int(i) * seq)
- for numpy_type in np.typecodes["All"].replace("V", ""):
- if numpy_type in np.typecodes["AllInteger"]:
- continue
+ for numpy_type in deprecated_types:
+ i = np.dtype(numpy_type).type()
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, seq, i),
+ seq * int(i))
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, i, seq),
+ int(i) * seq)
+
+ for numpy_type in forbidden_types:
i = np.dtype(numpy_type).type()
assert_raises(TypeError, operator.mul, seq, i)
assert_raises(TypeError, operator.mul, i, seq)
@@ -504,10 +609,37 @@ class TestMultiply(TestCase):
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
-class TestAbs(TestCase):
+class TestNegative(object):
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.neg, a)
+
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ assert_equal(operator.neg(a) + a, 0)
+
+
+class TestSubtract(object):
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.sub, a, a)
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ assert_equal(operator.sub(a, a), 0)
+
+
+class TestAbs(object):
def _test_abs_func(self, absfunc):
- for tp in floating_types:
+ for tp in floating_types + complex_floating_types:
x = tp(-1.5)
assert_equal(absfunc(x), 1.5)
x = tp(0.0)
@@ -518,6 +650,15 @@ class TestAbs(TestCase):
res = absfunc(x)
assert_equal(res, 0.0)
+ x = tp(np.finfo(tp).max)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).tiny)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).min)
+ assert_equal(absfunc(x), -x.real)
+
def test_builtin_abs(self):
self._test_abs_func(abs)
@@ -525,7 +666,7 @@ class TestAbs(TestCase):
self._test_abs_func(np.abs)
-class TestBitShifts(TestCase):
+class TestBitShifts(object):
def test_left_shift(self):
# gh-2449
@@ -555,7 +696,3 @@ class TestBitShifts(TestCase):
assert_equal(res_neg, -1)
# Result on scalars should be the same as on arrays
assert_array_equal(arr >> shift, [res_pos, res_neg], dt)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 8d0f27182..cde1355aa 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -4,27 +4,323 @@
"""
from __future__ import division, absolute_import, print_function
-import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite
+import code, sys
+import platform
+import pytest
+from tempfile import TemporaryFile
+import numpy as np
+from numpy.testing import assert_, assert_equal, suppress_warnings
-class TestRealScalars(TestCase):
+class TestRealScalars(object):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
- actual = [str(f(c)) for c in svals for f in styps]
wanted = [
- '0.0', '0.0', '0.0', '0.0',
- '-0.0', '-0.0', '-0.0', '-0.0',
- '1.0', '1.0', '1.0', '1.0',
- '-1.0', '-1.0', '-1.0', '-1.0',
- 'inf', 'inf', 'inf', 'inf',
- '-inf', '-inf', '-inf', '-inf',
- 'nan', 'nan', 'nan', 'nan']
+ ['0.0', '0.0', '0.0', '0.0' ],
+ ['-0.0', '-0.0', '-0.0', '-0.0'],
+ ['1.0', '1.0', '1.0', '1.0' ],
+ ['-1.0', '-1.0', '-1.0', '-1.0'],
+ ['inf', 'inf', 'inf', 'inf' ],
+ ['-inf', '-inf', '-inf', '-inf'],
+ ['nan', 'nan', 'nan', 'nan']]
+
+ for wants, val in zip(wanted, svals):
+ for want, styp in zip(wants, styps):
+ msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
+ assert_equal(str(styp(val)), want, err_msg=msg)
+
+ def test_scalar_cutoffs(self):
+ # test that both the str and repr of np.float64 behaves
+ # like python floats in python3. Note that in python2
+ # the str has truncated digits, but we do not do this
+ def check(v):
+ # we compare str to repr, to avoid python2 truncation behavior
+ assert_equal(str(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), repr(v))
+
+ # check we use the same number of significant digits
+ check(1.12345678901234567890)
+ check(0.0112345678901234567890)
+
+ # check switch from scientific output to positional and back
+ check(1e-5)
+ check(1e-4)
+ check(1e15)
+ check(1e16)
+
+ def test_py2_float_print(self):
+ # gh-10753
+ # In python2, the python float type implements an obsolte method
+ # tp_print, which overrides tp_repr and tp_str when using "print" to
+ # output to a "real file" (ie, not a StringIO). Make sure we don't
+ # inherit it.
+ x = np.double(0.1999999999999)
+ with TemporaryFile('r+t') as f:
+ print(x, file=f)
+ f.seek(0)
+ output = f.read()
+ assert_equal(output, str(x) + '\n')
+ # In python2 the value float('0.1999999999999') prints with reduced
+ # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+ # to print the unique value, '0.1999999999999'.
+
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
+ def test_dragon4(self):
+ # these tests are adapted from Ryan Juckett's dragon4 implementation,
+ # see dragon4.c for details.
+
+ fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
+ fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
+ fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
+ fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
+
+ preckwd = lambda prec: {'unique': False, 'precision': prec}
+
+ assert_equal(fpos32('1.0'), "1.")
+ assert_equal(fsci32('1.0'), "1.e+00")
+ assert_equal(fpos32('10.234'), "10.234")
+ assert_equal(fpos32('-10.234'), "-10.234")
+ assert_equal(fsci32('10.234'), "1.0234e+01")
+ assert_equal(fsci32('-10.234'), "-1.0234e+01")
+ assert_equal(fpos32('1000.0'), "1000.")
+ assert_equal(fpos32('1.0', precision=0), "1.")
+ assert_equal(fsci32('1.0', precision=0), "1.e+00")
+ assert_equal(fpos32('10.234', precision=0), "10.")
+ assert_equal(fpos32('-10.234', precision=0), "-10.")
+ assert_equal(fsci32('10.234', precision=0), "1.e+01")
+ assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
+ assert_equal(fpos32('10.234', precision=2), "10.23")
+ assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
+ assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
+ '9.9999999999999995e-08')
+ assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
+ '9.8813129168249309e-324')
+ assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
+ '9.9999999999999694e-311')
+
+
+ # test rounding
+ # 3.1415927410 is closest float32 to np.pi
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410")
+ assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410e+00")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536")
+ assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536e+00")
+ # 299792448 is closest float32 to 299792458
+ assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
+ assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
+ assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
+ assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
+
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
+ "3.1415927410125732421875000")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
+ "3.14159265358979311599796346854418516159057617187500")
+ assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
+
+
+ # smallest numbers
+ assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
+ "0.00000000000000000000000000000000000000000000140129846432"
+ "4817070923729583289916131280261941876515771757068283889791"
+ "08268586060148663818836212158203125")
+ assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074),
+ "0.00000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000049406564584124654417656"
+ "8792868221372365059802614324764425585682500675507270208751"
+ "8652998363616359923797965646954457177309266567103559397963"
+ "9877479601078187812630071319031140452784581716784898210368"
+ "8718636056998730723050006387409153564984387312473397273169"
+ "6151400317153853980741262385655911710266585566867681870395"
+ "6031062493194527159149245532930545654440112748012970999954"
+ "1931989409080416563324524757147869014726780159355238611550"
+ "1348035264934720193790268107107491703332226844753335720832"
+ "4319360923828934583680601060115061698097530783422773183292"
+ "4790498252473077637592724787465608477820373446969953364701"
+ "7972677717585125660551199131504891101451037862738167250955"
+ "8373897335989936648099411642057026370902792427675445652290"
+ "87538682506419718265533447265625")
+
+ # largest numbers
+ assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)),
+ "340282346638528859811704183484516925440.")
+ assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
+ "1797693134862315708145274237317043567980705675258449965989"
+ "1747680315726078002853876058955863276687817154045895351438"
+ "2464234321326889464182768467546703537516986049910576551282"
+ "0762454900903893289440758685084551339423045832369032229481"
+ "6580855933212334827479782620414472316873817718091929988125"
+ "0404026184124858368.")
+ # Warning: In unique mode only the integer digits necessary for
+ # uniqueness are computed, the rest are 0. Should we change this?
+ assert_equal(fpos32(np.finfo(np.float32).max, precision=0),
+ "340282350000000000000000000000000000000.")
+
+ # test trailing zeros
+ assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
+ assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
+ # gh-10713
+ assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00")
+
+ def test_dragon4_interface(self):
+ tps = [np.float16, np.float32, np.float64]
+ if hasattr(np, 'float128'):
+ tps.append(np.float128)
+
+ fpos = np.format_float_positional
+ fsci = np.format_float_scientific
+
+ for tp in tps:
+ # test padding
+ assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
+ assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
+ assert_equal(fpos(tp('-10.2'),
+ pad_left=4, pad_right=4), " -10.2 ")
+
+ # test exp_digits
+ assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
+
+ # test fixed (non-unique) mode
+ assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
+ assert_equal(fsci(tp('1.0'), unique=False, precision=4),
+ "1.0000e+00")
+
+ # test trimming
+ # trim of 'k' or '.' only affects non-unique mode, since unique
+ # mode will not output trailing 0s.
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
+ "1.0000")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
+ "1.")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
+ "1.2" if tp != np.float16 else "1.2002")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
+ "1.0")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='0'), "1.0")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
+ "1")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='-'), "1")
+
+ @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
+ reason="only applies to ppc float128 values")
+ def test_ppc64_ibm_double_double128(self):
+ # check that the precision decreases once we get into the subnormal
+ # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
+ # which happens when the first double is normal and the second is
+ # subnormal.
+ x = np.float128('2.123123123123123123123123123123123e-286')
+ got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
+ expected = [
+ "1.06156156156156156156156156156157e-286",
+ "1.06156156156156156156156156156158e-287",
+ "1.06156156156156156156156156156159e-288",
+ "1.0615615615615615615615615615616e-289",
+ "1.06156156156156156156156156156157e-290",
+ "1.06156156156156156156156156156156e-291",
+ "1.0615615615615615615615615615616e-292",
+ "1.0615615615615615615615615615615e-293",
+ "1.061561561561561561561561561562e-294",
+ "1.06156156156156156156156156155e-295",
+ "1.0615615615615615615615615616e-296",
+ "1.06156156156156156156156156e-297",
+ "1.06156156156156156156156157e-298",
+ "1.0615615615615615615615616e-299",
+ "1.06156156156156156156156e-300",
+ "1.06156156156156156156155e-301",
+ "1.0615615615615615615616e-302",
+ "1.061561561561561561562e-303",
+ "1.06156156156156156156e-304",
+ "1.0615615615615615618e-305",
+ "1.06156156156156156e-306",
+ "1.06156156156156157e-307",
+ "1.0615615615615616e-308",
+ "1.06156156156156e-309",
+ "1.06156156156157e-310",
+ "1.0615615615616e-311",
+ "1.06156156156e-312",
+ "1.06156156154e-313",
+ "1.0615615616e-314",
+ "1.06156156e-315",
+ "1.06156155e-316",
+ "1.061562e-317",
+ "1.06156e-318",
+ "1.06155e-319",
+ "1.0617e-320",
+ "1.06e-321",
+ "1.04e-322",
+ "1e-323",
+ "0.0",
+ "0.0"]
+ assert_equal(got, expected)
- for res, val in zip(actual, wanted):
- assert_(res == val)
+ # Note: we follow glibc behavior, but it (or gcc) might not be right.
+ # In particular we can get two values that print the same but are not
+ # equal:
+ a = np.float128('2')/np.float128('3')
+ b = np.float128(str(a))
+ assert_equal(str(a), str(b))
+ assert_(a != b)
+ def float32_roundtrip(self):
+ # gh-9360
+ x = np.float32(1024 - 2**-14)
+ y = np.float32(1024 - 2**-13)
+ assert_(repr(x) != repr(y))
+ assert_equal(np.float32(repr(x)), x)
+ assert_equal(np.float32(repr(y)), y)
-if __name__ == "__main__":
- run_module_suite()
+ def float64_vs_python(self):
+ # gh-2643, gh-6136, gh-6908
+ assert_equal(repr(np.float64(0.1)), repr(0.1))
+ assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index ac8dc1eea..ef5c118ec 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -1,13 +1,22 @@
from __future__ import division, absolute_import, print_function
+import pytest
+import sys
import numpy as np
+from numpy.core import (
+ array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+ newaxis, concatenate, stack
+ )
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, assert_equal,
+ assert_raises_regex, assert_warns
+ )
+
from numpy.compat import long
-from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
- vstack, hstack, newaxis, concatenate, stack)
-from numpy.testing import (TestCase, assert_, assert_raises, assert_array_equal,
- assert_equal, run_module_suite, assert_raises_regex)
-class TestAtleast1d(TestCase):
+class TestAtleast1d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -48,7 +57,7 @@ class TestAtleast1d(TestCase):
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
-class TestAtleast2d(TestCase):
+class TestAtleast2d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -87,7 +96,7 @@ class TestAtleast2d(TestCase):
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
-class TestAtleast3d(TestCase):
+class TestAtleast3d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -119,10 +128,13 @@ class TestAtleast3d(TestCase):
assert_array_equal(res, desired)
-class TestHstack(TestCase):
+class TestHstack(object):
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
+ def test_empty_input(self):
+ assert_raises(ValueError, hstack, ())
+
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -144,11 +156,22 @@ class TestHstack(TestCase):
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ hstack((np.arange(3) for _ in range(2)))
+ if sys.version_info.major > 2:
+ # map returns a list on Python 2
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
+
-class TestVstack(TestCase):
+class TestVstack(object):
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
+ def test_empty_input(self):
+ assert_raises(ValueError, vstack, ())
+
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -177,15 +200,25 @@ class TestVstack(TestCase):
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ vstack((np.arange(3) for _ in range(2)))
+
+
+class TestConcatenate(object):
+ def test_returns_copy(self):
+ a = np.eye(3)
+ b = np.concatenate([a])
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
-class TestConcatenate(TestCase):
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
- assert_raises(IndexError, np.concatenate, (a, a), axis=ndim)
- assert_raises(IndexError, np.concatenate, (a, a), axis=-(ndim + 1))
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
@@ -199,8 +232,8 @@ class TestConcatenate(TestCase):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
- a = np.rollaxis(a, -1)
- b = np.rollaxis(b, -1)
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
@@ -221,6 +254,12 @@ class TestConcatenate(TestCase):
'0', '1', '2', 'x'])
assert_array_equal(r, d)
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
@@ -269,6 +308,34 @@ class TestConcatenate(TestCase):
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
+ res = concatenate((array([1, 2]), array([3, 4])), out=out)
+ assert_(out is res)
+
+ out = np.empty(4, np.complex64)
+ res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ assert_(out is res)
+
+ # invalid cast
+ out = np.empty(4, np.int32)
+ assert_raises(TypeError, concatenate,
+ (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+
def test_stack():
# non-iterable input
@@ -294,13 +361,13 @@ def test_stack():
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
- assert_raises_regex(IndexError, 'out of bounds', stack, arrays, axis=2)
- assert_raises_regex(IndexError, 'out of bounds', stack, arrays, axis=-3)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
- (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+ (3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
@@ -318,11 +385,318 @@ def test_stack():
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
- # np.matrix
- m = np.matrix([[1, 2], [3, 4]])
- assert_raises_regex(ValueError, 'shape too large to be a matrix',
- stack, [m, m])
+ # generator is deprecated
+ with assert_warns(FutureWarning):
+ result = stack((x for x in range(3)))
+ assert_array_equal(result, np.array([0, 1, 2]))
+
+
+class TestBlock(object):
+ @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+ def block(self, request):
+ # blocking small arrays and large arrays go through different paths.
+ # the algorithm is triggered depending on the number of element
+ # copies required.
+ # We define a test fixture that forces most tests to go through
+ # both code paths.
+ # Ultimately, this should be removed if a single algorithm is found
+ # to be faster for both small and large arrays.
+ def _block_force_concatenate(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+ def _block_force_slicing(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_slicing(arrays, list_ndim, result_ndim)
+
+ if request.param == 'force_concatenate':
+ return _block_force_concatenate
+ elif request.param == 'force_slicing':
+ return _block_force_slicing
+ elif request.param == 'block':
+ return block
+ else:
+ raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+ def test_returns_copy(self, block):
+ a = np.eye(3)
+ b = block(a)
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_block_total_size_estimate(self, block):
+ _, _, _, total_size = _block_setup([1])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1]])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1, 1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1], [1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+ assert total_size == 4
+
+ def test_block_simple_row_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ desired = np.array([[1, 1, 2, 2],
+ [1, 1, 2, 2]])
+ result = block([a_2d, b_2d])
+ assert_equal(desired, result)
+
+ def test_block_simple_column_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2],
+ [2, 2]])
+ result = block([[a_2d], [b_2d]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_row_wise(self, block):
+ # # # 1-D vectors are treated as row arrays
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([1, 2, 3, 2, 3, 4])
+ result = block([a, b])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_multiple_rows(self, block):
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3, 2, 3, 4],
+ [1, 2, 3, 2, 3, 4]])
+ result = block([[a, b], [a, b]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_column_wise(self, block):
+ # # # 1-D vectors are treated as row arrays
+ a_1d = np.array([1, 2, 3])
+ b_1d = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3],
+ [2, 3, 4]])
+ result = block([[a_1d], [b_1d]])
+ assert_equal(expected, result)
+
+ def test_block_mixed_1d_and_2d(self, block):
+ a_2d = np.ones((2, 2))
+ b_1d = np.array([2, 2])
+ result = block([[a_2d], [b_1d]])
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2]])
+ assert_equal(expected, result)
+
+ def test_block_complicated(self, block):
+ # a bit more complicated
+ one_2d = np.array([[1, 1, 1]])
+ two_2d = np.array([[2, 2, 2]])
+ three_2d = np.array([[3, 3, 3, 3, 3, 3]])
+ four_1d = np.array([4, 4, 4, 4, 4, 4])
+ five_0d = np.array(5)
+ six_1d = np.array([6, 6, 6, 6, 6])
+ zero_2d = np.zeros((2, 6))
+
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4, 4],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ result = block([[one_2d, two_2d],
+ [three_2d],
+ [four_1d],
+ [five_0d, six_1d],
+ [zero_2d]])
+ assert_equal(result, expected)
+
+ def test_nested(self, block):
+ one = np.array([1, 1, 1])
+ two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
+ three = np.array([3, 3, 3])
+ four = np.array([4, 4, 4])
+ five = np.array(5)
+ six = np.array([6, 6, 6, 6, 6])
+ zero = np.zeros((2, 6))
+
+ result = block([
+ [
+ block([
+ [one],
+ [three],
+ [four]
+ ]),
+ two
+ ],
+ [five, six],
+ [zero]
+ ])
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 2, 2, 2],
+ [4, 4, 4, 2, 2, 2],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ assert_equal(result, expected)
+
+ def test_3d(self, block):
+ a000 = np.ones((2, 2, 2), int) * 1
+
+ a100 = np.ones((3, 2, 2), int) * 2
+ a010 = np.ones((2, 3, 2), int) * 3
+ a001 = np.ones((2, 2, 3), int) * 4
+
+ a011 = np.ones((2, 3, 3), int) * 5
+ a101 = np.ones((3, 2, 3), int) * 6
+ a110 = np.ones((3, 3, 2), int) * 7
+
+ a111 = np.ones((3, 3, 3), int) * 8
+
+ result = block([
+ [
+ [a000, a001],
+ [a010, a011],
+ ],
+ [
+ [a100, a101],
+ [a110, a111],
+ ]
+ ])
+ expected = array([[[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]]])
+
+ assert_array_equal(result, expected)
+
+ def test_block_with_mismatched_shape(self, block):
+ a = np.array([0, 0])
+ b = np.eye(2)
+ assert_raises(ValueError, block, [a, b])
+ assert_raises(ValueError, block, [b, a])
+
+ to_block = [[np.ones((2,3)), np.ones((2,2))],
+ [np.ones((2,2)), np.ones((2,2))]]
+ assert_raises(ValueError, block, to_block)
+ def test_no_lists(self, block):
+ assert_equal(block(1), np.array(1))
+ assert_equal(block(np.eye(3)), np.eye(3))
+
+ def test_invalid_nesting(self, block):
+ msg = 'depths are mismatched'
+ assert_raises_regex(ValueError, msg, block, [1, [2]])
+ assert_raises_regex(ValueError, msg, block, [1, []])
+ assert_raises_regex(ValueError, msg, block, [[1], 2])
+ assert_raises_regex(ValueError, msg, block, [[], 2])
+ assert_raises_regex(ValueError, msg, block, [
+ [[1], [2]],
+ [[3, 4]],
+ [5] # missing brackets
+ ])
+
+ def test_empty_lists(self, block):
+ assert_raises_regex(ValueError, 'empty', block, [])
+ assert_raises_regex(ValueError, 'empty', block, [[]])
+ assert_raises_regex(ValueError, 'empty', block, [[1], []])
+
+ def test_tuple(self, block):
+ assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+ assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
+
+ def test_different_ndims(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 1, 3))
+
+ result = block([a, b, c])
+ expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_different_ndims_depths(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 2, 3))
+
+ result = block([[a, b], [c]])
+ expected = np.array([[[1., 2., 2.],
+ [3., 3., 3.],
+ [3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_block_memory_order(self, block):
+ # 3D
+ arr_c = np.zeros((3,)*3, order='C')
+ arr_f = np.zeros((3,)*3, order='F')
+
+ b_c = [[[arr_c, arr_c],
+ [arr_c, arr_c]],
+ [[arr_c, arr_c],
+ [arr_c, arr_c]]]
+ b_f = [[[arr_f, arr_f],
+ [arr_f, arr_f]],
+ [[arr_f, arr_f],
+ [arr_f, arr_f]]]
-if __name__ == "__main__":
- run_module_suite()
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+ arr_c = np.zeros((3, 3), order='C')
+ arr_f = np.zeros((3, 3), order='F')
+ # 2D
+ b_c = [[arr_c, arr_c],
+ [arr_c, arr_c]]
+
+ b_f = [[arr_f, arr_f],
+ [arr_f, arr_f]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+ class ArrayLike(object):
+ pass
+ a = ArrayLike()
+ b = ArrayLike()
+ c = ArrayLike()
+ assert_equal(list(_block_dispatcher(a)), [a])
+ assert_equal(list(_block_dispatcher([a])), [a])
+ assert_equal(list(_block_dispatcher([a, b])), [a, b])
+ assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+ # don't recurse into non-lists
+ assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 62fe3c04b..b83b8ccff 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,21 +1,25 @@
from __future__ import division, absolute_import, print_function
+import warnings
+import itertools
+
import numpy as np
-import numpy.core.umath_tests as umt
-import numpy.core.operand_flag_tests as opflag_tests
-from numpy.compat import asbytes
-from numpy.core.test_rational import rational, test_add, test_add_rationals
+import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
+import numpy.core._operand_flag_tests as opflag_tests
+import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_no_warnings
-)
+ assert_, assert_equal, assert_raises, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
+ assert_allclose,
+ )
+from numpy.core.numeric import pickle
-class TestUfuncKwargs(TestCase):
+class TestUfuncKwargs(object):
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
- assert_raises(TypeError, np.add, 1, 2, dtypex=np.int)
+ assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
@@ -29,24 +33,30 @@ class TestUfuncKwargs(TestCase):
def test_sig_dtype(self):
assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
- dtype=np.int)
+ dtype=int)
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
- dtype=np.int)
+ dtype=int)
+
+ def test_extobj_refcount(self):
+ # Should not segfault with USE_DEBUG.
+ assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
-class TestUfunc(TestCase):
+class TestUfunc(object):
def test_pickle(self):
- import pickle
- assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(pickle.dumps(np.sin,
+ protocol=proto)) is np.sin)
- # Check that ufunc not defined in the top level numpy namespace such as
- # numpy.core.test_rational.test_add can also be pickled
- assert_(pickle.loads(pickle.dumps(test_add)) is test_add)
+ # Check that ufunc not defined in the top level numpy namespace
+ # such as numpy.core._rational_tests.test_add can also be pickled
+ res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+ protocol=proto))
+ assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
- import pickle
- astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
- "(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
+ astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
+ b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
def test_reduceat_shifting_sum(self):
@@ -172,22 +182,22 @@ class TestUfunc(TestCase):
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
@@ -278,14 +288,98 @@ class TestUfunc(TestCase):
"""
pass
- def test_signature(self):
+ # from include/numpy/ufuncobject.h
+ size_inferred = 2
+ can_ignore = 4
+ def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
- # pass
- assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
-
- # pass. empty core signature; treat as plain ufunc (with trivial core)
- assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
-
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i),(i)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 1, 0))
+ assert_equal(ixs, (0, 0))
+ assert_equal(flags, (self.size_inferred,))
+ assert_equal(sizes, (-1,))
+
+ def test_signature1(self):
+ # empty core signature; treat as plain ufunc (with trivial core)
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(),()->()")
+ assert_equal(enabled, 0)
+ assert_equal(num_dims, (0, 0, 0))
+ assert_equal(ixs, ())
+ assert_equal(flags, ())
+ assert_equal(sizes, ())
+
+ def test_signature2(self):
+ # more complicated names for variables
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature3(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, u"(i1, i12), (J_1)->(i12, i2)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 2))
+ assert_equal(ixs, (0, 1, 2, 1, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature4(self):
+ # matrix_multiply signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n,k),(k,m)->(n,m)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred,)*3)
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature5(self):
+ # matmul signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n?,k),(k,m?)->(n?,m?)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred | self.can_ignore,
+ self.size_inferred,
+ self.size_inferred | self.can_ignore))
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature6(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "(3)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature7(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3),(03,3),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (0, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature8(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3?),(3?,3?),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature_failure0(self):
# in the following calls, a ValueError should be raised because
# of error in core signature
# FIXME These should be using assert_raises
@@ -298,6 +392,7 @@ class TestUfunc(TestCase):
except ValueError:
pass
+ def test_signature_failure1(self):
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
@@ -306,6 +401,7 @@ class TestUfunc(TestCase):
except ValueError:
pass
+ def test_signature_failure2(self):
# error: incomplete signature. letters outside of parenthesis are ignored
msg = "core_sig: incomplete signature"
try:
@@ -314,6 +410,7 @@ class TestUfunc(TestCase):
except ValueError:
pass
+ def test_signature_failure3(self):
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
@@ -322,9 +419,6 @@ class TestUfunc(TestCase):
except ValueError:
pass
- # more complicated names for variables
- assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
-
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
@@ -354,14 +448,78 @@ class TestUfunc(TestCase):
assert_equal(b, [0, 0, 1])
def test_true_divide(self):
- # True_divide has a non uniform signature, see #3484.
- # This also tests type_tuple_type_resolver.
- a = np.full(5, 12.5)
- b = np.full(5, 10.0)
- tgt = np.full(5, 1.25)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
- assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)
+ a = np.array(10)
+ b = np.array(20)
+ tgt = np.array(0.5)
+
+ for tc in 'bhilqBHILQefdgFDG':
+ dt = np.dtype(tc)
+ aa = a.astype(dt)
+ bb = b.astype(dt)
+
+ # Check result value and dtype.
+ for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+ # Check with no output type specified
+ if tc in 'FDG':
+ tgt = complex(x)/complex(y)
+ else:
+ tgt = float(x)/float(y)
+
+ res = np.true_divide(x, y)
+ rtol = max(np.finfo(res).resolution, 1e-15)
+ assert_allclose(res, tgt, rtol=rtol)
+
+ if tc in 'bhilqBHILQ':
+ assert_(res.dtype.name == 'float64')
+ else:
+ assert_(res.dtype.name == dt.name )
+
+ # Check with output type specified. This also checks for the
+ # incorrect casts in issue gh-3484 because the unary '-' does
+            # not change types, even for unsigned types. Hence casts in the
+ # ufunc from signed to unsigned and vice versa will lead to
+ # errors in the values.
+ for tcout in 'bhilqBHILQ':
+ dtout = np.dtype(tcout)
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+ for tcout in 'efdg':
+ dtout = np.dtype(tcout)
+ if tc in 'FDG':
+ # Casting complex to float is not allowed
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+ else:
+ tgt = float(x)/float(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ # Some test values result in invalid for float16.
+ with np.errstate(invalid='ignore'):
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res) and tcout == 'e':
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ for tcout in 'FDG':
+ dtout = np.dtype(tcout)
+ tgt = complex(x)/complex(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res):
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ # Check booleans
+ a = np.ones((), dtype=np.bool_)
+ res = np.true_divide(a, a)
+ assert_(res == 1.0)
+ assert_(res.dtype.name == 'float64')
+ res = np.true_divide(~a, a)
+ assert_(res == 0.0)
+ assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
@@ -371,13 +529,22 @@ class TestUfunc(TestCase):
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
- for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
+ for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
- assert_almost_equal(np.sum(d), tgt)
- assert_almost_equal(np.sum(d[::-1]), tgt)
+
+ # warning if sum overflows, which it does in float16
+ overflow = not np.isfinite(tgt)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ assert_almost_equal(np.sum(d), tgt)
+ assert_equal(len(w), 1 * overflow)
+
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+ assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
@@ -418,6 +585,17 @@ class TestUfunc(TestCase):
d += d
assert_almost_equal(d, 2. + 2j)
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
@@ -522,6 +700,232 @@ class TestUfunc(TestCase):
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
+ def test_axes_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ # basic tests on inputs (outputs tested below with matrix_multiply).
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ # default
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # integers ok for single axis.
+ c = inner1d(a, b, axes=[-1, -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # mix fine
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can omit last axis.
+ c = inner1d(a, b, axes=[-1, -1])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can pass in other types of integer (with __index__ protocol)
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ assert_array_equal(c, (a * b).sum(-1))
+ # swap some axes
+ c = inner1d(a, b, axes=[0, 0])
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 2])
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ # Check errors for improperly constructed axes arguments.
+ # should have list.
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
+ # needs enough elements
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
+ # should pass in indices.
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
+ # cannot pass an index unless there is only one dimension
+ # (output is wrong in this case)
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
+ # or pass in generally the wrong number of axes
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
+ # axes need to have same length.
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
+
+ # matrix_multiply signature: '(m,n),(n,p)->(m,p)'
+ mm = umt.matrix_multiply
+ a = np.arange(12).reshape((2, 3, 2))
+ b = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # Sanity check.
+ c = mm(a, b)
+ assert_array_equal(c, np.matmul(a, b))
+ # Default axes.
+ c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
+ assert_array_equal(c, np.matmul(a, b))
+ # Default with explicit axes.
+ c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_array_equal(c, np.matmul(a, b))
+ # swap some axes.
+ c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
+ assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
+ b.transpose(0, 3, 1, 2)))
+ # Default with output array.
+ c = np.empty((2, 2, 3, 1))
+ d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b))
+ # Transposed output array
+ c = np.empty((1, 2, 2, 3))
+ d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
+ # Check errors for improperly constructed axes arguments.
+ # wrong argument
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # axes should be list
+ assert_raises(TypeError, mm, a, b, axes=1)
+ assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
+ # list needs to have right length
+ assert_raises(ValueError, mm, a, b, axes=[])
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
+ # list should contain tuples for multiple axes
+ assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
+ assert_raises(TypeError,
+ mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
+ assert_raises(TypeError,
+ mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
+ # tuples should not have duplicated values
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
+ # arrays should have enough axes.
+ z = np.zeros((2, 2))
+ assert_raises(ValueError, mm, z, z[0])
+ assert_raises(ValueError, mm, z, z, out=z[:, 0])
+ assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
+ assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+ # Regular ufuncs should not accept axes.
+ assert_raises(TypeError, np.add, 1., 1., axes=[0])
+ # should be able to deal with bad unrelated kwargs.
+ assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
+
+ def test_axis_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axis=-1)
+ assert_array_equal(c, (a * b).sum(-1))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, axis=-1, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ c = inner1d(a, b, axis=0)
+ assert_array_equal(c, (a * b).sum(0))
+ # Sanity checks on innerwt and cumsum.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0),
+ np.sum(a * b * w, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
+ out = np.empty_like(a)
+ b = umt.cumsum(a, out=out, axis=0)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=0))
+ b = umt.cumsum(a, out=out, axis=1)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=-1))
+ # Check errors.
+ # Cannot pass in both axis and axes.
+ assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
+ # Not an integer.
+ assert_raises(TypeError, inner1d, a, b, axis=[0])
+ # more than 1 core dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # Output wrong size in axis.
+ out = np.empty((1, 2, 3), dtype=a.dtype)
+ assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
+ # Regular ufuncs should not accept axis.
+ assert_raises(TypeError, np.add, 1., 1., axis=0)
+
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axis and axes.
+ c = inner1d(a, b, axis=-1, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=False))
+ c = inner1d(a, b, axis=-1, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axis=0, keepdims=False)
+ assert_array_equal(c, (a * b).sum(0, keepdims=False))
+ c = inner1d(a, b, axis=0, keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
+ np.sum(a * b * w, axis=0, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # More than 1 core dimension, and core output dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+ # Regular ufuncs should not accept keepdims.
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False)
+
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
@@ -539,10 +943,99 @@ class TestUfunc(TestCase):
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+ def test_cross1d(self):
+ """Test with fixed-sized signature."""
+ a = np.eye(3)
+ assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+ out = np.zeros((3, 3))
+ result = umt.cross1d(a[0], a, out)
+ assert_(result is out)
+ assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+ assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+
+ def test_can_ignore_signature(self):
+ # Comparing the effects of ? in signature:
+ # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
+ # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
+ mat = np.arange(12).reshape((2, 3, 2))
+ single_vec = np.arange(2)
+ col_vec = single_vec[:, np.newaxis]
+ col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # matrix @ single column vector with proper dimension
+ mm_col_vec = umt.matrix_multiply(mat, col_vec)
+ # matmul does the same thing
+ matmul_col_vec = umt.matmul(mat, col_vec)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # matrix @ vector without dimension making it a column vector.
+ # matrix multiply fails -> missing core dim.
+ assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+ # matmul mimicker passes, and returns a vector.
+ matmul_col = umt.matmul(mat, single_vec)
+ assert_array_equal(matmul_col, mm_col_vec.squeeze())
+ # Now with a column array: same as for column vector,
+ # broadcasting sensibly.
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+ matmul_col_vec = umt.matmul(mat, col_vec_array)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # As above, but for row vector
+ single_vec = np.arange(3)
+ row_vec = single_vec[np.newaxis, :]
+ row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+ # row vector @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec, mat)
+ matmul_row_vec = umt.matmul(row_vec, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # single row vector @ matrix
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+ matmul_row = umt.matmul(single_vec, mat)
+ assert_array_equal(matmul_row, mm_row_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+ matmul_row_vec = umt.matmul(row_vec_array, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # Now for vector combinations
+ # row vector @ column vector
+ col_vec = row_vec.T
+ col_vec_array = row_vec_array.swapaxes(-2, -1)
+ mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+ matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+ assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+ # single row vector @ single col vector
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+ matmul_row_col = umt.matmul(single_vec, single_vec)
+ assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+        # row vector array @ column vector array
+ mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+ matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+ assert_array_equal(matmul_row_col_array, mm_row_col_array)
+ # Finally, check that things are *not* squeezed if one gives an
+ # output.
+ out = np.zeros_like(mm_row_col_array)
+ out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ out[:] = 0
+ out = umt.matmul(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ # And check one cannot put missing dimensions back.
+ out = np.zeros_like(mm_row_col_vec)
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+ out)
+ # But fine for matmul, since it is just a broadcast.
+ out = umt.matmul(single_vec, single_vec, out)
+ assert_array_equal(out, mm_row_col_vec.squeeze())
+
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
+ def test_matrix_multiply_umath_empty(self):
+ res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
+ assert_array_equal(res, np.zeros((0, 0)))
+ res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
+ assert_array_equal(res, np.zeros((10, 10)))
+
def compare_matrix_multiply_results(self, tp):
d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
@@ -598,7 +1091,7 @@ class TestUfunc(TestCase):
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
- a = np.arange(12, dtype=np.float).reshape(4, 3)
+ a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
@@ -607,6 +1100,11 @@ class TestUfunc(TestCase):
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
+ def test_cumsum(self):
+ a = np.arange(10)
+ result = umt.cumsum(a)
+ assert_array_equal(result, a.cumsum())
+
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
@@ -633,6 +1131,21 @@ class TestUfunc(TestCase):
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
+ def test_object_comparison(self):
+ class HasComparisons(object):
+ def __eq__(self, other):
+ return '=='
+
+ arr0d = np.array(HasComparisons())
+ assert_equal(arr0d == arr0d, True)
+ assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
+ assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
+
+ arr1d = np.array([HasComparisons()])
+ assert_equal(arr1d == arr1d, np.array([True]))
+ assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
+ assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
@@ -648,6 +1161,7 @@ class TestUfunc(TestCase):
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
@@ -684,13 +1198,6 @@ class TestUfunc(TestCase):
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
- def test_object_scalar_multiply(self):
- # Tickets #2469 and #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.multiply(arr, 3), desired)
- assert_equal(np.multiply(3, arr), desired)
-
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
@@ -703,14 +1210,14 @@ class TestUfunc(TestCase):
def test_axis_out_of_bounds(self):
a = np.array([False, False])
- assert_raises(ValueError, a.all, axis=1)
+ assert_raises(np.AxisError, a.all, axis=1)
a = np.array([False, False])
- assert_raises(ValueError, a.all, axis=-2)
+ assert_raises(np.AxisError, a.all, axis=-2)
a = np.array([False, False])
- assert_raises(ValueError, a.any, axis=1)
+ assert_raises(np.AxisError, a.any, axis=1)
a = np.array([False, False])
- assert_raises(ValueError, a.any, axis=-2)
+ assert_raises(np.AxisError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
@@ -779,8 +1286,19 @@ class TestUfunc(TestCase):
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+ def test_where_param_alloc(self):
+ # With casting and allocated output
+ a = np.array([1], dtype=np.int64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ # No casting and allocated output
+ a = np.array([1], dtype=np.float64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
def check_identityless_reduction(self, a):
- # np.minimum.reduce is a identityless reduction
+ # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
@@ -849,6 +1367,35 @@ class TestUfunc(TestCase):
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
+ def test_initial_reduction(self):
+ # np.minimum.reduce is an identityless reduction
+
+ # For cases like np.maximum(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
@@ -952,15 +1499,17 @@ class TestUfunc(TestCase):
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
- c = np.empty(3, dtype=rational)
+ c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
- result = test_add(a, b, c)
- assert_equal(result, np.array([0, 2, 4], dtype=rational))
+ result = _rational_tests.test_add(a, b, c)
+ target = np.array([0, 2, 4], dtype=_rational_tests.rational)
+ assert_equal(result, target)
# no output type should raise TypeError
- assert_raises(TypeError, test_add, a, b)
+ with assert_raises(TypeError):
+ _rational_tests.test_add(a, b)
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
@@ -976,7 +1525,7 @@ class TestUfunc(TestCase):
assert_equal(a, 10)
def test_struct_ufunc(self):
- import numpy.core.struct_ufunc_test as struct_ufunc
+ import numpy.core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
@@ -985,16 +1534,31 @@ class TestUfunc(TestCase):
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
def test_custom_ufunc(self):
- a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
- dtype=rational)
- b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
- dtype=rational)
-
- result = test_add_rationals(a, b)
- expected = np.array([rational(1), rational(2, 3), rational(1, 2)],
- dtype=rational)
+ a = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+ b = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+
+ result = _rational_tests.test_add_rationals(a, b)
+ expected = np.array(
+ [_rational_tests.rational(1),
+ _rational_tests.rational(2, 3),
+ _rational_tests.rational(1, 2)],
+ dtype=_rational_tests.rational)
assert_equal(result, expected)
+ def test_custom_ufunc_forced_sig(self):
+ # gh-9351 - looking for a non-first userloop would previously hang
+ with assert_raises(TypeError):
+ np.multiply(_rational_tests.rational(1), 1,
+ signature=(_rational_tests.rational, int, None))
+
def test_custom_array_like(self):
class MyThing(object):
@@ -1013,7 +1577,7 @@ class TestUfunc(TestCase):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
- if len(i) > len(self.shape):
+ if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
@@ -1157,9 +1721,9 @@ class TestUfunc(TestCase):
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
- values = np.array(['a', 1], dtype=np.object)
- self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
- assert_array_equal(values, np.array(['a', 1], dtype=np.object))
+ values = np.array(['a', 1], dtype=object)
+ assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+ assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
@@ -1183,15 +1747,18 @@ class TestUfunc(TestCase):
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
# too little
assert_raises(TypeError, f)
# too much
- assert_raises(TypeError, f, d, 0, None, None, False, 1)
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
@@ -1226,7 +1793,7 @@ class TestUfunc(TestCase):
# https://github.com/numpy/numpy/issues/4855
class MyA(np.ndarray):
- def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*(input.view(np.ndarray)
for input in inputs), **kwargs)
a = np.arange(12.).reshape(4,3)
@@ -1236,6 +1803,16 @@ class TestUfunc(TestCase):
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
+ def test_scalar_equal(self):
+ # Scalar comparisons should always work, without deprecation warnings.
+ # even when the ufunc fails.
+ a = np.array(0.)
+ b = np.array('a')
+ assert_(a != b)
+ assert_(b != a)
+ assert_(not (a == b))
+ assert_(not (b == a))
+
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
@@ -1245,18 +1822,38 @@ class TestUfunc(TestCase):
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
- np.minimum, np.mod
- ]
-
- # These functions still return NotImplemented. Will be fixed in
- # future.
- # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
+ np.minimum, np.mod,
+ np.greater, np.greater_equal, np.less, np.less_equal,
+ np.equal, np.not_equal]
a = np.array('1')
b = 1
+ c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
+ assert_raises(TypeError, f, c, a)
+
+ def test_reduce_noncontig_output(self):
+ # Check that reduction deals with non-contiguous output arrays
+ # appropriately.
+ #
+ # gh-8036
+
+ x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
+ x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
+ y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
+ y = y_base[::2,:]
+
+ y_base_copy = y_base.copy()
+
+ r0 = np.add.reduce(x, out=y.copy(), axis=2)
+ r1 = np.add.reduce(x, out=y, axis=2)
+ # The results should match, and y_base shouldn't get clobbered
+ assert_equal(r0, r1)
+ assert_equal(y_base[1,:], y_base_copy[1,:])
+ assert_equal(y_base[3,:], y_base_copy[3,:])
-if __name__ == "__main__":
- run_module_suite()
+ def test_no_doc_string(self):
+ # gh-9337
+ assert_('\n' not in umt.inner1d_no_doc.__doc__)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index f0f664a6f..2f8edebc0 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1,18 +1,20 @@
from __future__ import division, absolute_import, print_function
-import sys
import platform
import warnings
+import fnmatch
import itertools
+import pytest
-from numpy.testing.utils import _gen_alignment_data
import numpy.core.umath as ncu
+from numpy.core import _umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- dec, assert_allclose, assert_no_warnings
-)
+ assert_allclose, assert_no_warnings, suppress_warnings,
+ _gen_alignment_data
+ )
def on_powerpc():
@@ -22,14 +24,14 @@ def on_powerpc():
class _FilterInvalids(object):
- def setUp(self):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
@@ -40,7 +42,7 @@ class TestConstants(TestCase):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
-class TestOut(TestCase):
+class TestOut(object):
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
@@ -173,7 +175,56 @@ class TestOut(TestCase):
assert_(w[0].category is DeprecationWarning)
-class TestDivision(TestCase):
+class TestComparisons(object):
+ def test_ignore_object_identity_in_equal(self):
+ # Check error raised when comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType(object):
+ def __eq__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.equal(a, a), [False])
+
+ def test_ignore_object_identity_in_not_equal(self):
+ # Check error raised when comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.not_equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType(object):
+ def __ne__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.not_equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.not_equal(a, a), [True])
+
+
+class TestAdd(object):
+ def test_reduce_alignment(self):
+ # gh-9876
+ # make sure arrays with weird strides work with the optimizations in
+ # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
+ # 4 byte offset, even though its itemsize is 8.
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
+ a['a'] = -1
+ assert_equal(a['b'].sum(), 0)
+
+
+class TestDivision(object):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
@@ -223,27 +274,34 @@ class TestDivision(TestCase):
assert_equal(y, [1.e+110, 0], err_msg=msg)
-class TestRemainder(TestCase):
+def floor_divide_and_remainder(x, y):
+ return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestRemainder(object):
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
- for dt1, dt2 in itertools.product(dt, dt):
- for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
- if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']:
- continue
- if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']:
- continue
- fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
- msg = fmt % (dt1, dt2, sg1, sg2)
- a = np.array(sg1*71, dtype=dt1)
- b = np.array(sg2*19, dtype=dt2)
- div = np.floor_divide(a, b)
- rem = np.remainder(a, b)
- assert_equal(div*b + rem, a, err_msg=msg)
- if sg2 == -1:
- assert_(b < rem <= 0, msg)
- else:
- assert_(b > rem >= 0, msg)
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)
+ b = np.array(sg2*19, dtype=dt2)
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
def test_float_remainder_exact(self):
# test that float results are exact for small integers. This also
@@ -262,32 +320,32 @@ class TestRemainder(TestCase):
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
- for dt in np.typecodes['Float']:
- msg = 'dtype: %s' % (dt,)
- fa = a.astype(dt)
- fb = b.astype(dt)
- div = np.floor_divide(fa, fb)
- rem = np.remainder(fa, fb)
- assert_equal(div, tgtdiv, err_msg=msg)
- assert_equal(rem, tgtrem, err_msg=msg)
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ div, rem = op(fa, fb)
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
def test_float_remainder_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
- for dt1, dt2 in itertools.product(dt, dt):
- for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
- fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
- msg = fmt % (dt1, dt2, sg1, sg2)
- a = np.array(sg1*78*6e-8, dtype=dt1)
- b = np.array(sg2*6e-8, dtype=dt2)
- div = np.floor_divide(a, b)
- rem = np.remainder(a, b)
- # Equal assertion should hold when fmod is used
- assert_equal(div*b + rem, a, err_msg=msg)
- if sg2 == -1:
- assert_(b < rem <= 0, msg)
- else:
- assert_(b > rem >= 0, msg)
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)
+ b = np.array(sg2*6e-8, dtype=dt2)
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
def test_float_remainder_corner_cases(self):
# Check remainder magnitude.
@@ -300,9 +358,8 @@ class TestRemainder(TestCase):
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
- with warnings.catch_warnings():
- warnings.simplefilter('always')
- warnings.simplefilter('ignore', RuntimeWarning)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
@@ -319,7 +376,7 @@ class TestRemainder(TestCase):
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
-class TestCbrt(TestCase):
+class TestCbrt(object):
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
@@ -332,7 +389,7 @@ class TestCbrt(TestCase):
assert_equal(np.cbrt(-np.inf), -np.inf)
-class TestPower(TestCase):
+class TestPower(object):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
@@ -419,14 +476,70 @@ class TestPower(TestCase):
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
- assert_((x**2.00001).dtype is (x**2.0).dtype)
+ res = x**2.0
+ assert_((x**2.00001).dtype is res.dtype)
+ assert_array_equal(res, [1, 4, 9])
+ # check the inplace operation on the casted copy doesn't mess with x
+ assert_(not np.may_share_memory(res, x))
+ assert_array_equal(x, [1, 2, 3])
# Check that the fast path ignores 1-element not 0-d arrays
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
-
-class TestLog2(TestCase):
+ def test_integer_power(self):
+ a = np.array([15, 15], 'i8')
+ b = np.power(a, a)
+ assert_equal(b, [437893890380859375, 437893890380859375])
+
+ def test_integer_power_with_integer_zero_exponent(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ arr = np.arange(-10, 10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ dtypes = np.typecodes['UnsignedInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ def test_integer_power_of_1(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(1, arr), np.ones_like(arr))
+
+ def test_integer_power_of_zero(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(1, 10, dtype=dt)
+ assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+ def test_integer_to_negative_power(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ a = np.array([0, 1, 2, 3], dtype=dt)
+ b = np.array([0, 1, 2, -3], dtype=dt)
+ one = np.array(1, dtype=dt)
+ minusone = np.array(-1, dtype=dt)
+ assert_raises(ValueError, np.power, a, b)
+ assert_raises(ValueError, np.power, a, minusone)
+ assert_raises(ValueError, np.power, one, b)
+ assert_raises(ValueError, np.power, one, minusone)
+
+
+class TestFloat_power(object):
+ def test_type_conversion(self):
+ arg_type = '?bhilBHILefdgFDG'
+ res_type = 'ddddddddddddgDDG'
+ for dtin, dtout in zip(arg_type, res_type):
+ msg = "dtin: %s, dtout: %s" % (dtin, dtout)
+ arg = np.ones(1, dtype=dtin)
+ res = np.float_power(arg, arg)
+ assert_(res.dtype.name == np.dtype(dtout).name, msg)
+
+
+class TestLog2(object):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -457,7 +570,7 @@ class TestLog2(TestCase):
assert_(w[2].category is RuntimeWarning)
-class TestExp2(TestCase):
+class TestExp2(object):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -509,7 +622,7 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
-class TestLog(TestCase):
+class TestLog(object):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -520,7 +633,7 @@ class TestLog(TestCase):
assert_almost_equal(np.log(xf), yf)
-class TestExp(TestCase):
+class TestExp(object):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -571,8 +684,12 @@ class TestLogAddExp(_FilterInvalids):
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+ def test_reduce(self):
+ assert_equal(np.logaddexp.identity, -np.inf)
+ assert_equal(np.logaddexp.reduce([]), -np.inf)
+
-class TestLog1p(TestCase):
+class TestLog1p(object):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
@@ -586,7 +703,7 @@ class TestLog1p(TestCase):
assert_equal(ncu.log1p(-np.inf), np.nan)
-class TestExpm1(TestCase):
+class TestExpm1(object):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
@@ -599,11 +716,17 @@ class TestExpm1(TestCase):
assert_equal(ncu.expm1(-np.inf), -1.)
-class TestHypot(TestCase, object):
+class TestHypot(object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
+ def test_reduce(self):
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
+ assert_equal(ncu.hypot.reduce([]), 0.0)
+
def assert_hypot_isnan(x, y):
with np.errstate(invalid='ignore'):
@@ -617,7 +740,7 @@ def assert_hypot_isinf(x, y):
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
-class TestHypotSpecialValues(TestCase):
+class TestHypotSpecialValues(object):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
@@ -654,7 +777,7 @@ def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
-class TestArctan2SpecialValues(TestCase):
+class TestArctan2SpecialValues(object):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
@@ -723,7 +846,7 @@ class TestArctan2SpecialValues(TestCase):
assert_arctan2_isnan(np.nan, np.nan)
-class TestLdexp(TestCase):
+class TestLdexp(object):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
@@ -789,22 +912,22 @@ class TestMaximum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
@@ -847,22 +970,22 @@ class TestMinimum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
@@ -903,9 +1026,9 @@ class TestFmax(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmax(arg1, arg2), out)
@@ -945,13 +1068,19 @@ class TestFmin(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
-class TestBool(TestCase):
+class TestBool(object):
+ def test_exceptions(self):
+ a = np.ones(1, dtype=np.bool_)
+ assert_raises(TypeError, np.negative, a)
+ assert_raises(TypeError, np.positive, a)
+ assert_raises(TypeError, np.subtract, a, a)
+
def test_truth_table_logical(self):
# 2, 3 and 4 serves as true values
input1 = [0, 0, 3, 2]
@@ -990,8 +1119,25 @@ class TestBool(TestCase):
out = [False, True, True, False]
assert_equal(np.bitwise_xor(arg1, arg2), out)
+ def test_reduce(self):
+ none = np.array([0, 0, 0, 0], bool)
+ some = np.array([1, 0, 1, 1], bool)
+ every = np.array([1, 1, 1, 1], bool)
+ empty = np.array([], bool)
+
+ arrs = [none, some, every, empty]
+
+ for arr in arrs:
+ assert_equal(np.logical_and.reduce(arr), all(arr))
-class TestBitwiseUFuncs(TestCase):
+ for arr in arrs:
+ assert_equal(np.logical_or.reduce(arr), any(arr))
+
+ for arr in arrs:
+ assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
+
+
+class TestBitwiseUFuncs(object):
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
@@ -1030,7 +1176,6 @@ class TestBitwiseUFuncs(TestCase):
assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
-
def test_identity(self):
assert_(np.bitwise_or.identity == 0, 'bitwise_or')
assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
@@ -1076,10 +1221,10 @@ class TestBitwiseUFuncs(TestCase):
assert_(type(f.reduce(btype)) is bool, msg)
-class TestInt(TestCase):
+class TestInt(object):
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
- o = np.ones(10 * 2, dtype=np.bool)
+ o = np.ones(10 * 2, dtype=bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
@@ -1087,24 +1232,46 @@ class TestInt(TestCase):
assert_array_equal(o, tgt)
-class TestFloatingPoint(TestCase):
+class TestFloatingPoint(object):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-class TestDegrees(TestCase):
+class TestDegrees(object):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-class TestRadians(TestCase):
+class TestRadians(object):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-class TestSign(TestCase):
+class TestHeavside(object):
+ def test_heaviside(self):
+ x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+ expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+ expected1 = expectedhalf.copy()
+ expected1[0, 2] = 1
+
+ h = ncu.heaviside(x, 0.5)
+ assert_equal(h, expectedhalf)
+
+ h = ncu.heaviside(x, 1.0)
+ assert_equal(h, expected1)
+
+ x = x.astype(np.float32)
+
+ h = ncu.heaviside(x, np.float32(0.5))
+ assert_equal(h, expectedhalf.astype(np.float32))
+
+ h = ncu.heaviside(x, np.float32(1.0))
+ assert_equal(h, expected1.astype(np.float32))
+
+
+class TestSign(object):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
@@ -1121,7 +1288,7 @@ class TestSign(TestCase):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
- a = np.sign(foo.astype(np.object))
+ a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
@@ -1130,11 +1297,12 @@ class TestSign(TestCase):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
- a = np.sign(foo.astype(np.object))
+ # FIXME: a not used
+ a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
@@ -1145,8 +1313,11 @@ class TestMinMax(TestCase):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
- assert_(np.isnan(inp.max()), msg=emsg)
- assert_(np.isnan(inp.min()), msg=emsg)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ assert_(np.isnan(inp.max()), msg=emsg)
+ assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
@@ -1160,8 +1331,21 @@ class TestMinMax(TestCase):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
+ def test_reduce_reorder(self):
+ # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
+ # and put it before the call to an intrisic function that causes
+ # invalid status to be set. Also make sure warnings are not emitted
+ for n in (2, 4, 8, 16, 32):
+ for dt in (np.float32, np.float16, np.complex64):
+ for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+ assert_equal(np.min(r), np.nan)
+
+ def test_minimize_no_warns(self):
+ a = np.minimum(np.nan, 1)
+ assert_equal(a, np.nan)
+
-class TestAbsoluteNegative(TestCase):
+class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
@@ -1170,27 +1354,26 @@ class TestAbsoluteNegative(TestCase):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
- self.assertTrue((out >= 0).all())
+ assert_((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
- # will throw invalid flag depending on compiler optimizations
- with np.errstate(invalid='ignore'):
- for v in [np.nan, -np.inf, np.inf]:
- for i in range(inp.size):
- d = np.arange(inp.size, dtype=dt)
- inp[:] = -d
- inp[i] = v
- d[i] = -v if v == -np.inf else v
- assert_array_equal(np.abs(inp), d, err_msg=msg)
- np.abs(inp, out=out)
- assert_array_equal(out, d, err_msg=msg)
-
- assert_array_equal(-inp, -1*inp, err_msg=msg)
- np.negative(inp, out=out)
- assert_array_equal(out, -1*inp, err_msg=msg)
+ for v in [np.nan, -np.inf, np.inf]:
+ for i in range(inp.size):
+ d = np.arange(inp.size, dtype=dt)
+ inp[:] = -d
+ inp[i] = v
+ d[i] = -v if v == -np.inf else v
+ assert_array_equal(np.abs(inp), d, err_msg=msg)
+ np.abs(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
+
+ assert_array_equal(-inp, -1*inp, err_msg=msg)
+ d = -1 * inp
+ np.negative(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
@@ -1204,7 +1387,26 @@ class TestAbsoluteNegative(TestCase):
np.abs(np.ones_like(d), out=d)
-class TestSpecialMethods(TestCase):
+class TestPositive(object):
+ def test_valid(self):
+ valid_dtypes = [int, float, complex, object]
+ for dtype in valid_dtypes:
+ x = np.arange(5, dtype=dtype)
+ result = np.positive(x)
+ assert_equal(x, result, err_msg=str(dtype))
+
+ def test_invalid(self):
+ with assert_raises(TypeError):
+ np.positive(True)
+ with assert_raises(TypeError):
+ np.positive(np.datetime64('2000-01-01'))
+ with assert_raises(TypeError):
+ np.positive(np.array(['foo'], dtype=str))
+ with assert_raises(TypeError):
+ np.positive(np.array(['bar'], dtype=object))
+
+
+class TestSpecialMethods(object):
def test_wrap(self):
class with_wrap(object):
@@ -1221,11 +1423,62 @@ class TestSpecialMethods(TestCase):
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
- self.assertTrue(func is ncu.minimum)
- self.assertEqual(len(args), 2)
+ assert_(func is ncu.minimum)
+ assert_equal(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
- self.assertEqual(i, 0)
+ assert_equal(i, 0)
+
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.empty(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+ # any other ufuncs are calld by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -1241,7 +1494,7 @@ class TestSpecialMethods(TestCase):
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
- self.assertTrue(isinstance(x, with_wrap))
+ assert_(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
@@ -1255,7 +1508,7 @@ class TestSpecialMethods(TestCase):
a = A()
x = np.float64(1)*a
- self.assertTrue(isinstance(x, A))
+ assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
@@ -1296,37 +1549,70 @@ class TestSpecialMethods(TestCase):
b = B()
c = C()
f = ncu.minimum
- self.assertTrue(type(f(x, x)) is np.ndarray)
- self.assertTrue(type(f(x, a)) is A)
- self.assertTrue(type(f(x, b)) is B)
- self.assertTrue(type(f(x, c)) is C)
- self.assertTrue(type(f(a, x)) is A)
- self.assertTrue(type(f(b, x)) is B)
- self.assertTrue(type(f(c, x)) is C)
-
- self.assertTrue(type(f(a, a)) is A)
- self.assertTrue(type(f(a, b)) is B)
- self.assertTrue(type(f(b, a)) is B)
- self.assertTrue(type(f(b, b)) is B)
- self.assertTrue(type(f(b, c)) is C)
- self.assertTrue(type(f(c, b)) is C)
- self.assertTrue(type(f(c, c)) is C)
-
- self.assertTrue(type(ncu.exp(a) is A))
- self.assertTrue(type(ncu.exp(b) is B))
- self.assertTrue(type(ncu.exp(c) is C))
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+ assert_(type(ncu.exp(a) is A))
+ assert_(type(ncu.exp(b) is B))
+ assert_(type(ncu.exp(c) is C))
def test_failing_wrap(self):
class A(object):
def __array__(self):
- return np.zeros(1)
+ return np.zeros(2)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+ def test_failing_out_wrap(self):
+
+ singleton = np.array([1.0])
+
+ class Ok(np.ndarray):
+ def __array_wrap__(self, obj):
+ return singleton
+
+ class Bad(np.ndarray):
+ def __array_wrap__(self, obj):
+ raise RuntimeError
+
+ ok = np.empty(1).view(Ok)
+ bad = np.empty(1).view(Bad)
+
+ # double-free (segfault) of "ok" if "bad" raises an exception
+ for i in range(10):
+ assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+ def test_none_wrap(self):
+ # Tests that issue #8507 is resolved. Previously, this would segfault
+
+ class A(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context=None):
+ return None
+
+ a = A()
+ assert_equal(ncu.maximum(a, a), None)
def test_default_prepare(self):
@@ -1358,6 +1644,22 @@ class TestSpecialMethods(TestCase):
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
+ def test_prepare_out(self):
+
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+
+ def __array_prepare__(self, arr, context):
+ return np.array(arr).view(type=with_prepare)
+
+ a = np.array([1]).view(type=with_prepare)
+ x = np.add(a, a, a)
+ # Returned array is new, because of the strange
+ # __array_prepare__ above
+ assert_(not np.shares_memory(x, a))
+ assert_equal(x, np.array([2]))
+ assert_equal(type(x), with_prepare)
+
def test_failing_prepare(self):
class A(object):
@@ -1368,7 +1670,7 @@ class TestSpecialMethods(TestCase):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
@@ -1390,58 +1692,40 @@ class TestSpecialMethods(TestCase):
a = A()
ncu.maximum(np.zeros(1), a)
- self.assertTrue(a.func is ncu.maximum)
+ assert_(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
- self.assertTrue(a.args[1] is a)
- self.assertTrue(a.i == 1)
+ assert_(a.args[1] is a)
+ assert_(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
- def test_ufunc_override_disabled(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- # This test should be removed when __numpy_ufunc__ is re-enabled.
-
- class MyArray(object):
- def __numpy_ufunc__(self, *args, **kwargs):
- self._numpy_ufunc_called = True
-
- my_array = MyArray()
- real_array = np.ones(10)
- assert_raises(TypeError, lambda: real_array + my_array)
- assert_raises(TypeError, np.add, real_array, my_array)
- assert not hasattr(my_array, "_numpy_ufunc_called")
-
-
def test_ufunc_override(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
-
+ # check override works even with instance with high priority.
class A(object):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
- return self, func, method, pos, inputs, kwargs
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return self, func, method, inputs, kwargs
+
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
a = A()
- b = np.matrix([1])
+ b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
- res1 = np.dot(a, b)
+ res1 = np.multiply(b, b, out=a)
# self
assert_equal(res0[0], a)
assert_equal(res1[0], a)
assert_equal(res0[1], np.multiply)
- assert_equal(res1[1], np.dot)
+ assert_equal(res1[1], np.multiply)
assert_equal(res0[2], '__call__')
assert_equal(res1[2], '__call__')
- assert_equal(res0[3], 0)
- assert_equal(res1[3], 0)
- assert_equal(res0[4], (a, b))
- assert_equal(res1[4], (a, b))
- assert_equal(res0[5], {})
- assert_equal(res1[5], {})
+ assert_equal(res0[3], (a, b))
+ assert_equal(res1[3], (b, b))
+ assert_equal(res0[4], {})
+ assert_equal(res1[4], {'out': (a,)})
def test_ufunc_override_mro(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
# Some multi arg functions for testing.
def tres_mul(a, b, c):
@@ -1455,30 +1739,34 @@ class TestSpecialMethods(TestCase):
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
class A(object):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "A"
class ASub(A):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "ASub"
class B(object):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "B"
class C(object):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
+ def __init__(self):
+ self.count = 0
+
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
return NotImplemented
- class CSub(object):
- def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
+ class CSub(C):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
- c_sub = CSub()
# Standard
res = np.multiply(a, a_sub)
@@ -1489,11 +1777,27 @@ class TestSpecialMethods(TestCase):
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
+ assert_equal(c.count, 1)
+ # Check our counter works, so we can trust tests below.
+ res = np.multiply(c, a)
+ assert_equal(c.count, 2)
# Both NotImplemented.
+ c = C()
+ c_sub = CSub()
assert_raises(TypeError, np.multiply, c, c_sub)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = c_sub.count = 0
assert_raises(TypeError, np.multiply, c_sub, c)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, c, c)
+ assert_equal(c.count, 1)
+ c.count = 0
assert_raises(TypeError, np.multiply, 2, c)
+ assert_equal(c.count, 1)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
@@ -1505,11 +1809,19 @@ class TestSpecialMethods(TestCase):
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
+ c.count = 0
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
+ assert_equal(c.count, 1)
+ c.count = 0
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
+ assert_equal(c.count, 0)
+ c.count = 0
assert_equal(three_mul_ufunc(a, b, c), "A")
+ assert_equal(c.count, 0)
+ c_sub.count = 0
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
+ assert_equal(c_sub.count, 0)
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
@@ -1528,17 +1840,31 @@ class TestSpecialMethods(TestCase):
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
+ c = C()
+ c_sub = CSub()
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
+ assert_equal(c.count, 1)
+ c.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
- assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ c2 = C()
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ assert_equal(c2.count, 0)
+ c.count = c2.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 0)
+ assert_equal(c2.count, 1)
def test_ufunc_override_methods(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
class A(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return self, ufunc, method, pos, inputs, kwargs
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return self, ufunc, method, inputs, kwargs
# __call__
a = A()
@@ -1546,45 +1872,70 @@ class TestSpecialMethods(TestCase):
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
- assert_equal(res[3], 1)
- assert_equal(res[4], (1, a))
- assert_equal(res[5], {'foo': 'bar', 'answer': 42})
+ assert_equal(res[3], (1, a))
+ assert_equal(res[4], {'foo': 'bar', 'answer': 42})
+
+ # __call__, wrong args
+ assert_raises(TypeError, np.multiply, a)
+ assert_raises(TypeError, np.multiply, a, a, a, a)
+ assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
+ assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a,))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'keepdims': 'keep0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
- keepdims='keep0')
+ keepdims='keep0', initial='init0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a,))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'keepdims': 'keep0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0',
+ 'initial': 'init0'})
+
+ # reduce, output equal to None removed, but not other explicit ones,
+ # even if they are at their default value.
+ res = np.multiply.reduce(a, 0, None, None, False)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+ assert_equal(res[4], {'axis': 0, 'keepdims': True})
+ res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
+ # np._NoValue ignored for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ # None kept for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
+
+ # reduce, wrong args
+ assert_raises(ValueError, np.multiply.reduce, a, out=())
+ assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a,))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
@@ -1592,22 +1943,35 @@ class TestSpecialMethods(TestCase):
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a,))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # accumulate, output equal to None removed.
+ res = np.multiply.accumulate(a, 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+ assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+ res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # accumulate, wrong args
+ assert_raises(ValueError, np.multiply.accumulate, a, out=())
+ assert_raises(ValueError, np.multiply.accumulate, a,
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.accumulate, a,
+ 'axis0', axis='axis0')
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a, [4, 2]))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
@@ -1615,39 +1979,58 @@ class TestSpecialMethods(TestCase):
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a, [4, 2]))
- assert_equal(res[5], {'dtype':'dtype0',
- 'out': 'out0',
- 'axis': 'axis0'})
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # reduceat, output equal to None removed.
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # reduceat, wrong args
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
+ 'axis0', axis='axis0')
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a, 42))
- assert_equal(res[5], {})
+ assert_equal(res[3], (a, 42))
+ assert_equal(res[4], {})
+
+ # outer, wrong args
+ assert_raises(TypeError, np.multiply.outer, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
- assert_equal(res[3], 0)
- assert_equal(res[4], (a, [4, 2], 'b0'))
+ assert_equal(res[3], (a, [4, 2], 'b0'))
+
+ # at, wrong args
+ assert_raises(TypeError, np.multiply.at, a)
+ assert_raises(TypeError, np.multiply.at, a, a, a, a)
def test_ufunc_override_out(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
class A(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
class B(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
a = A()
@@ -1659,12 +2042,12 @@ class TestSpecialMethods(TestCase):
res4 = np.multiply(a, 4, 'out_arg')
res5 = np.multiply(a, 5, out='out_arg')
- assert_equal(res0['out'], 'out_arg')
- assert_equal(res1['out'], 'out_arg')
- assert_equal(res2['out'], 'out_arg')
- assert_equal(res3['out'], 'out_arg')
- assert_equal(res4['out'], 'out_arg')
- assert_equal(res5['out'], 'out_arg')
+ assert_equal(res0['out'][0], 'out_arg')
+ assert_equal(res1['out'][0], 'out_arg')
+ assert_equal(res2['out'][0], 'out_arg')
+ assert_equal(res3['out'][0], 'out_arg')
+ assert_equal(res4['out'][0], 'out_arg')
+ assert_equal(res5['out'][0], 'out_arg')
# ufuncs with multiple output modf and frexp.
res6 = np.modf(a, 'out0', 'out1')
@@ -1674,28 +2057,395 @@ class TestSpecialMethods(TestCase):
assert_equal(res7['out'][0], 'out0')
assert_equal(res7['out'][1], 'out1')
+ # While we're at it, check that default output is never passed on.
+ assert_(np.sin(a, None) == {})
+ assert_(np.sin(a, out=None) == {})
+ assert_(np.sin(a, out=(None,)) == {})
+ assert_(np.modf(a, None) == {})
+ assert_(np.modf(a, None, None) == {})
+ assert_(np.modf(a, out=(None, None)) == {})
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_(np.modf(a, out=None) == {})
+ assert_(w[0].category is DeprecationWarning)
+
+ # don't give positional and output argument, or too many arguments.
+ # wrong number of arguments in the tuple is an error too.
+ assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+ assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(ValueError, np.multiply, a, out=())
+ assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+ assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
+
def test_ufunc_override_exception(self):
- # 2016-01-29: NUMPY_UFUNC_DISABLED
- return
class A(object):
- def __numpy_ufunc__(self, *a, **kwargs):
+ def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
- for func in [np.divide, np.dot]:
- assert_raises(ValueError, func, a, a)
+ assert_raises(ValueError, np.negative, 1, out=a)
+ assert_raises(ValueError, np.negative, a)
+ assert_raises(ValueError, np.divide, 1., a)
+
+ def test_ufunc_override_not_implemented(self):
+
+ class A(object):
+ def __array_ufunc__(self, *args, **kwargs):
+ return NotImplemented
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.negative(A())
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+ "out=(1,)): 'A', 'object', 'int'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.add(A(), object(), out=1)
+
+ def test_ufunc_override_disabled(self):
+
+ class OptOut(object):
+ __array_ufunc__ = None
-class TestChoose(TestCase):
+ opt_out = OptOut()
+
+ # ufuncs always raise
+ msg = "operand 'OptOut' does not support ufuncs"
+ with assert_raises_regex(TypeError, msg):
+ np.add(opt_out, 1)
+ with assert_raises_regex(TypeError, msg):
+ np.add(1, opt_out)
+ with assert_raises_regex(TypeError, msg):
+ np.negative(opt_out)
+
+ # opt-outs still hold even when other arguments have pathological
+ # __array_ufunc__ implementations
+
+ class GreedyArray(object):
+ def __array_ufunc__(self, *args, **kwargs):
+ return self
+
+ greedy = GreedyArray()
+ assert_(np.negative(greedy) is greedy)
+ with assert_raises_regex(TypeError, msg):
+ np.add(greedy, opt_out)
+ with assert_raises_regex(TypeError, msg):
+ np.add(greedy, 1, out=opt_out)
+
+ def test_gufunc_override(self):
+ # gufunc are just ufunc instances, but follow a different path,
+ # so check __array_ufunc__ overrides them properly.
+ class A(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return self, ufunc, method, inputs, kwargs
+
+ inner1d = ncu_tests.inner1d
+ a = A()
+ res = inner1d(a, a)
+ assert_equal(res[0], a)
+ assert_equal(res[1], inner1d)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (a, a))
+ assert_equal(res[4], {})
+
+ res = inner1d(1, 1, out=a)
+ assert_equal(res[0], a)
+ assert_equal(res[1], inner1d)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (1, 1))
+ assert_equal(res[4], {'out': (a,)})
+
+ # wrong number of arguments in the tuple is an error too.
+ assert_raises(TypeError, inner1d, a, out='two')
+ assert_raises(TypeError, inner1d, a, a, 'one', out='two')
+ assert_raises(TypeError, inner1d, a, a, 'one', 'two')
+ assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+ assert_raises(ValueError, inner1d, a, a, out=())
+
+ def test_ufunc_override_with_super(self):
+ # NOTE: this class is given as an example in doc/subclassing.py;
+ # if you make any changes here, do update it there too.
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ args = []
+ in_no = []
+ for i, input_ in enumerate(inputs):
+ if isinstance(input_, A):
+ in_no.append(i)
+ args.append(input_.view(np.ndarray))
+ else:
+ args.append(input_)
+
+ outputs = kwargs.pop('out', None)
+ out_no = []
+ if outputs:
+ out_args = []
+ for j, output in enumerate(outputs):
+ if isinstance(output, A):
+ out_no.append(j)
+ out_args.append(output.view(np.ndarray))
+ else:
+ out_args.append(output)
+ kwargs['out'] = tuple(out_args)
+ else:
+ outputs = (None,) * ufunc.nout
+
+ info = {}
+ if in_no:
+ info['inputs'] = in_no
+ if out_no:
+ info['outputs'] = out_no
+
+ results = super(A, self).__array_ufunc__(ufunc, method,
+ *args, **kwargs)
+ if results is NotImplemented:
+ return NotImplemented
+
+ if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
+ return
+
+ if ufunc.nout == 1:
+ results = (results,)
+
+ results = tuple((np.asarray(result).view(A)
+ if output is None else output)
+ for result, output in zip(results, outputs))
+ if results and isinstance(results[0], A):
+ results[0].info = info
+
+ return results[0] if len(results) == 1 else results
+
+ class B(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ if any(isinstance(input_, A) for input_ in inputs):
+ return "A!"
+ else:
+ return NotImplemented
+
+ d = np.arange(5.)
+ # 1 input, 1 output
+ a = np.arange(5.).view(A)
+ b = np.sin(a)
+ check = np.sin(d)
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'inputs': [0]})
+ b = np.sin(d, out=(a,))
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'outputs': [0]})
+ assert_(b is a)
+ a = np.arange(5.).view(A)
+ b = np.sin(a, out=a)
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
+
+ # 1 input, 2 outputs
+ a = np.arange(5.).view(A)
+ b1, b2 = np.modf(a)
+ assert_equal(b1.info, {'inputs': [0]})
+ b1, b2 = np.modf(d, out=(None, a))
+ assert_(b2 is a)
+ assert_equal(b1.info, {'outputs': [1]})
+ a = np.arange(5.).view(A)
+ b = np.arange(5.).view(A)
+ c1, c2 = np.modf(a, out=(a, b))
+ assert_(c1 is a)
+ assert_(c2 is b)
+ assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
+
+ # 2 input, 1 output
+ a = np.arange(5.).view(A)
+ b = np.arange(5.).view(A)
+ c = np.add(a, b, out=a)
+ assert_(c is a)
+ assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
+ # some tests with a non-ndarray subclass
+ a = np.arange(5.)
+ b = B()
+ assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_raises(TypeError, np.add, a, b)
+ a = a.view(A)
+ assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
+ assert_(np.add(a, b) == "A!")
+ # regression check for gh-9102 -- tests ufunc.reduce implicitly.
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ a = d.view(A)
+ c = a.any()
+ check = d.any()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ c = a.max()
+ check = d.max()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.array(0).view(A)
+ c = a.max(out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = a.max(axis=0)
+ b = np.zeros_like(check).view(A)
+ c = a.max(axis=0, out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # simple explicit tests of reduce, accumulate, reduceat
+ check = np.add.reduce(d, axis=1)
+ c = np.add.reduce(a, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduce(a, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = np.add.accumulate(d, axis=0)
+ c = np.add.accumulate(a, axis=0)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.accumulate(a, 0, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ indices = [0, 2, 1]
+ check = np.add.reduceat(d, indices, axis=1)
+ c = np.add.reduceat(a, indices, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduceat(a, indices, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # and a few tests for at
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ check = d.copy()
+ a = d.copy().view(A)
+ np.add.at(check, ([0, 1], [0, 2]), 1.)
+ np.add.at(a, ([0, 1], [0, 2]), 1.)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0]})
+ b = np.array(1.).view(A)
+ a = d.copy().view(A)
+ np.add.at(a, ([0, 1], [0, 2]), b)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose(object):
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
-def is_longdouble_finfo_bogus():
- info = np.finfo(np.longcomplex)
- return not np.isfinite(np.log10(info.tiny/info.eps))
+class TestRationalFunctions(object):
+ def test_lcm(self):
+ self._test_lcm_inner(np.int16)
+ self._test_lcm_inner(np.uint16)
+
+ def test_lcm_object(self):
+ self._test_lcm_inner(np.object_)
+
+ def test_gcd(self):
+ self._test_gcd_inner(np.int16)
+ self._test_gcd_inner(np.uint16)
+
+ def test_gcd_object(self):
+ self._test_gcd_inner(np.object_)
+
+ def _test_lcm_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60, 600])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60]*4)
+
+ # reduce
+ a = np.array([3, 12, 20], dtype=dtype)
+ assert_equal(np.lcm.reduce(a), 60)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+ def _test_gcd_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4, 40])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4]*4)
+
+ # reduce
+ a = np.array([15, 25, 35], dtype=dtype)
+ assert_equal(np.gcd.reduce(a), 5)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
+
+ def test_lcm_overflow(self):
+ # verify that we don't overflow when a*b does overflow
+ big = np.int32(np.iinfo(np.int32).max // 11)
+ a = 2*big
+ b = 5*big
+ assert_equal(np.lcm(a, b), 10*big)
+
+ def test_gcd_overflow(self):
+ for dtype in (np.int32, np.int64):
+ # verify that we don't overflow when taking abs(x)
+ # not relevant for lcm, where the result is unrepresentable anyway
+ a = dtype(np.iinfo(dtype).min) # negative power of two
+ q = -(a // 4)
+ assert_equal(np.gcd(a, q*3), q)
+ assert_equal(np.gcd(a, -q*3), q)
+
+ def test_decimal(self):
+ from decimal import Decimal
+ a = np.array([1, 1, -1, -1]) * Decimal('0.20')
+ b = np.array([1, -1, 1, -1]) * Decimal('0.12')
+
+ assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
+ assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
+
+ def test_float(self):
+ # not well-defined on float due to rounding errors
+ assert_raises(TypeError, np.gcd, 0.3, 0.4)
+ assert_raises(TypeError, np.lcm, 0.3, 0.4)
+
+ def test_builtin_long(self):
+ # sanity check that array coercion is alright for builtin longs
+ assert_equal(np.array(2**200).item(), 2**200)
+
+ # expressed as prime factors
+ a = np.array(2**100 * 3**5)
+ b = np.array([2**100 * 5**7, 2**50 * 3**10])
+ assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
+ assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
+
+ assert_equal(np.gcd(2**100, 3**100), 1)
class TestComplexFunctions(object):
@@ -1711,7 +2461,7 @@ class TestComplexFunctions(object):
else:
x = .5
fr = f(x)
- fz = f(np.complex(x))
+ fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
@@ -1726,53 +2476,53 @@ class TestComplexFunctions(object):
def test_branch_cuts(self):
# check branch cuts and continuity on them
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
- yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
# check against bogus branch cuts: assert continuity between quadrants
- yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
- yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
- yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
- yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1
- yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
- yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
- yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
# check against bogus branch cuts: assert continuity between quadrants
- yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
- yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
def test_against_cmath(self):
import cmath
@@ -1780,7 +2530,7 @@ class TestComplexFunctions(object):
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
- atol = 4*np.finfo(np.complex).eps
+ atol = 4*np.finfo(complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
@@ -1793,7 +2543,8 @@ class TestComplexFunctions(object):
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
- def check_loss_of_precision(self, dtype):
+ @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
+ def test_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
@@ -1835,10 +2586,11 @@ class TestComplexFunctions(object):
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
- check(x_series, 50*eps)
+ # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19.
+ check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
- check(x_basic, 2*eps/1e-3)
+ check(x_basic, 2.0*eps/1e-3)
# Check a few points
@@ -1878,28 +2630,27 @@ class TestComplexFunctions(object):
check(func, pts, 1j)
check(func, pts, 1+1j)
- def test_loss_of_precision(self):
- for dtype in [np.complex64, np.complex_]:
- yield self.check_loss_of_precision, dtype
- @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
- def test_loss_of_precision_longcomplex(self):
- self.check_loss_of_precision(np.longcomplex)
-
-
-class TestAttributes(TestCase):
+class TestAttributes(object):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
- assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n'))
- self.assertTrue(add.ntypes >= 18) # don't fail if types added
- self.assertTrue('ii->i' in add.types)
+ assert_(add.ntypes >= 18) # don't fail if types added
+ assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
+ def test_doc(self):
+ # don't bother checking the long list of kwargs, which are likely to
+ # change
+ assert_(ncu.add.__doc__.startswith(
+ "add(x1, x2, /, out=None, *, where=True"))
+ assert_(ncu.frexp.__doc__.startswith(
+ "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-class TestSubclass(TestCase):
+
+class TestSubclass(object):
def test_subclass_op(self):
@@ -1913,7 +2664,7 @@ class TestSubclass(TestCase):
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
- dtype=np.complex):
+ dtype=complex):
"""
Check for a branch cut in a function.
@@ -1995,14 +2746,25 @@ def _test_nextafter(t):
def test_nextafter():
return _test_nextafter(np.float64)
+
def test_nextafterf():
return _test_nextafter(np.float32)
-@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
- "Long double support buggy on win32 and PPC, ticket 1664.")
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
def test_nextafterl():
return _test_nextafter(np.longdouble)
+
+def test_nextafter_0():
+ for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
+ tiny = np.finfo(t).tiny
+ assert_(0. < direction * np.nextafter(t(0), t(direction)) < tiny)
+ assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
+
def _test_spacing(t):
one = t(1)
eps = np.finfo(t).eps
@@ -2021,8 +2783,11 @@ def test_spacing():
def test_spacingf():
return _test_spacing(np.float32)
-@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
- "Long double support buggy on win32 and PPC, ticket 1664.")
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
def test_spacingl():
return _test_spacing(np.longdouble)
@@ -2143,5 +2908,7 @@ def test_rint_big_int():
assert_equal(val, np.rint(val))
-if __name__ == "__main__":
- run_module_suite()
+def test_signaling_nan_exceptions():
+ with assert_no_warnings():
+ a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
+ np.isnan(a)
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 536ad398a..785ae8c57 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -2,13 +2,13 @@ from __future__ import division, absolute_import, print_function
import sys
import platform
+import pytest
import numpy as np
import numpy.core.umath as ncu
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
- assert_almost_equal, dec
-)
+ assert_raises, assert_equal, assert_array_equal, assert_almost_equal
+ )
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
@@ -17,17 +17,17 @@ from numpy.testing import (
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
+# FIXME: this will probably change when we require full C99 compatibility
with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
-skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO: This can become xfail once the generator functions are removed.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+ reason="Inadequate C99 complex support")
-def platform_skip(func):
- return dec.skipif(skip_complex_tests,
- "Numpy is using complex functions (e.g. sqrt) provided by your"
- "platform's C library. However, they do not seem to behave according"
- "to C99 -- so C99 tests are skipped.")(func)
class TestCexp(object):
@@ -35,11 +35,11 @@ class TestCexp(object):
check = check_complex_value
f = np.exp
- yield check, f, 1, 0, np.exp(1), 0, False
- yield check, f, 0, 1, np.cos(1), np.sin(1), False
+ check(f, 1, 0, np.exp(1), 0, False)
+ check(f, 0, 1, np.cos(1), np.sin(1), False)
- ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
- yield check, f, 1, 1, ref.real, ref.imag, False
+ ref = np.exp(1) * complex(np.cos(1), np.sin(1))
+ check(f, 1, 1, ref.real, ref.imag, False)
@platform_skip
def test_special_values(self):
@@ -49,87 +49,88 @@ class TestCexp(object):
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
- yield check, f, np.PZERO, 0, 1, 0, False
- yield check, f, np.NZERO, 0, 1, 0, False
+ check(f, np.PZERO, 0, 1, 0, False)
+ check(f, np.NZERO, 0, 1, 0, False)
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
- yield check, f, 1, np.inf, np.nan, np.nan
- yield check, f, -1, np.inf, np.nan, np.nan
- yield check, f, 0, np.inf, np.nan, np.nan
+ check(f, 1, np.inf, np.nan, np.nan)
+ check(f, -1, np.inf, np.nan, np.nan)
+ check(f, 0, np.inf, np.nan, np.nan)
# cexp(inf + 0i) is inf + 0i
- yield check, f, np.inf, 0, np.inf, 0
+ check(f, np.inf, 0, np.inf, 0)
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
- yield check, f, -np.inf, 1, np.PZERO, np.PZERO
- yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO
+ check(f, -np.inf, 1, np.PZERO, np.PZERO)
+ check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
- yield check, f, np.inf, 1, np.inf, np.inf
- yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf
+ check(f, np.inf, 1, np.inf, np.inf)
+ check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.inf)))
+ z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_inf, None
+ _check_ninf_inf(None)
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.inf)))
+ z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_inf_inf, None
+ _check_inf_inf(None)
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.nan)))
+ z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_nan, None
+ _check_ninf_nan(None)
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.nan)))
+ z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_inf_nan, None
+ _check_inf_nan(None)
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
- yield check, f, np.nan, 1, np.nan, np.nan
- yield check, f, np.nan, -1, np.nan, np.nan
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, -1, np.nan, np.nan)
- yield check, f, np.nan, np.inf, np.nan, np.nan
- yield check, f, np.nan, -np.inf, np.nan, np.nan
+ check(f, np.nan, np.inf, np.nan, np.nan)
+ check(f, np.nan, -np.inf, np.nan, np.nan)
# cexp(nan + nani) is nan + nani
- yield check, f, np.nan, np.nan, np.nan, np.nan
+ check(f, np.nan, np.nan, np.nan, np.nan)
- @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
+    # TODO: This can become xfail once the generator functions are removed.
+ @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
check = check_complex_value
f = np.exp
- yield check, f, np.nan, 0, np.nan, 0
+ check(f, np.nan, 0, np.nan, 0)
-class TestClog(TestCase):
+class TestClog(object):
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
@@ -138,7 +139,7 @@ class TestClog(TestCase):
assert_almost_equal(y[i], y_r[i])
@platform_skip
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_special_values(self):
xl = []
yl = []
@@ -150,9 +151,9 @@ class TestClog(TestCase):
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([np.NZERO], dtype=np.complex)
- y = np.complex(-np.inf, np.pi)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.NZERO], dtype=complex)
+ y = complex(-np.inf, np.pi)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -162,9 +163,9 @@ class TestClog(TestCase):
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([0], dtype=np.complex)
- y = np.complex(-np.inf, 0)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([0], dtype=complex)
+ y = complex(-np.inf, 0)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -172,13 +173,13 @@ class TestClog(TestCase):
yl.append(y)
     # clog(x + i inf) returns +inf + i pi /2, for finite x.
- x = np.array([complex(1, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.5 * np.pi)
+ x = np.array([complex(1, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-1, np.inf)], dtype=np.complex)
+ x = np.array([complex(-1, np.inf)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
@@ -186,9 +187,9 @@ class TestClog(TestCase):
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating- point exception, for finite x.
with np.errstate(invalid='raise'):
- x = np.array([complex(1., np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([complex(1., np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -196,8 +197,8 @@ class TestClog(TestCase):
yl.append(y)
with np.errstate(invalid='raise'):
- x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.inf + 1j * np.nan], dtype=complex)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -205,92 +206,96 @@ class TestClog(TestCase):
yl.append(y)
# clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
- x = np.array([-np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, np.pi)
+ x = np.array([-np.inf + 1j], dtype=complex)
+ y = complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
- x = np.array([np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, 0)
+ x = np.array([np.inf + 1j], dtype=complex)
+ y = complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(- inf + i inf) returns +inf + i3pi /4.
- x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.75 * np.pi)
+ x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + i inf) returns +inf + ipi /4.
- x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.25 * np.pi)
+ x = np.array([complex(np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
- x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.inf, np.nan)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
+ x = np.array([complex(-np.inf, np.nan)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
- x = np.array([complex(np.nan, 1)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, 1)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
- x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.nan, np.inf)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
- x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
- xa = np.array(xl, dtype=np.complex)
- ya = np.array(yl, dtype=np.complex)
+ xa = np.array(xl, dtype=complex)
+ ya = np.array(yl, dtype=complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
+
class TestCsqrt(object):
def test_simple(self):
# sqrt(1)
- yield check_complex_value, np.sqrt, 1, 0, 1, 0
+ check_complex_value(np.sqrt, 1, 0, 1, 0)
# sqrt(1i)
- yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False
+ rres = 0.5*np.sqrt(2)
+ ires = rres
+ check_complex_value(np.sqrt, 0, 1, rres, ires, False)
# sqrt(-1)
- yield check_complex_value, np.sqrt, -1, 0, 0, 1
+ check_complex_value(np.sqrt, -1, 0, 0, 1)
def test_simple_conjugate(self):
- ref = np.conj(np.sqrt(np.complex(1, 1)))
+ ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
- yield check_complex_value, f, 1, 1, ref.real, ref.imag, False
+
+ check_complex_value(f, 1, 1, ref.real, ref.imag, False)
#def test_branch_cut(self):
# _check_branch_cut(f, -1, 0, 1, -1)
@@ -303,58 +308,58 @@ class TestCsqrt(object):
f = np.sqrt
# csqrt(+-0 + 0i) is 0 + 0i
- yield check, f, np.PZERO, 0, 0, 0
- yield check, f, np.NZERO, 0, 0, 0
+ check(f, np.PZERO, 0, 0, 0)
+ check(f, np.NZERO, 0, 0, 0)
# csqrt(x + infi) is inf + infi for any x (including NaN)
- yield check, f, 1, np.inf, np.inf, np.inf
- yield check, f, -1, np.inf, np.inf, np.inf
+ check(f, 1, np.inf, np.inf, np.inf)
+ check(f, -1, np.inf, np.inf, np.inf)
- yield check, f, np.PZERO, np.inf, np.inf, np.inf
- yield check, f, np.NZERO, np.inf, np.inf, np.inf
- yield check, f, np.inf, np.inf, np.inf, np.inf
- yield check, f, -np.inf, np.inf, np.inf, np.inf
- yield check, f, -np.nan, np.inf, np.inf, np.inf
+ check(f, np.PZERO, np.inf, np.inf, np.inf)
+ check(f, np.NZERO, np.inf, np.inf, np.inf)
+ check(f, np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.nan, np.inf, np.inf, np.inf)
# csqrt(x + nani) is nan + nani for any finite x
- yield check, f, 1, np.nan, np.nan, np.nan
- yield check, f, -1, np.nan, np.nan, np.nan
- yield check, f, 0, np.nan, np.nan, np.nan
+ check(f, 1, np.nan, np.nan, np.nan)
+ check(f, -1, np.nan, np.nan, np.nan)
+ check(f, 0, np.nan, np.nan, np.nan)
# csqrt(-inf + yi) is +0 + infi for any finite y > 0
- yield check, f, -np.inf, 1, np.PZERO, np.inf
+ check(f, -np.inf, 1, np.PZERO, np.inf)
# csqrt(inf + yi) is +inf + 0i for any finite y > 0
- yield check, f, np.inf, 1, np.inf, np.PZERO
+ check(f, np.inf, 1, np.inf, np.PZERO)
# csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
- z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
+ z = np.sqrt(np.array(complex(-np.inf, np.nan)))
#Fixme: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_nan, None
+ _check_ninf_nan(None)
# csqrt(+inf + nani) is inf + nani
- yield check, f, np.inf, np.nan, np.inf, np.nan
+ check(f, np.inf, np.nan, np.inf, np.nan)
# csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
# + nani)
- yield check, f, np.nan, 0, np.nan, np.nan
- yield check, f, np.nan, 1, np.nan, np.nan
- yield check, f, np.nan, np.nan, np.nan, np.nan
+ check(f, np.nan, 0, np.nan, np.nan)
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, np.nan, np.nan, np.nan)
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
-class TestCpow(TestCase):
- def setUp(self):
+class TestCpow(object):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
def test_simple(self):
@@ -391,10 +396,10 @@ class TestCpow(TestCase):
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
class TestCabs(object):
- def setUp(self):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
def test_simple(self):
@@ -406,16 +411,16 @@ class TestCabs(object):
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
- x = np.array([1+0j], dtype=np.complex)
+ x = np.array([1+0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(1, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(1, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
@@ -424,33 +429,33 @@ class TestCabs(object):
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
- yield check_real_value, np.abs, np.nan, np.nan, np.nan
+ check_real_value(np.abs, np.nan, np.nan, np.nan)
x.append(np.nan)
y.append(-np.nan)
- yield check_real_value, np.abs, -np.nan, np.nan, np.nan
+ check_real_value(np.abs, -np.nan, np.nan, np.nan)
# According to C99 standard, if exactly one of the real/part is inf and
# the other nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
- yield check_real_value, np.abs, np.inf, np.nan, np.inf
+ check_real_value(np.abs, np.inf, np.nan, np.inf)
x.append(-np.inf)
y.append(np.nan)
- yield check_real_value, np.abs, -np.inf, np.nan, np.inf
+ check_real_value(np.abs, -np.inf, np.nan, np.inf)
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
return np.abs(np.conj(a))
def g(a, b):
- return np.abs(np.complex(a, b))
+ return np.abs(complex(a, b))
- xa = np.array(x, dtype=np.complex)
+ xa = np.array(x, dtype=complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
- yield check_real_value, f, x[i], y[i], ref
+ check_real_value(f, x[i], y[i], ref)
class TestCarg(object):
def test_simple(self):
@@ -460,63 +465,65 @@ class TestCarg(object):
check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
- @dec.knownfailureif(True,
- "Complex arithmetic with signed zero is buggy on most implementation")
+    # TODO: This can become xfail once the generator functions are removed.
+ @pytest.mark.skip(
+ reason="Complex arithmetic with signed zero fails on most platforms")
def test_zero(self):
# carg(-0 +- 0i) returns +- pi
- yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False
+ check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
# carg(+0 +- 0i) returns +- 0
- yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
- yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO
+ check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+ check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
# carg(x +- 0i) returns +- 0 for x > 0
- yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
- yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False
+ check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+ check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
# carg(x +- 0i) returns +- pi for x < 0
- yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
- yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False
+ check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
# carg(+- 0 + yi) returns pi/2 for y > 0
- yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False
+ check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
# carg(+- 0 + yi) returns -pi/2 for y < 0
- yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False
+ check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
#def test_branch_cuts(self):
# _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
def test_special_values(self):
# carg(-np.inf +- yi) returns +-pi for finite y > 0
- yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
- yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False
+ check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
# carg(np.inf +- yi) returns +-0 for finite y > 0
- yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
- yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False
+ check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
+ check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
# carg(x +- np.infi) returns +-pi/2 for finite x
- yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False
+ check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
# carg(-np.inf +- np.infi) returns +-3pi/4
- yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
- yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False
+ check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
# carg(np.inf +- np.infi) returns +-pi/4
- yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
- yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False
+ check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+ check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
# carg(x + yi) returns np.nan if x or y is nan
- yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
- yield check_real_value, ncu._arg, 0, np.nan, np.nan, False
+ check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+ check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+ check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+ check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
- yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
- yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
@@ -525,14 +532,12 @@ def check_real_value(f, x1, y1, x, exact=True):
else:
assert_almost_equal(f(z1), x)
+
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
- z2 = np.complex(x2, y2)
+ z2 = complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index 7a421a5fb..2ffd8801b 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -3,8 +3,8 @@ from __future__ import division, absolute_import, print_function
import sys
import numpy as np
-from numpy.compat import asbytes, unicode, sixu
-from numpy.testing import TestCase, run_module_suite, assert_equal
+from numpy.compat import unicode
+from numpy.testing import assert_, assert_equal, assert_array_equal
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
@@ -14,27 +14,24 @@ if sys.version_info[:2] >= (3, 3):
def buffer_length(arr):
if isinstance(arr, unicode):
arr = str(arr)
- return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr)
- v = memoryview(arr)
- if v.shape is None:
- return len(v) * v.itemsize
- else:
- return np.prod(v.shape) * v.itemsize
-elif sys.version_info[0] >= 3:
- import array as _array
-
- ucs4 = (_array.array('u').itemsize == 4)
-
- def buffer_length(arr):
- if isinstance(arr, unicode):
- return _array.array('u').itemsize * len(arr)
+ if not arr:
+ charmax = 0
+ else:
+ charmax = max([ord(c) for c in arr])
+ if charmax < 256:
+ size = 1
+ elif charmax < 65536:
+ size = 2
+ else:
+ size = 4
+ return size * len(arr)
v = memoryview(arr)
if v.shape is None:
return len(v) * v.itemsize
else:
return np.prod(v.shape) * v.itemsize
else:
- if len(buffer(sixu('u'))) == 4:
+ if len(buffer(u'u')) == 4:
ucs4 = True
else:
ucs4 = False
@@ -47,33 +44,47 @@ else:
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
-ucs2_value = sixu('\u0900')
+ucs2_value = u'\u0900'
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
-ucs4_value = sixu('\U00100900')
+ucs4_value = u'\U00100900'
+
+
+def test_string_cast():
+ str_arr = np.array(["1234", "1234\0\0"], dtype='S')
+ uni_arr1 = str_arr.astype('>U')
+ uni_arr2 = str_arr.astype('<U')
+
+ if sys.version_info[0] < 3:
+ assert_array_equal(str_arr, uni_arr1)
+ assert_array_equal(str_arr, uni_arr2)
+ else:
+ assert_(str_arr != uni_arr1)
+ assert_(str_arr != uni_arr2)
+ assert_array_equal(uni_arr1, uni_arr2)
############################################################
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == sixu(''))
+ assert_(ua_scalar == u'')
# Encode to ascii and double check
- self.assertTrue(ua_scalar.encode('ascii') == asbytes(''))
+ assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
else:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
@@ -93,47 +104,47 @@ class create_zeros(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_zeros_1(create_zeros, TestCase):
+class TestCreateZeros_1(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
-class test_create_zeros_2(create_zeros, TestCase):
+class TestCreateZeros_2(CreateZeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
-class test_create_zeros_1009(create_zeros, TestCase):
+class TestCreateZeros_1009(CreateZeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
-class create_values(object):
+class CreateValues(object):
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
@@ -153,37 +164,37 @@ class create_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_values_1_ucs2(create_values, TestCase):
+class TestCreateValues_1_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_create_values_1_ucs4(create_values, TestCase):
+class TestCreateValues_1_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_create_values_2_ucs2(create_values, TestCase):
+class TestCreateValues_2_UCS2(CreateValues):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_create_values_2_ucs4(create_values, TestCase):
+class TestCreateValues_2_UCS4(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_create_values_1009_ucs2(create_values, TestCase):
+class TestCreateValues_1009_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_create_values_1009_ucs4(create_values, TestCase):
+class TestCreateValues_1009_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -193,32 +204,32 @@ class test_create_values_1009_ucs4(create_values, TestCase):
# Assignment tests
############################################################
-class assign_values(object):
+class AssignValues(object):
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
@@ -243,37 +254,37 @@ class assign_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_assign_values_1_ucs2(assign_values, TestCase):
+class TestAssignValues_1_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_assign_values_1_ucs4(assign_values, TestCase):
+class TestAssignValues_1_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_assign_values_2_ucs2(assign_values, TestCase):
+class TestAssignValues_2_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_assign_values_2_ucs4(assign_values, TestCase):
+class TestAssignValues_2_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_assign_values_1009_ucs2(assign_values, TestCase):
+class TestAssignValues_1009_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_assign_values_1009_ucs4(assign_values, TestCase):
+class TestAssignValues_1009_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -283,7 +294,7 @@ class test_assign_values_1009_ucs4(assign_values, TestCase):
# Byteorder tests
############################################################
-class byteorder_values:
+class ByteorderValues(object):
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
@@ -293,7 +304,7 @@ class byteorder_values:
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
- self.assertTrue(ua[()] != ua2[()])
+ assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -302,8 +313,8 @@ class byteorder_values:
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue(ua[0] != ua2[0])
- self.assertTrue(ua[-1] != ua2[-1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -311,50 +322,75 @@ class byteorder_values:
def test_valuesMD(self):
# Check byteorder of multi-dimensional objects
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
- dtype='U%s' % self.ulen)
+ dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0])
- self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
-
-class test_byteorder_1_ucs2(byteorder_values, TestCase):
+ def test_values_cast(self):
+ # Check byteorder of when casting the array for a strided and
+ # contiguous array:
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_values_updowncast(self):
+ # Check byteorder of when casting the array to a longer and shorter
+ # string length for strided and contiguous arrays
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ # Cast to a longer type with zero padding
+ longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
+ ua2 = ua.astype(dtype=longer_type)
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
+ # Cast back again with truncating:
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+
+class TestByteorder_1_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_byteorder_1_ucs4(byteorder_values, TestCase):
+class TestByteorder_1_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_byteorder_2_ucs2(byteorder_values, TestCase):
+class TestByteorder_2_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_byteorder_2_ucs4(byteorder_values, TestCase):
+class TestByteorder_2_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_byteorder_1009_ucs2(byteorder_values, TestCase):
+class TestByteorder_1009_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_byteorder_1009_ucs4(byteorder_values, TestCase):
+class TestByteorder_1009_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
-
-
-if __name__ == "__main__":
- run_module_suite()