path: root/numpy/testing
author     Eric Wieser <wieser.eric@gmail.com>  2018-04-22 14:09:43 -0700
committer  GitHub <noreply@github.com>  2018-04-22 14:09:43 -0700
commit     80de28de294b24f926133a86176f64f6a13c5411 (patch)
tree       33aaa221ac82a571243adb56a60359240bb52507  /numpy/testing
parent     14e64281cfe374a9cad476599cbe9b4fa850efb7 (diff)
parent     b5c1bcf1e8ef6e9c11bb4138a15286e648fcbce0 (diff)
download   numpy-80de28de294b24f926133a86176f64f6a13c5411.tar.gz
Merge branch 'master' into npzfile-mappin
Diffstat (limited to 'numpy/testing')
-rw-r--r--  numpy/testing/__init__.py  15
-rw-r--r--  numpy/testing/_private/__init__.py  (renamed from numpy/testing/nose_tools/__init__.py)  0
-rw-r--r--  numpy/testing/_private/decorators.py  (renamed from numpy/testing/nose_tools/decorators.py)  19
-rw-r--r--  numpy/testing/_private/noseclasses.py  (renamed from numpy/testing/nose_tools/noseclasses.py)  2
-rw-r--r--  numpy/testing/_private/nosetester.py  (renamed from numpy/testing/nose_tools/nosetester.py)  0
-rw-r--r--  numpy/testing/_private/parameterized.py  (renamed from numpy/testing/nose_tools/parameterized.py)  4
-rw-r--r--  numpy/testing/_private/pytesttester.py  194
-rw-r--r--  numpy/testing/_private/utils.py  (renamed from numpy/testing/nose_tools/utils.py)  150
-rw-r--r--  numpy/testing/decorators.py  11
-rw-r--r--  numpy/testing/noseclasses.py  11
-rw-r--r--  numpy/testing/nosetester.py  10
-rw-r--r--  numpy/testing/pytest_tools/__init__.py  0
-rw-r--r--  numpy/testing/pytest_tools/decorators.py  278
-rw-r--r--  numpy/testing/pytest_tools/noseclasses.py  342
-rw-r--r--  numpy/testing/pytest_tools/nosetester.py  566
-rw-r--r--  numpy/testing/pytest_tools/utils.py  2268
-rwxr-xr-x  numpy/testing/setup.py  3
-rw-r--r--  numpy/testing/tests/test_decorators.py  344
-rw-r--r--  numpy/testing/tests/test_doctesting.py  3
-rw-r--r--  numpy/testing/tests/test_utils.py  531
-rw-r--r--  numpy/testing/utils.py  13
21 files changed, 886 insertions, 3878 deletions
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 9485b455e..a7c85931c 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -9,7 +9,14 @@ from __future__ import division, absolute_import, print_function
from unittest import TestCase
-from . import decorators as dec
-from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester
-from .utils import *
-test = _numpy_tester().test
+from ._private.utils import *
+from ._private import decorators as dec
+from ._private.nosetester import (
+ run_module_suite, NoseTester as Tester
+ )
+
+__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
+
+from ._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
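The pattern adopted above can be reused by any package that wants the same
``test()`` entry point. A minimal sketch, assuming a hypothetical package
``mypkg`` and a NumPy version that contains this merge (the import path
mirrors the merged ``__init__.py``)::

    # mypkg/__init__.py -- hypothetical package reusing the new test hook
    from numpy.testing._private.pytesttester import PytestTester

    # ``mypkg.test()`` becomes a callable that collects and runs the
    # package's tests with pytest
    test = PytestTester(__name__)
    del PytestTester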
diff --git a/numpy/testing/nose_tools/__init__.py b/numpy/testing/_private/__init__.py
index e69de29bb..e69de29bb 100644
--- a/numpy/testing/nose_tools/__init__.py
+++ b/numpy/testing/_private/__init__.py
diff --git a/numpy/testing/nose_tools/decorators.py b/numpy/testing/_private/decorators.py
index 243c0c8c1..60d3f968f 100644
--- a/numpy/testing/nose_tools/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -15,12 +15,17 @@ function name, setup and teardown functions and so on - see
"""
from __future__ import division, absolute_import, print_function
-import collections
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
-from .utils import SkipTest, assert_warns
+from .utils import SkipTest, assert_warns, HAS_REFCOUNT
__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
- 'parametrize',]
+ 'parametrize', '_needs_refcount',]
def slow(t):
@@ -126,7 +131,7 @@ def skipif(skip_condition, msg=None):
import nose
# Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections.Callable):
+ if isinstance(skip_condition, collections_abc.Callable):
skip_val = lambda: skip_condition()
else:
skip_val = lambda: skip_condition
@@ -202,7 +207,7 @@ def knownfailureif(fail_condition, msg=None):
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections.Callable):
+ if isinstance(fail_condition, collections_abc.Callable):
fail_val = lambda: fail_condition()
else:
fail_val = lambda: fail_condition
@@ -257,7 +262,7 @@ def deprecated(conditional=True):
with assert_warns(DeprecationWarning):
f(*args, **kwargs)
- if isinstance(conditional, collections.Callable):
+ if isinstance(conditional, collections_abc.Callable):
cond = conditional()
else:
cond = conditional
@@ -283,3 +288,5 @@ def parametrize(vars, input):
from .parameterized import parameterized
return parameterized(input)
+
+_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
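The new ``_needs_refcount`` decorator is just ``skipif`` bound to the
``HAS_REFCOUNT`` flag, so reference-count dependent tests are skipped on
interpreters without ``sys.getrefcount`` (e.g. PyPy). A minimal sketch of how
it might be applied; the test body is a hypothetical example, and note that
the decorator still goes through the nose-based ``skipif`` machinery, so nose
must be installed for it to run::

    import sys
    from numpy.testing._private.decorators import _needs_refcount

    @_needs_refcount
    def test_refcount_is_stable():
        # Runs only where sys.getrefcount exists; raises SkipTest otherwise.
        obj = object()
        before = sys.getrefcount(obj)
        assert sys.getrefcount(obj) == before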
diff --git a/numpy/testing/nose_tools/noseclasses.py b/numpy/testing/_private/noseclasses.py
index 9756b9b45..08dec0ca9 100644
--- a/numpy/testing/nose_tools/noseclasses.py
+++ b/numpy/testing/_private/noseclasses.py
@@ -325,7 +325,7 @@ class FPUModeCheckPlugin(Plugin):
"""
def prepareTestCase(self, test):
- from numpy.core.multiarray_tests import get_fpu_mode
+ from numpy.core._multiarray_tests import get_fpu_mode
def run(result):
old_mode = get_fpu_mode()
diff --git a/numpy/testing/nose_tools/nosetester.py b/numpy/testing/_private/nosetester.py
index c2cf58377..c2cf58377 100644
--- a/numpy/testing/nose_tools/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
diff --git a/numpy/testing/nose_tools/parameterized.py b/numpy/testing/_private/parameterized.py
index d094f7c7f..53e67517d 100644
--- a/numpy/testing/nose_tools/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -456,8 +456,8 @@ class parameterized(object):
frame = stack[1]
frame_locals = frame[0].f_locals
- paramters = cls.input_as_callable(input)()
- for num, p in enumerate(paramters):
+ parameters = cls.input_as_callable(input)()
+ for num, p in enumerate(parameters):
name = name_func(f, num, p)
frame_locals[name] = cls.param_as_standalone_func(p, f, name)
frame_locals[name].__doc__ = doc_func(f, num, p)
diff --git a/numpy/testing/_private/pytesttester.py b/numpy/testing/_private/pytesttester.py
new file mode 100644
index 000000000..8c73fafa4
--- /dev/null
+++ b/numpy/testing/_private/pytesttester.py
@@ -0,0 +1,194 @@
+"""
+Pytest test running.
+
+This module implements the ``test()`` function for NumPy modules. The usual
+boilerplate for doing that is to put the following in the module
+``__init__.py`` file::
+
+ from numpy.testing import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+
+Warnings filtering and other runtime settings should be dealt with in the
+``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
+whether or not that file is found as follows:
+
+* ``pytest.ini`` is present (develop mode)
+  All warnings except those explicitly filtered out are raised as errors.
+* ``pytest.ini`` is absent (release mode)
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
+ warnings are passed through.
+
+In practice, tests run from the numpy repo are run in develop mode. That
+includes the standard ``python runtests.py`` invocation.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+
+__all__ = ['PytestTester']
+
+
+
+def _show_numpy_info():
+ import numpy as np
+
+ print("NumPy version %s" % np.__version__)
+ relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+
+
+class PytestTester(object):
+ """
+ Pytest test runner.
+
+ This class is made available in ``numpy.testing``, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+ Calling this test function finds and runs all tests associated with the
+ module and all its sub-modules.
+
+ Attributes
+ ----------
+ module_name : str
+ Full path to the package to test.
+
+ Parameters
+ ----------
+ module_name : module name
+ The name of the module to test.
+
+ """
+ def __init__(self, module_name):
+ self.module_name = module_name
+
+ def __call__(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, durations=-1, tests=None):
+ """
+ Run tests for module using pytest.
+
+ Parameters
+ ----------
+ label : {'fast', 'full'}, optional
+ Identifies the tests to run. When set to 'fast', tests decorated
+ with `pytest.mark.slow` are skipped, when 'full', the slow marker
+ is ignored.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-3. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to pytests.
+ doctests : bool, optional
+ .. note:: Not supported
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ Requires installation of (pip) pytest-cov.
+ durations : int, optional
+            If < 0, do nothing. If 0, report the time of all tests. If > 0,
+            report the time of the ``durations`` slowest tests. Default is -1.
+ tests : test or list of tests
+ Tests to be executed with pytest '--pyargs'
+
+ Returns
+ -------
+ result : bool
+ Return True on success, false otherwise.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ Running unit tests for numpy.lib
+ ...
+ Ran 976 tests in 3.933s
+
+ OK
+
+ >>> result.errors #doctest: +SKIP
+ []
+ >>> result.knownfail #doctest: +SKIP
+ []
+
+ """
+ import pytest
+ import warnings
+
+ #FIXME This is no longer needed? Assume it was for use in tests.
+ # cap verbosity at 3, which is equivalent to the pytest '-vv' option
+ #from . import utils
+ #verbose = min(int(verbose), 3)
+ #utils.verbose = verbose
+ #
+
+ module = sys.modules[self.module_name]
+ module_path = os.path.abspath(module.__path__[0])
+
+ # setup the pytest arguments
+ pytest_args = ["-l"]
+
+ # offset verbosity. The "-q" cancels a "-v".
+ pytest_args += ["-q"]
+
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from numpy.distutils import cpuinfo
+
+ # Filter out annoying import messages. Want these in both develop and
+ # release mode.
+ pytest_args += [
+ "-W ignore:Not importing directory",
+ "-W ignore:numpy.dtype size changed",
+ "-W ignore:numpy.ufunc size changed",
+ "-W ignore::UserWarning:cpuinfo",
+ ]
+
+ if doctests:
+ raise ValueError("Doctests not supported")
+
+ if extra_argv:
+ pytest_args += list(extra_argv)
+
+ if verbose > 1:
+ pytest_args += ["-" + "v"*(verbose - 1)]
+
+ if coverage:
+ pytest_args += ["--cov=" + module_path]
+
+ if label == "fast":
+ pytest_args += ["-m", "not slow"]
+ elif label != "full":
+ pytest_args += ["-m", label]
+
+ if durations >= 0:
+ pytest_args += ["--durations=%s" % durations]
+
+ if tests is None:
+ tests = [self.module_name]
+
+ pytest_args += ["--pyargs"] + list(tests)
+
+
+ # run tests.
+ _show_numpy_info()
+
+ try:
+ code = pytest.main(pytest_args)
+ except SystemExit as exc:
+ code = exc.code
+
+ return code == 0
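The runner above is normally reached through the ``test`` attribute that each
package exposes, so the interesting knobs are the keyword arguments of
``__call__``. A sketch of typical invocations, assuming NumPy and pytest are
installed; the exact output depends on the environment::

    import numpy as np

    # default: skip tests marked slow
    ok = np.test()

    # run everything, raise verbosity, and pass extra flags straight to pytest
    ok = np.test(label='full', verbose=2, extra_argv=['-x'])

    # report the ten slowest tests
    ok = np.test(durations=10)
    print('all tests passed:', ok)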
diff --git a/numpy/testing/nose_tools/utils.py b/numpy/testing/_private/utils.py
index 2d97b5c1e..b0c0b0c48 100644
--- a/numpy/testing/nose_tools/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -7,6 +7,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
import re
+import gc
import operator
import warnings
from functools import partial, wraps
@@ -14,6 +15,7 @@ import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from unittest.case import SkipTest
+import pprint
from numpy.core import(
float32, empty, arange, array_repr, ndarray, isnat, array)
@@ -35,7 +37,7 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
]
@@ -1156,10 +1158,54 @@ def rundocs(filename=None, raise_on_error=True):
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
-def raises(*args,**kwargs):
+def raises(*args):
+ """Decorator to check for raised exceptions.
+
+ The decorated test function must raise one of the passed exceptions to
+ pass. If you want to test many assertions about exceptions in a single
+ test, you may want to use `assert_raises` instead.
+
+ .. warning::
+ This decorator is nose specific, do not use it if you are using a
+ different test framework.
+
+ Parameters
+ ----------
+ args : exceptions
+ The test passes if any of the passed exceptions is raised.
+
+ Raises
+ ------
+ AssertionError
+
+ Examples
+ --------
+
+ Usage::
+
+ @raises(TypeError, ValueError)
+ def test_raises_type_error():
+ raise TypeError("This test passes")
+
+ @raises(Exception)
+ def test_that_fails_by_passing():
+ pass
+
+ """
nose = import_nose()
- return nose.tools.raises(*args,**kwargs)
+ return nose.tools.raises(*args)
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+class _Dummy(unittest.TestCase):
+ def nop(self):
+ pass
+
+_d = _Dummy('nop')
def assert_raises(*args, **kwargs):
"""
@@ -1187,8 +1233,7 @@ def assert_raises(*args, **kwargs):
"""
__tracebackhide__ = True # Hide traceback for py.test
- nose = import_nose()
- return nose.tools.assert_raises(*args,**kwargs)
+ return _d.assertRaises(*args,**kwargs)
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
@@ -1212,13 +1257,12 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
__tracebackhide__ = True # Hide traceback for py.test
- nose = import_nose()
if sys.version_info.major >= 3:
- funcname = nose.tools.assert_raises_regex
+ funcname = _d.assertRaisesRegex
else:
# Only present in Python 2.7, missing from unittest in 2.6
- funcname = nose.tools.assert_raises_regexp
+ funcname = _d.assertRaisesRegexp
return funcname(exception_class, expected_regexp, *args, **kwargs)
@@ -1577,7 +1621,9 @@ def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation of
x."""
import numpy as np
- if x.dtype == np.float32:
+ if x.dtype == np.float16:
+ return _integer_repr(x, np.int16, np.int16(-2**15))
+ elif x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
@@ -2228,3 +2274,89 @@ class suppress_warnings(object):
return func(*args, **kwargs)
return new_func
+
+
+@contextlib.contextmanager
+def _assert_no_gc_cycles_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ # not meaningful to test if there is no refcounting
+ if not HAS_REFCOUNT:
+ return
+
+ assert_(gc.isenabled())
+ gc.disable()
+ gc_debug = gc.get_debug()
+ try:
+ for i in range(100):
+ if gc.collect() == 0:
+ break
+ else:
+ raise RuntimeError(
+ "Unable to fully collect garbage - perhaps a __del__ method is "
+ "creating more reference cycles?")
+
+ gc.set_debug(gc.DEBUG_SAVEALL)
+ yield
+ # gc.collect returns the number of unreachable objects in cycles that
+ # were found -- we are checking that no cycles were created in the context
+ n_objects_in_cycles = gc.collect()
+ objects_in_cycles = gc.garbage[:]
+ finally:
+ del gc.garbage[:]
+ gc.set_debug(gc_debug)
+ gc.enable()
+
+ if n_objects_in_cycles:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError(
+ "Reference cycles were found{}: {} objects were collected, "
+ "of which {} are shown below:{}"
+ .format(
+ name_str,
+ n_objects_in_cycles,
+ len(objects_in_cycles),
+ ''.join(
+ "\n {} object with id={}:\n {}".format(
+ type(o).__name__,
+ id(o),
+ pprint.pformat(o).replace('\n', '\n ')
+ ) for o in objects_in_cycles
+ )
+ )
+ )
+
+
+def assert_no_gc_cycles(*args, **kwargs):
+ """
+ Fail if the given callable produces any reference cycles.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_gc_cycles():
+ do_something()
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ Nothing. The result is deliberately discarded to ensure that all cycles
+ are found.
+
+ """
+ if not args:
+ return _assert_no_gc_cycles_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_gc_cycles_context(name=func.__name__):
+ func(*args, **kwargs)
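The new ``assert_no_gc_cycles`` helper can be used either around a block or on
a callable, as the docstring above describes. A small sketch; ``make_cycle``
is a hypothetical function that deliberately fails the check (on CPython,
where refcounting is available)::

    from numpy.testing import assert_no_gc_cycles

    def no_cycle():
        return [1, 2, 3]              # nothing self-referential

    def make_cycle():
        a = []
        a.append(a)                   # creates a reference cycle

    assert_no_gc_cycles(no_cycle)     # passes

    with assert_no_gc_cycles():       # context-manager form
        no_cycle()

    assert_no_gc_cycles(make_cycle)   # raises AssertionError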
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 21bcdd798..68c1554b5 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -3,6 +3,13 @@ Back compatibility decorators module. It will import the appropriate
set of tools
"""
-import os
+from __future__ import division, absolute_import, print_function
-from .nose_tools.decorators import *
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.decorators is deprecated, "
+ "import from numpy.testing instead.",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.decorators import *
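This shim and the two back-compatibility modules that follow it use the same
recipe: warn once at import time, then re-export everything from the private
location. A minimal sketch of the pattern for a hypothetical module
``mypkg/oldmod.py`` forwarding to ``mypkg/_private/newmod.py``::

    # mypkg/oldmod.py -- hypothetical deprecation shim
    from __future__ import division, absolute_import, print_function

    import warnings

    warnings.warn("Importing from mypkg.oldmod is deprecated, "
                  "import from mypkg instead.",
                  DeprecationWarning, stacklevel=2)

    from ._private.newmod import *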
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index 144c4e7e4..e0e728a32 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -2,4 +2,13 @@
Back compatibility noseclasses module. It will import the appropriate
set of tools
"""
-from .nose_tools.noseclasses import * \ No newline at end of file
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.noseclasses is deprecated, "
+ "import from numpy.testing instead",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 949fae03e..c8c7d6e68 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -3,10 +3,16 @@ Back compatibility nosetester module. It will import the appropriate
set of tools
"""
-import os
+from __future__ import division, absolute_import, print_function
-from .nose_tools.nosetester import *
+import warnings
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.nosetester is deprecated, "
+ "import from numpy.testing instead.",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.nosetester import *
__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
'_numpy_tester', 'get_package_name', 'import_nose',
diff --git a/numpy/testing/pytest_tools/__init__.py b/numpy/testing/pytest_tools/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/numpy/testing/pytest_tools/__init__.py
+++ /dev/null
diff --git a/numpy/testing/pytest_tools/decorators.py b/numpy/testing/pytest_tools/decorators.py
deleted file mode 100644
index 08a39e0c0..000000000
--- a/numpy/testing/pytest_tools/decorators.py
+++ /dev/null
@@ -1,278 +0,0 @@
-"""
-Compatibility shim for pytest compatibility with the nose decorators.
-
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward.
-
-Decorators that return a new function will not preserve meta-data such as
-function name, setup and teardown functions and so on.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import collections
-
-from .utils import SkipTest, assert_warns
-
-__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
- 'parametrize',]
-
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to mark as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
- The `numpy.testing` module includes ``import decorators as dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print('Big, slow test')
-
- """
- import pytest
-
- return pytest.mark.slow(t)
-
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing.decorators import setastest
-
- @setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- Undecorated functions are returned and that may lead to some lost
-    information. Note that this function differs from the pytest fixture
- ``pytest.mark.skipif``. The latter marks test functions on import and the
- skip is handled during collection, hence it cannot be used for non-test
- functions, nor does it handle callable conditions.
-
- """
- def skip_decorator(f):
- # Local import to avoid a hard pytest dependency and only incur the
- # import time overhead at actual test-time.
- import inspect
- import pytest
-
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
-
- # Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections.Callable):
- skip_val = lambda: skip_condition()
- else:
- skip_val = lambda: skip_condition
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
- return "Skipping test: %s: %s" % (func.__name__, out)
-
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if inspect.isgeneratorfunction(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
- return skipper
-
- return skip_decorator
-
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureException exception if given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureException exception.
- Default is None.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes
- KnownFailureException to be raised when `fail_condition` is True,
- and the function to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is not decorated in the pytest case unlike for nose.
-
- """
- import pytest
- from .utils import KnownFailureException
-
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections.Callable):
- fail_val = lambda: fail_condition()
- else:
- fail_val = lambda: fail_condition
-
- def knownfail_decorator(f):
-
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureException(msg)
- return f(*args, **kwargs)
-
- return knownfailer
-
- return knownfail_decorator
-
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarning's, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
-
- def _deprecated_imp(*args, **kwargs):
- # Poor man's replacement for the with statement
- with assert_warns(DeprecationWarning):
- f(*args, **kwargs)
-
- if isinstance(conditional, collections.Callable):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return _deprecated_imp
- else:
- return f
- return deprecate_decorator
-
-
-def parametrize(vars, input):
- """
- Pytest compatibility class. This implements the simplest level of
- pytest.mark.parametrize for use in nose as an aid in making the transition
- to pytest. It achieves that by adding a dummy var parameter and ignoring
- the doc_func parameter of the base class. It does not support variable
- substitution by name, nor does it support nesting or classes. See the
- pytest documentation for usage.
-
- """
- import pytest
-
- return pytest.mark.parametrize(vars, input)
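The deleted shim above mostly mapped the nose-style decorators onto plain
pytest markers, which suggests the direct replacements downstream code can use
instead. A hedged sketch of the equivalences; apart from the built-in
``skipif``/``xfail``/``parametrize`` markers, ``slow`` is a custom marker name
that has to be registered in the test configuration::

    import pytest

    @pytest.mark.slow                                     # was @dec.slow
    def test_big():
        pass

    @pytest.mark.skipif(True, reason='condition holds')   # was @dec.skipif
    def test_maybe_skipped():
        pass

    @pytest.mark.xfail(reason='known failure')            # was @dec.knownfailureif
    def test_known_bad():
        assert False

    @pytest.mark.parametrize('x', [1, 2, 3])              # was @dec.parametrize
    def test_values(x):
        assert x > 0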
diff --git a/numpy/testing/pytest_tools/noseclasses.py b/numpy/testing/pytest_tools/noseclasses.py
deleted file mode 100644
index 2486029fe..000000000
--- a/numpy/testing/pytest_tools/noseclasses.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# These classes implement a doctest runner plugin for nose, a "known failure"
-# error class, and a customized TestProgram for NumPy.
-
-# Because this module imports nose directly, it should not
-# be used except by nosetester.py to avoid a general NumPy
-# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
-import os
-import doctest
-import inspect
-
-import numpy
-import pytest
-from .utils import KnownFailureException, SkipTest
-import _pytest.runner
-import _pytest.skipping
-
-
-class NpyPlugin(object):
-
- def pytest_runtest_makereport(self, call):
- if call.excinfo:
- if call.excinfo.errisinstance(KnownFailureException):
- #let's substitute the excinfo with a pytest.xfail one
- call2 = call.__class__(
- lambda: _pytest.runner.skip(str(call.excinfo.value)),
- call.when)
- print()
- print()
- print(call.excinfo._getreprcrash())
- print()
- print(call.excinfo)
- print()
- print(call2.excinfo)
- print()
- call.excinfo = call2.excinfo
- if call.excinfo.errisinstance(SkipTest):
- #let's substitute the excinfo with a pytest.skip one
- call2 = call.__class__(
- lambda: _pytest.runner.skip(str(call.excinfo.value)),
- call.when)
- call.excinfo = call2.excinfo
-
-
-if False:
- from nose.plugins import doctests as npd
- from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
- from nose.plugins.base import Plugin
- from nose.util import src
- from .nosetester import get_package_name
- # Some of the classes in this module begin with 'Numpy' to clearly distinguish
- # them from the plethora of very similar names from nose/unittest/doctest
-
- #-----------------------------------------------------------------------------
- # Modified version of the one in the stdlib, that fixes a python bug (doctests
- # not found in extension modules, http://bugs.python.org/issue3158)
- class NumpyDocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
- return True # [XX] no way not be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
-
- doctest.DocTestFinder._find(self, tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
- from inspect import (
- isroutine, isclass, ismodule, isfunction, ismethod
- )
-
- # Look for tests in a module's contained objects.
- if ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val)):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((isfunction(val) or isclass(val) or
- ismethod(val) or isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
- # second-chance checker; if the default comparison doesn't
- # pass, then see if the expected output string contains flags that
- # tell us to ignore the output
- class NumpyOutputChecker(doctest.OutputChecker):
- def check_output(self, want, got, optionflags):
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret:
- if "#random" in want:
- return True
-
- # it would be useful to normalize endianness so that
- # bigendian machines don't fail all the tests (and there are
- # actually some bigendian examples in the doctests). Let's try
- # making them all little endian
- got = got.replace("'>", "'<")
- want = want.replace("'>", "'<")
-
- # try to normalize out 32 and 64 bit default int sizes
- for sz in [4, 8]:
- got = got.replace("'<i%d'" % sz, "int")
- want = want.replace("'<i%d'" % sz, "int")
-
- ret = doctest.OutputChecker.check_output(self, want,
- got, optionflags)
-
- return ret
-
-
- # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
- # its constructor that blocks non-default arguments from being passed
- # down into doctest.DocTestCase
- class NumpyDocTestCase(npd.DocTestCase):
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- self._nose_obj = obj
- doctest.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
-
-
- print_state = numpy.get_printoptions()
-
- class NumpyDoctest(npd.Doctest):
- name = 'numpydoctest' # call nosetests with --with-numpydoctest
- score = 1000 # load late, after doctest builtin
-
- # always use whitespace and ellipsis options for doctests
- doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- # files that should be ignored for doctests
- doctest_ignore = ['generate_numpy_api.py',
- 'setup.py']
-
- # Custom classes; class variables to allow subclassing
- doctest_case_class = NumpyDocTestCase
- out_check_class = NumpyOutputChecker
- test_finder_class = NumpyDocTestFinder
-
- # Don't use the standard doctest option handler; hard-code the option values
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- # Test doctests in 'test' files / directories. Standard plugin default
- # is False
- self.doctest_tests = True
- # Variable name; if defined, doctest results stored in this variable in
- # the top-level namespace. None is the standard default
- self.doctest_result_var = None
-
- def configure(self, options, config):
- # parent method sets enabled flag from command line --with-numpydoctest
- Plugin.configure(self, options, config)
- self.finder = self.test_finder_class()
- self.parser = doctest.DocTestParser()
- if self.enabled:
- # Pull standard doctest out of plugin list; there's no reason to run
- # both. In practice the Unplugger plugin above would cover us when
- # run from a standard numpy.test() call; this is just in case
- # someone wants to run our plugin outside the numpy.test() machinery
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
-
- def set_test_context(self, test):
- """ Configure `test` object to set test context
-
- We set the numpy / scipy standard doctest namespace
-
- Parameters
- ----------
- test : test object
- with ``globs`` dictionary defining namespace
-
- Returns
- -------
- None
-
- Notes
- -----
- `test` object modified in place
- """
- # set the namespace for tests
- pkg_name = get_package_name(os.path.dirname(test.filename))
-
- # Each doctest should execute in an environment equivalent to
- # starting Python and executing "import numpy as np", and,
- # for SciPy packages, an additional import of the local
- # package (so that scipy.linalg.basic.py's doctests have an
- # implicit "from scipy import linalg" as well.
- #
- # Note: __file__ allows the doctest in NoseTester to run
- # without producing an error
- test.globs = {'__builtins__':__builtins__,
- '__file__':'__main__',
- '__name__':'__main__',
- 'np':numpy}
- # add appropriate scipy import for SciPy tests
- if 'scipy' in pkg_name:
- p = pkg_name.split('.')
- p2 = p[-1]
- test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
-
- # Override test loading to customize test context (with set_test_context
- # method), set standard docstring options, and install our own test output
- # checker
- def loadTestsFromModule(self, module):
- if not self.matches(module.__name__):
- npd.log.debug("Doctest doesn't want module %s", module)
- return
- try:
- tests = self.finder.find(module)
- except AttributeError:
- # nose allows module.__test__ = False; doctest does not and
- # throws AttributeError
- return
- if not tests:
- return
- tests.sort()
- module_file = src(module.__file__)
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
- # Set test namespace; test altered in place
- self.set_test_context(test)
- yield self.doctest_case_class(test,
- optionflags=self.doctest_optflags,
- checker=self.out_check_class(),
- result_var=self.doctest_result_var)
-
- # Add an afterContext method to nose.plugins.doctests.Doctest in order
- # to restore print options to the original state after each doctest
- def afterContext(self):
- numpy.set_printoptions(**print_state)
-
- # Ignore NumPy-specific build files that shouldn't be searched for tests
- def wantFile(self, file):
- bn = os.path.basename(file)
- if bn in self.doctest_ignore:
- return False
- return npd.Doctest.wantFile(self, file)
-
-
- class Unplugger(object):
- """ Nose plugin to remove named plugin late in loading
-
- By default it removes the "doctest" plugin.
- """
- name = 'unplugger'
- enabled = True # always enabled
- score = 4000 # load late in order to be after builtins
-
- def __init__(self, to_unplug='doctest'):
- self.to_unplug = to_unplug
-
- def options(self, parser, env):
- pass
-
- def configure(self, options, config):
- # Pull named plugin out of plugins list
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != self.to_unplug]
-
-
-
- # Class allows us to save the results of the tests in runTests - see runTests
- # method docstring for details
- class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
-
- Because nose currently discards the test result object, but we need
- to return it to the user, override TestProgram.runTests to retain
- the result
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
-
diff --git a/numpy/testing/pytest_tools/nosetester.py b/numpy/testing/pytest_tools/nosetester.py
deleted file mode 100644
index 46e2b9b8c..000000000
--- a/numpy/testing/pytest_tools/nosetester.py
+++ /dev/null
@@ -1,566 +0,0 @@
-"""
-Nose test running.
-
-This module implements ``test()`` and ``bench()`` functions for NumPy modules.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-from numpy.compat import basestring
-import numpy as np
-
-from .utils import import_nose, suppress_warnings
-
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
-
-
-def get_package_name(filepath):
- """
- Given a path where a package is installed, determine its name.
-
- Parameters
- ----------
- filepath : str
- Path to a file. If the determination fails, "numpy" is returned.
-
- Examples
- --------
- >>> np.testing.nosetester.get_package_name('nonsense')
- 'numpy'
-
- """
-
- fullpath = filepath[:]
- pkg_name = []
- while 'site-packages' in filepath or 'dist-packages' in filepath:
- filepath, p2 = os.path.split(filepath)
- if p2 in ('site-packages', 'dist-packages'):
- break
- pkg_name.append(p2)
-
- # if package name determination failed, just default to numpy/scipy
- if not pkg_name:
- if 'scipy' in fullpath:
- return 'scipy'
- else:
- return 'numpy'
-
- # otherwise, reverse to get correct order and return
- pkg_name.reverse()
-
- # don't include the outer egg directory
- if pkg_name[0].endswith('.egg'):
- pkg_name.pop(0)
-
- return '.'.join(pkg_name)
-
-
-def run_module_suite(file_to_run=None, argv=None):
- """
- Run a test module.
-
- Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
- the command line. This version is for pytest rather than nose.
-
- Parameters
- ----------
- file_to_run : str, optional
- Path to test module, or None.
- By default, run the module from which this function is called.
- argv : list of strings
- Arguments to be passed to the pytest runner. ``argv[0]`` is
- ignored. All command line arguments accepted by ``pytest``
- will work. If it is the default value None, sys.argv is used.
-
- .. versionadded:: 1.14.0
-
- Examples
- --------
- Adding the following::
-
- if __name__ == "__main__" :
- run_module_suite(argv=sys.argv)
-
- at the end of a test module will run the tests when that module is
- called in the python interpreter.
-
- Alternatively, calling::
-
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
-
-    from an interpreter will run all the test routines in 'test_matlib.py'.
- """
- import pytest
- if file_to_run is None:
- f = sys._getframe(1)
- file_to_run = f.f_locals.get('__file__', None)
- if file_to_run is None:
- raise AssertionError
-
- if argv is None:
- argv = sys.argv[1:] + [file_to_run]
- else:
- argv = argv + [file_to_run]
-
- pytest.main(argv)
-
-if False:
- # disable run_module_suite and NoseTester
- # until later
- class NoseTester(object):
- """
- Nose test runner.
-
- This class is made available as numpy.testing.Tester, and a test function
- is typically added to a package's __init__.py like so::
-
- from numpy.testing import Tester
- test = Tester().test
-
- Calling this test function finds and runs all tests associated with the
- package and all its sub-packages.
-
- Attributes
- ----------
- package_path : str
- Full path to the package to test.
- package_name : str
- Name of the package to test.
-
- Parameters
- ----------
- package : module, str or None, optional
- The package to test. If a string, this should be the full path to
- the package. If None (default), `package` is set to the module from
- which `NoseTester` is initialized.
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- Default is "release".
- depth : int, optional
- If `package` is None, then this can be used to initialize from the
- module of the caller of (the caller of (...)) the code that
- initializes `NoseTester`. Default of 0 means the module of the
- immediate caller; higher values are useful for utility routines that
- want to initialize `NoseTester` objects on behalf of other code.
-
- """
- def __init__(self, package=None, raise_warnings="release", depth=0):
- # Back-compat: 'None' used to mean either "release" or "develop"
- # depending on whether this was a release or develop version of
- # numpy. Those semantics were fine for testing numpy, but not so
- # helpful for downstream projects like scipy that use
- # numpy.testing. (They want to set this based on whether *they* are a
- # release or develop version, not whether numpy is.) So we continue to
- # accept 'None' for back-compat, but it's now just an alias for the
- # default "release".
- if raise_warnings is None:
- raise_warnings = "release"
-
- package_name = None
- if package is None:
- f = sys._getframe(1 + depth)
- package_path = f.f_locals.get('__file__', None)
- if package_path is None:
- raise AssertionError
- package_path = os.path.dirname(package_path)
- package_name = f.f_locals.get('__name__', None)
- elif isinstance(package, type(os)):
- package_path = os.path.dirname(package.__file__)
- package_name = getattr(package, '__name__', None)
- else:
- package_path = str(package)
-
- self.package_path = package_path
-
- # Find the package name under test; this name is used to limit coverage
- # reporting (if enabled).
- if package_name is None:
- package_name = get_package_name(package_path)
- self.package_name = package_name
-
- # Set to "release" in constructor in maintenance branches.
- self.raise_warnings = raise_warnings
-
- def _test_argv(self, label, verbose, extra_argv):
- ''' Generate argv for nosetests command
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- see ``test`` docstring
- verbose : int, optional
- Integer in range 1..3, bigger means more verbose.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- argv : list
- command line arguments that will be passed to nose
- '''
- argv = [__file__, self.package_path, '-s']
- if label and label != 'full':
- if not isinstance(label, basestring):
- raise TypeError('Selection label should be a string')
- if label == 'fast':
- label = 'not slow'
- argv += ['-A', label]
-
- argv += [['-q'], [''], ['-v']][min(verbose - 1, 2)]
-
- # FIXME is this true of pytest
- # When installing with setuptools, and also in some other cases, the
- # test_*.py files end up marked +x executable. Nose, by default, does
- # not run files marked with +x as they might be scripts. However, in
- # our case nose only looks for test_*.py files under the package
- # directory, which should be safe.
- # argv += ['--exe']
- if extra_argv:
- argv += extra_argv
- return argv
-
- def _show_system_info(self):
- import pytest
- import numpy
-
- print("NumPy version %s" % numpy.__version__)
- relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
- npdir = os.path.dirname(numpy.__file__)
- print("NumPy is installed in %s" % npdir)
-
- if 'scipy' in self.package_name:
- import scipy
- print("SciPy version %s" % scipy.__version__)
- spdir = os.path.dirname(scipy.__file__)
- print("SciPy is installed in %s" % spdir)
-
- pyversion = sys.version.replace('\n', '')
- print("Python version %s" % pyversion)
- print("pytest version %d.%d.%d" % pytest.__versioninfo__)
-
- def _get_custom_doctester(self):
- """ Return instantiated plugin for doctests
-
- Allows subclassing of this class to override doctester
-
- A return value of None means use the nose builtin doctest plugin
- """
- from .noseclasses import NumpyDoctest
- return NumpyDoctest()
-
- def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, timer=False):
- """
- Run tests for module using nose.
-
- This method does the heavy lifting for the `test` method. It takes all
- the same arguments, for details see `test`.
-
- See Also
- --------
- test
-
- """
- # fail with nice error message if nose is not present
- import_nose()
- # compile argv
- argv = self._test_argv(label, verbose, extra_argv)
- # our way of doing coverage
- if coverage:
- argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
- '--cover-tests', '--cover-erase']
-
- if timer:
- if timer is True:
- argv += ['--with-timer']
- elif isinstance(timer, int):
- argv += ['--with-timer', '--timer-top-n', str(timer)]
-
- # construct list of plugins
- import nose.plugins.builtin
- from nose.plugins import EntryPointPluginManager
- from .noseclasses import KnownFailurePlugin, Unplugger
- plugins = [KnownFailurePlugin()]
- plugins += [p() for p in nose.plugins.builtin.plugins]
- try:
- # External plugins (like nose-timer)
- entrypoint_manager = EntryPointPluginManager()
- entrypoint_manager.loadPlugins()
- plugins += [p for p in entrypoint_manager.plugins]
- except ImportError:
- # Relies on pkg_resources, not a hard dependency
- pass
-
- # add doctesting if required
- doctest_argv = '--with-doctest' in argv
- if doctests == False and doctest_argv:
- doctests = True
- plug = self._get_custom_doctester()
- if plug is None:
- # use standard doctesting
- if doctests and not doctest_argv:
- argv += ['--with-doctest']
- else: # custom doctesting
- if doctest_argv: # in fact the unplugger would take care of this
- argv.remove('--with-doctest')
- plugins += [Unplugger('doctest'), plug]
- if doctests:
- argv += ['--with-' + plug.name]
- return argv, plugins
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None,
- timer=False):
- """
- Run tests for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the tests to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow tests as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1..3. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
- doctests : bool, optional
- If True, run doctests in module. Default is False.
- coverage : bool, optional
- If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module:
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- The default is to use the class initialization value.
- timer : bool or int, optional
- Timing of individual tests with ``nose-timer`` (which needs to be
- installed). If True, time tests and report on all of them.
- If an integer (say ``N``), report timing results for ``N`` slowest
- tests.
-
- Returns
- -------
- result : object
- Returns the result of running the tests as a
- ``nose.result.TextTestResult`` object.
-
- Notes
- -----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
-
- >>> np.lib.test() #doctest: +SKIP
-
- Examples
- --------
- >>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
- ...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
- """
-
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- from . import utils
- utils.verbose = verbose
-
- argv, plugins = self.prepare_test_args(
- label, verbose, extra_argv, doctests, coverage, timer)
-
- if doctests:
- print("Running unit tests and doctests for %s" % self.package_name)
- else:
- print("Running unit tests for %s" % self.package_name)
-
- self._show_system_info()
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
- if raise_warnings is None:
- raise_warnings = self.raise_warnings
-
- _warn_opts = dict(develop=(Warning,),
- release=())
- if isinstance(raise_warnings, basestring):
- raise_warnings = _warn_opts[raise_warnings]
-
- with suppress_warnings("location") as sup:
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # Set all warnings to 'warn', this is because the default 'once'
- # has the bad property of possibly shadowing later warnings.
- warnings.filterwarnings('always')
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- sup.filter(message='Not importing directory')
- sup.filter(message="numpy.dtype size changed")
- sup.filter(message="numpy.ufunc size changed")
- sup.filter(category=np.ModuleDeprecationWarning)
- # Filter out boolean '-' deprecation messages. This allows
- # older versions of scipy to test without a flood of messages.
- sup.filter(message=".*boolean negative.*")
- sup.filter(message=".*boolean subtract.*")
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- from ...distutils import cpuinfo
- sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
- # Filter out some deprecation warnings inside nose 1.3.7 when run
- # on python 3.5b2. See
- # https://github.com/nose-devs/nose/issues/929
- # Note: it is hard to filter based on module for sup (lineno could
- # be implemented).
- warnings.filterwarnings("ignore", message=".*getargspec.*",
- category=DeprecationWarning,
- module=r"nose\.")
-
- from .noseclasses import NumpyTestProgram
-
- t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
-
- return t.result
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- """
- Run benchmarks for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the benchmarks to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow benchmarks as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Integer in range 1..3, bigger means more verbose.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- success : bool
- Returns True if running the benchmarks works, False if an error
- occurred.
-
- Notes
- -----
- Benchmarks are like tests, but have names starting with "bench" instead
- of "test", and can be found under the "benchmarks" sub-directory of the
- module.
-
- Each NumPy module exposes `bench` in its namespace to run all benchmarks
- for it.
-
- Examples
- --------
- >>> success = np.lib.bench() #doctest: +SKIP
- Running benchmarks for numpy.lib
- ...
- using 562341 items:
- unique:
- 0.11
- unique1d:
- 0.11
- ratio: 1.0
- nUnique: 56230 == 56230
- ...
- OK
-
- >>> success #doctest: +SKIP
- True
-
- """
-
- print("Running benchmarks for %s" % self.package_name)
- self._show_system_info()
-
- argv = self._test_argv(label, verbose, extra_argv)
- argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
-
- # import nose or make informative error
- nose = import_nose()
-
- # get plugin to disable doctests
- from .noseclasses import Unplugger
- add_plugins = [Unplugger('doctest')]
-
- return nose.run(argv=argv, addplugins=add_plugins)
-else:
-
- class NoseTester(object):
- def __init__(self, package=None, raise_warnings="release", depth=0):
- pass
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None,
- timer=False):
- pass
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- pass
-
-
-def _numpy_tester():
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- mode = "develop"
- else:
- mode = "release"
- return NoseTester(raise_warnings=mode, depth=1)
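The deleted ``run_module_suite`` above was essentially a thin wrapper around
``pytest.main``, so the same effect is available without it. A minimal sketch;
the file path and module name are placeholders::

    import sys
    import pytest

    # run a single test file, forwarding any command-line flags
    code = pytest.main(sys.argv[1:] + ['numpy/tests/test_matlib.py'])

    # or run an installed package's tests by import name
    code = pytest.main(['--pyargs', 'numpy.lib'])
    sys.exit(code)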
diff --git a/numpy/testing/pytest_tools/utils.py b/numpy/testing/pytest_tools/utils.py
deleted file mode 100644
index 8a0eb8be3..000000000
--- a/numpy/testing/pytest_tools/utils.py
+++ /dev/null
@@ -1,2268 +0,0 @@
-"""
-Utility function to facilitate testing.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import re
-import operator
-import warnings
-from functools import partial, wraps
-import shutil
-import contextlib
-from tempfile import mkdtemp, mkstemp
-
-from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-__all__ = [
- 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
- 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
- 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
- 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
- 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
- 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
- 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
- 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
- ]
-
-
-class KnownFailureException(Exception):
- """Raise this exception to mark a test as a known failing test.
-
- """
- def __new__(cls, *args, **kwargs):
- # import _pytest here to avoid hard dependency
- import _pytest
- return _pytest.skipping.xfail(*args, **kwargs)
-
-
-class SkipTest(Exception):
- """Raise this exception to mark a skipped test.
-
- """
- def __new__(cls, *args, **kwargs):
- # import _pytest here to avoid hard dependency
- import _pytest
- return _pytest.runner.Skipped(*args, **kwargs)
-
-
-class IgnoreException(Exception):
- """Ignoring this exception due to disabled feature
-
- This exception seems unused and can be removed.
-
- """
- pass
-
-
-KnownFailureTest = KnownFailureException # backwards compat
-
-verbose = 0
-
-IS_PYPY = '__pypy__' in sys.modules
-HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
-
-
-def import_nose():
-    """ Not needed for pytest; kept as a dummy function.
-
- """
- pass
-
-
-def assert_(val, msg=''):
- """
- Assert that works in release mode.
- Accepts callable msg to allow deferring evaluation until failure.
-
- The Python built-in ``assert`` does not work when executing code in
- optimized mode (the ``-O`` flag) - no byte-code is generated for it.
-
- For documentation on usage, refer to the Python documentation.
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if not val:
- try:
- smsg = msg()
- except TypeError:
- smsg = msg
- raise AssertionError(smsg)
-
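-# A minimal usage sketch for ``assert_`` (illustrative only): the ``msg``
-# argument may be a callable, so building an expensive failure message is
-# deferred until the assertion actually fails.
-#
-#     >>> x = 2
-#     >>> assert_(x == 2, lambda: "expected 2, got %r" % (x,))
-#     >>> assert_(isinstance(x, int), "x should be an integer")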
-
-def gisnan(x):
-    """like isnan, but always raises an error if the type is not supported
-    instead of silently returning NotImplemented.
-
- Notes
- -----
- isnan and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isnan
- st = isnan(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isnan not supported for this type")
- return st
-
-
-def gisfinite(x):
-    """like isfinite, but always raises an error if the type is not supported
-    instead of silently returning NotImplemented.
-
- Notes
- -----
- isfinite and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, errstate
- with errstate(invalid='ignore'):
- st = isfinite(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isfinite not supported for this type")
- return st
-
-
-def gisinf(x):
-    """like isinf, but always raises an error if the type is not supported
-    instead of silently returning NotImplemented.
-
- Notes
- -----
- isinf and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, errstate
- with errstate(invalid='ignore'):
- st = isinf(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isinf not supported for this type")
- return st
-
-
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
-
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
-elif sys.platform[:5] == 'linux':
-
- def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """
- Return virtual memory size in bytes of the running python.
-
- """
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[22])
- except Exception:
- return
-else:
- def memusage():
- """
- Return memory usage of running python. [Not implemented]
-
- """
- raise NotImplementedError
-
-
-if sys.platform[:5] == 'linux':
- def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
- _load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[13])
- except Exception:
- return int(100*(time.time()-_load_time[0]))
-else:
-    # os.getpid is not available on all platforms.
- # Using time is safe but inaccurate, especially when process
- # was suspended or sleeping.
- def jiffies(_load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- return int(100*(time.time()-_load_time[0]))
-
-
-def build_err_msg(arrays, err_msg, header='Items are not equal:',
- verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
- msg = ['\n' + header]
- if err_msg:
- if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
- msg = [msg[0] + ' ' + err_msg]
- else:
- msg.append(err_msg)
- if verbose:
- for i, a in enumerate(arrays):
-
- if isinstance(a, ndarray):
- # precision argument is only needed if the objects are ndarrays
- r_func = partial(array_repr, precision=precision)
- else:
- r_func = repr
-
- try:
- r = r_func(a)
- except Exception as exc:
- r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
- if r.count('\n') > 3:
- r = '\n'.join(r.splitlines()[:3])
- r += '...'
- msg.append(' %s: %s' % (names[i], r))
- return '\n'.join(msg)
-
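-# A small illustrative sketch of ``build_err_msg`` (the arrays are arbitrary
-# examples, using the ``array`` imported at the top of this module): it joins
-# the header, an optional user message and the offending values into one
-# string.
-#
-#     >>> a = array([1.0, 2.0])
-#     >>> b = array([1.0, 3.0])
-#     >>> msg = build_err_msg([a, b], err_msg='custom note')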
-
-def assert_equal(actual, desired, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal.
-
- Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
- check that all elements of these objects are equal. An exception is raised
-    at the first conflicting value.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal.
-
- Examples
- --------
- >>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- item=1
- ACTUAL: 5
- DESIRED: 6
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if isinstance(desired, dict):
- if not isinstance(actual, dict):
- raise AssertionError(repr(type(actual)))
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k, i in desired.items():
- if k not in actual:
- raise AssertionError(repr(k))
- assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
- return
- if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in range(len(desired)):
- assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
- return
- from numpy.core import ndarray, isscalar, signbit
- from numpy.lib import iscomplexobj, real, imag
- if isinstance(actual, ndarray) or isinstance(desired, ndarray):
- return assert_array_equal(actual, desired, err_msg, verbose)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_equal(actualr, desiredr)
- assert_equal(actuali, desiredi)
- except AssertionError:
- raise AssertionError(msg)
-
- # isscalar test to check cases such as [np.nan] != np.nan
- if isscalar(desired) != isscalar(actual):
- raise AssertionError(msg)
-
- # Inf/nan/negative zero handling
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan or isactnan:
- if not (isdesnan and isactnan):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- elif desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- # If both are NaT (and have the same dtype -- datetime or timedelta)
- # they are considered equal.
- if (isnat(desired) == isnat(actual) and
- array(desired).dtype.type == array(actual).dtype.type):
- return
- else:
- raise AssertionError(msg)
-
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- # Explicitly use __eq__ for comparison, ticket #2552
- if not (desired == actual):
- raise AssertionError(msg)
-
-
-def print_assert_equal(test_string, actual, desired):
- """
- Test if two objects are equal, and print an error message if test fails.
-
- The test is performed with ``actual == desired``.
-
- Parameters
- ----------
- test_string : str
- The message supplied to AssertionError.
- actual : object
- The object to test for equality against `desired`.
- desired : object
- The expected result.
-
- Examples
- --------
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
- Traceback (most recent call last):
- ...
- AssertionError: Test XYZ of func xyz failed
- ACTUAL:
- [0, 1]
- DESIRED:
- [0, 2]
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import pprint
-
- if not (actual == desired):
- msg = StringIO()
- msg.write(test_string)
- msg.write(' failed\nACTUAL: \n')
- pprint.pprint(actual, msg)
- msg.write('DESIRED: \n')
- pprint.pprint(desired, msg)
- raise AssertionError(msg.getvalue())
-
-
-def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies that the elements of ``actual`` and ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation in `assert_array_almost_equal` did up to rounding
- vagaries. An exception is raised at conflicting values. For ndarrays this
- delegates to assert_array_almost_equal
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- decimal : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> import numpy.testing as npt
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
-
- >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
- ... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import ndarray
- from numpy.lib import iscomplexobj, real, imag
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- def _build_err_msg():
- header = ('Arrays are not almost equal to %d decimals' % decimal)
- return build_err_msg([actual, desired], err_msg, verbose=verbose,
- header=header)
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_almost_equal(actualr, desiredr, decimal=decimal)
- assert_almost_equal(actuali, desiredi, decimal=decimal)
- except AssertionError:
- raise AssertionError(_build_err_msg())
-
- if isinstance(actual, (ndarray, tuple, list)) \
- or isinstance(desired, (ndarray, tuple, list)):
- return assert_array_almost_equal(actual, desired, decimal, err_msg)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(_build_err_msg())
- else:
- if not desired == actual:
- raise AssertionError(_build_err_msg())
- return
- except (NotImplementedError, TypeError):
- pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
- raise AssertionError(_build_err_msg())
-
-
-def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to significant
- digits.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- Given two numbers, check that they are approximately equal.
- Approximately equal is defined as the number of significant digits
- that agree.
-
- Parameters
- ----------
- actual : scalar
- The object to check.
- desired : scalar
- The expected object.
- significant : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
-    ...                                significant=8)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
-    ...                                significant=8)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
-
-    The evaluated condition that raises the exception is
-
- >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
- True
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- (actual, desired) = map(float, (actual, desired))
- if desired == actual:
- return
-    # Normalize the numbers to be in range (-10.0, 10.0)
- # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- with np.errstate(invalid='ignore'):
- scale = 0.5*(np.abs(desired) + np.abs(actual))
- scale = np.power(10, np.floor(np.log10(scale)))
- try:
- sc_desired = desired/scale
- except ZeroDivisionError:
- sc_desired = 0.0
- try:
- sc_actual = actual/scale
- except ZeroDivisionError:
- sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- except (TypeError, NotImplementedError):
- pass
- if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
- raise AssertionError(msg)
-
-
-def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
- header='', precision=6, equal_nan=True,
- equal_inf=True):
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, isinf, any, inf
- x = array(x, copy=False, subok=True)
- y = array(y, copy=False, subok=True)
-
- def isnumber(x):
- return x.dtype.char in '?bhilqpBHILQPefdgFDG'
-
- def istime(x):
- return x.dtype.char in "Mm"
-
- def chk_same_position(x_id, y_id, hasval='nan'):
- """Handling nan/inf: check that x and y have the nan/inf at the same
- locations."""
- try:
- assert_array_equal(x_id, y_id)
- except AssertionError:
- msg = build_err_msg([x, y],
- err_msg + '\nx and y %s location mismatch:'
- % (hasval), verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- try:
- cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
- if not cond:
- msg = build_err_msg([x, y],
- err_msg
- + '\n(shapes %s, %s mismatch)' % (x.shape,
- y.shape),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- if isnumber(x) and isnumber(y):
- has_nan = has_inf = False
- if equal_nan:
- x_isnan, y_isnan = isnan(x), isnan(y)
- # Validate that NaNs are in the same place
- has_nan = any(x_isnan) or any(y_isnan)
- if has_nan:
- chk_same_position(x_isnan, y_isnan, hasval='nan')
-
- if equal_inf:
- x_isinf, y_isinf = isinf(x), isinf(y)
- # Validate that infinite values are in the same place
- has_inf = any(x_isinf) or any(y_isinf)
- if has_inf:
- # Check +inf and -inf separately, since they are different
- chk_same_position(x == +inf, y == +inf, hasval='+inf')
- chk_same_position(x == -inf, y == -inf, hasval='-inf')
-
- if has_nan and has_inf:
- x = x[~(x_isnan | x_isinf)]
- y = y[~(y_isnan | y_isinf)]
- elif has_nan:
- x = x[~x_isnan]
- y = y[~y_isnan]
- elif has_inf:
- x = x[~x_isinf]
- y = y[~y_isinf]
-
- # Only do the comparison if actual values are left
- if x.size == 0:
- return
-
- elif istime(x) and istime(y):
- # If one is datetime64 and the other timedelta64 there is no point
- if equal_nan and x.dtype.type == y.dtype.type:
- x_isnat, y_isnat = isnat(x), isnat(y)
-
- if any(x_isnat) or any(y_isnat):
- chk_same_position(x_isnat, y_isnat, hasval="NaT")
-
- if any(x_isnat) or any(y_isnat):
- x = x[~x_isnat]
- y = y[~y_isnat]
-
- val = comparison(x, y)
-
- if isinstance(val, bool):
- cond = val
- reduced = [0]
- else:
- reduced = val.ravel()
- cond = reduced.all()
- reduced = reduced.tolist()
- if not cond:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- if not cond:
- raise AssertionError(msg)
- except ValueError:
- import traceback
- efmt = traceback.format_exc()
- header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
-
- msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise ValueError(msg)
-
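-# An illustrative sketch of ``assert_array_compare``, the engine behind
-# ``assert_array_equal`` and ``assert_array_less`` below; any elementwise
-# comparison function can be plugged in (``operator.__le__`` here is an
-# arbitrary choice).
-#
-#     >>> import operator
-#     >>> assert_array_compare(operator.__le__, [1, 2, 3], [1, 2, 4],
-#     ...                      header='Arrays are not ordered by <=')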
-
-def assert_array_equal(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not equal.
-
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
-    in numpy, NaNs are compared like numbers; no assertion is raised if
- both objects have NaNs in the same positions.
-
- The usual caution for verifying equality with floating point numbers is
- advised.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- The first assert does not raise an exception:
-
- >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
- ... [np.exp(0),2.33333, np.nan])
-
-    Assert fails with numerical imprecision with floats:
-
- >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- AssertionError:
- Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
-
- Use `assert_allclose` or one of the nulp (number of floating point values)
- functions for these cases instead:
-
- >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan],
- ... rtol=1e-10, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
- verbose=verbose, header='Arrays are not equal')
-
-
-def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies identical shapes and that the elements of ``actual`` and
- ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation did up to rounding vagaries. An exception is raised
- at shape mismatch or conflicting values. In contrast to the standard usage
-    in numpy, NaNs are compared like numbers; no assertion is raised if both
- objects have NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- decimal : int, optional
- Desired precision, default is 6.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- the first assert does not raise an exception
-
- >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
-    ...                                      [1.0,2.333,np.nan])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
- AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
- from numpy.core.numerictypes import issubdtype
- from numpy.core.fromnumeric import any as npany
-
- def compare(x, y):
- try:
-            if npany(gisinf(x)) or npany(gisinf(y)):
- xinfid = gisinf(x)
- yinfid = gisinf(y)
- if not (xinfid == yinfid).all():
- return False
-                # if one item, x and y are +- inf
- if x.size == y.size == 1:
- return x == y
- x = x[~xinfid]
- y = y[~yinfid]
- except (TypeError, NotImplementedError):
- pass
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x - y)
-
- if not issubdtype(z.dtype, number):
- z = z.astype(float_) # handle object arrays
-
- return z < 1.5 * 10.0**(-decimal)
-
- assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
- header=('Arrays are not almost equal to %d decimals' % decimal),
- precision=decimal)
-
-
-def assert_array_less(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not ordered by less
- than.
-
- Given two array_like objects, check that the shape is equal and all
- elements of the first object are strictly smaller than those of the
- second object. An exception is raised at shape mismatch or incorrectly
- ordered values. Shape mismatch does not raise if an object has zero
- dimension. In contrast to the standard usage in numpy, NaNs are
-    compared; no assertion is raised if both objects have NaNs in the same
- positions.
-
-
-
- Parameters
- ----------
- x : array_like
- The smaller object to check.
- y : array_like
- The larger object to compare.
- err_msg : string
- The error message to be printed in case of failure.
- verbose : bool
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_array_equal: tests objects for equality
- assert_array_almost_equal: test objects for equality up to precision
-
-
-
- Examples
- --------
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
-
- >>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
- y: array(3)
-
- >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
- y: array([4])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
- verbose=verbose,
- header='Arrays are not less-ordered',
- equal_inf=False)
-
-
-def runstring(astr, dict):
- exec(astr, dict)
-
-
-def assert_string_equal(actual, desired):
- """
- Test if two strings are equal.
-
- If the given strings are equal, `assert_string_equal` does nothing.
- If they are not equal, an AssertionError is raised, and the diff
- between the strings is shown.
-
- Parameters
- ----------
- actual : str
- The string to test for equality against the expected string.
- desired : str
- The expected string.
-
- Examples
- --------
- >>> np.testing.assert_string_equal('abc', 'abc')
- >>> np.testing.assert_string_equal('abc', 'abcd')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ...
- AssertionError: Differences in strings:
- - abc+ abcd? +
-
- """
- # delay import of difflib to reduce startup time
- __tracebackhide__ = True # Hide traceback for py.test
- import difflib
-
- if not isinstance(actual, str):
- raise AssertionError(repr(type(actual)))
- if not isinstance(desired, str):
- raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
- return
-
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
- diff_list = []
- while diff:
- d1 = diff.pop(0)
- if d1.startswith(' '):
- continue
- if d1.startswith('- '):
- l = [d1]
- d2 = diff.pop(0)
- if d2.startswith('? '):
- l.append(d2)
- d2 = diff.pop(0)
- if not d2.startswith('+ '):
- raise AssertionError(repr(d2))
- l.append(d2)
- if diff:
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
- continue
- diff_list.extend(l)
- continue
- raise AssertionError(repr(d1))
- if not diff_list:
- return
- msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
- if actual != desired:
- raise AssertionError(msg)
-
-
-def rundocs(filename=None, raise_on_error=True):
- """
- Run doctests found in the given file.
-
- By default `rundocs` raises an AssertionError on failure.
-
- Parameters
- ----------
- filename : str
- The path to the file for which the doctests are run.
- raise_on_error : bool
- Whether to raise an AssertionError when a doctest fails. Default is
- True.
-
- Notes
- -----
- The doctests can be run by the user/developer by adding the ``doctests``
- argument to the ``test()`` call. For example, to run all tests (including
- doctests) for `numpy.lib`:
-
- >>> np.lib.test(doctests=True) #doctest: +SKIP
- """
- from numpy.compat import npy_load_module
- import doctest
- if filename is None:
- f = sys._getframe(1)
- filename = f.f_globals['__file__']
- name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
-
- tests = doctest.DocTestFinder().find(m)
- runner = doctest.DocTestRunner(verbose=False)
-
- msg = []
- if raise_on_error:
- out = lambda s: msg.append(s)
- else:
- out = None
-
- for test in tests:
- runner.run(test, out=out)
-
- if runner.failures > 0 and raise_on_error:
- raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
-
-
-def raises(*exceptions):
- """
- This is actually a decorator and belongs in decorators.py.
-
- """
- import pytest
-
- def raises_decorator(f):
-
- def raiser(*args, **kwargs):
- try:
- f(*args, **kwargs)
- except exceptions:
- return
- raise AssertionError()
-
- return raiser
-
-
- return raises_decorator
-
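-# A short usage sketch for the ``raises`` decorator (the test name is
-# arbitrary): the wrapped function passes if one of the listed exceptions is
-# raised and fails with an AssertionError otherwise.
-#
-#     >>> @raises(ZeroDivisionError)
-#     ... def test_div_by_zero():
-#     ...     1 / 0
-#     >>> test_div_by_zero()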
-
-def assert_raises(exception_class, fn=None, *args, **kwargs):
- """
- assert_raises(exception_class, callable, *args, **kwargs)
- assert_raises(exception_class)
-
- Fail unless an exception of class exception_class is thrown
- by callable when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
-
- Alternatively, `assert_raises` can be used as a context manager:
-
- >>> from numpy.testing import assert_raises
- >>> with assert_raises(ZeroDivisionError):
- ... 1 / 0
-
- is equivalent to
-
- >>> def div(x, y):
- ... return x / y
- >>> assert_raises(ZeroDivisionError, div, 1, 0)
-
- """
- import pytest
-
- __tracebackhide__ = True # Hide traceback for py.test
-
- if fn is not None:
- pytest.raises(exception_class, fn, *args,**kwargs)
- else:
- assert not kwargs
-
- return pytest.raises(exception_class)
-
-
-def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
- """
- assert_raises_regex(exception_class, expected_regexp, callable, *args,
- **kwargs)
- assert_raises_regex(exception_class, expected_regexp)
-
- Fail unless an exception of class exception_class and with message that
- matches expected_regexp is thrown by callable when invoked with arguments
- args and keyword arguments kwargs.
-
- Alternatively, can be used as a context manager like `assert_raises`.
-
- Name of this function adheres to Python 3.2+ reference, but should work in
- all versions down to 2.6.
-
- Notes
- -----
- .. versionadded:: 1.9.0
-
- """
- import pytest
- import unittest
-
- class Dummy(unittest.TestCase):
- def do_nothing(self):
- pass
-
- tmp = Dummy('do_nothing')
-
- __tracebackhide__ = True # Hide traceback for py.test
- res = pytest.raises(exception_class, *args, **kwargs)
-
- if sys.version_info.major >= 3:
- funcname = tmp.assertRaisesRegex
- else:
- # Only present in Python 2.7, missing from unittest in 2.6
- funcname = tmp.assertRaisesRegexp
-
- return funcname(exception_class, expected_regexp, *args, **kwargs)
-
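-# A minimal sketch of ``assert_raises_regex`` used as a context manager (the
-# message text is arbitrary); the regular expression is searched for in the
-# exception message.
-#
-#     >>> with assert_raises_regex(ValueError, 'must be positive'):
-#     ...     raise ValueError('value must be positive')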
-
-def decorate_methods(cls, decorator, testmatch=None):
- """
- Apply a decorator to all methods in a class matching a regular expression.
-
- The given decorator is applied to all public methods of `cls` that are
- matched by the regular expression `testmatch`
- (``testmatch.search(methodname)``). Methods that are private, i.e. start
- with an underscore, are ignored.
-
- Parameters
- ----------
- cls : class
- Class whose methods to decorate.
- decorator : function
- Decorator to apply to methods
- testmatch : compiled regexp or str, optional
- The regular expression. Default value is None, in which case the
- nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
- is used.
- If `testmatch` is a string, it is compiled to a regular expression
- first.
-
- """
- if testmatch is None:
- testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
- else:
- testmatch = re.compile(testmatch)
- cls_attr = cls.__dict__
-
- # delayed import to reduce startup time
- from inspect import isfunction
-
- methods = [_m for _m in cls_attr.values() if isfunction(_m)]
- for function in methods:
- try:
- if hasattr(function, 'compat_func_name'):
- funcname = function.compat_func_name
- else:
- funcname = function.__name__
- except AttributeError:
- # not a function
- continue
- if testmatch.search(funcname) and not funcname.startswith('_'):
- setattr(cls, funcname, decorator(function))
- return
-
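-# An illustrative sketch of ``decorate_methods`` (the class and decorator
-# names are arbitrary): every public method matching the test pattern is
-# wrapped in place.
-#
-#     >>> def tag(func):
-#     ...     func.tagged = True
-#     ...     return func
-#     >>> class TestExample(object):
-#     ...     def test_one(self):
-#     ...         pass
-#     >>> decorate_methods(TestExample, tag)
-#     >>> TestExample.test_one.tagged
-#     True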
-
-def measure(code_str,times=1,label=None):
- """
- Return elapsed time for executing code in the namespace of the caller.
-
- The supplied code string is compiled with the Python builtin ``compile``.
-    The precision of the timing is 10 milliseconds. If the code will execute
- fast on this timescale, it can be executed many times to get reasonable
- timing accuracy.
-
- Parameters
- ----------
- code_str : str
- The code to be timed.
- times : int, optional
- The number of times the code is executed. Default is 1. The code is
- only compiled once.
- label : str, optional
- A label to identify `code_str` with. This is passed into ``compile``
- as the second argument (for run-time error messages).
-
- Returns
- -------
- elapsed : float
- Total elapsed time in seconds for executing `code_str` `times` times.
-
- Examples
- --------
-    >>> times = 10
-    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
-    ...                            times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
- Time for a single execution : 0.005 s
-
- """
- frame = sys._getframe(1)
- locs, globs = frame.f_locals, frame.f_globals
-
- code = compile(code_str,
- 'Test name: %s ' % label,
- 'exec')
- i = 0
- elapsed = jiffies()
- while i < times:
- i += 1
- exec(code, globs, locs)
- elapsed = jiffies() - elapsed
- return 0.01*elapsed
-
-
-def _assert_valid_refcount(op):
- """
- Check that ufuncs don't mishandle refcount of object `1`.
- Used in a few regression tests.
- """
- if not HAS_REFCOUNT:
- return True
- import numpy as np
-
- b = np.arange(100*100).reshape(100, 100)
- c = b
- i = 1
-
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
- del d # for pyflakes
-
-
-def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
- err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- tolerance.
-
- The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
- It compares the difference between `actual` and `desired` to
- ``atol + rtol * abs(desired)``.
-
- .. versionadded:: 1.5.0
-
- Parameters
- ----------
- actual : array_like
- Array obtained.
- desired : array_like
- Array desired.
- rtol : float, optional
- Relative tolerance.
- atol : float, optional
- Absolute tolerance.
- equal_nan : bool, optional.
- If True, NaNs will compare equal.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_array_almost_equal_nulp, assert_array_max_ulp
-
- Examples
- --------
- >>> x = [1e-5, 1e-3, 1e-1]
- >>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- def compare(x, y):
- return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
- equal_nan=equal_nan)
-
- actual, desired = np.asanyarray(actual), np.asanyarray(desired)
- header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
- assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
- verbose=verbose, header=header, equal_nan=equal_nan)
-
-
-def assert_array_almost_equal_nulp(x, y, nulp=1):
- """
- Compare two arrays relatively to their spacing.
-
- This is a relatively robust method to compare two arrays whose amplitude
- is variable.
-
- Parameters
- ----------
- x, y : array_like
- Input arrays.
- nulp : int, optional
- The maximum number of unit in the last place for tolerance (see Notes).
- Default is 1.
-
- Returns
- -------
- None
-
- Raises
- ------
- AssertionError
- If the spacing between `x` and `y` for one or more elements is larger
- than `nulp`.
-
- See Also
- --------
- assert_array_max_ulp : Check that all items of arrays differ in at most
- N Units in the Last Place.
- spacing : Return the distance between x and the nearest adjacent number.
-
- Notes
- -----
- An assertion is raised if the following condition is not met::
-
-      abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
-
- Examples
- --------
- >>> x = np.array([1., 1e-10, 1e-20])
- >>> eps = np.finfo(x.dtype).eps
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
-
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
- Traceback (most recent call last):
- ...
- AssertionError: X and Y are not equal to 1 ULP (max is 2)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ax = np.abs(x)
- ay = np.abs(y)
- ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
- if not np.all(np.abs(x-y) <= ref):
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- msg = "X and Y are not equal to %d ULP" % nulp
- else:
- max_nulp = np.max(nulp_diff(x, y))
- msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
- raise AssertionError(msg)
-
-
-def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
- """
- Check that all items of arrays differ in at most N Units in the Last Place.
-
- Parameters
- ----------
- a, b : array_like
- Input arrays to be compared.
- maxulp : int, optional
- The maximum number of units in the last place that elements of `a` and
- `b` can differ. Default is 1.
- dtype : dtype, optional
- Data-type to convert `a` and `b` to if given. Default is None.
-
- Returns
- -------
- ret : ndarray
- Array containing number of representable floating point numbers between
- items in `a` and `b`.
-
- Raises
- ------
- AssertionError
- If one or more elements differ by more than `maxulp`.
-
- See Also
- --------
- assert_array_almost_equal_nulp : Compare two arrays relatively to their
- spacing.
-
- Examples
- --------
- >>> a = np.linspace(0., 1., 100)
- >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ret = nulp_diff(a, b, dtype)
- if not np.all(ret <= maxulp):
- raise AssertionError("Arrays are not almost equal up to %g ULP" %
- maxulp)
- return ret
-
-
-def nulp_diff(x, y, dtype=None):
- """For each item in x and y, return the number of representable floating
- points between them.
-
- Parameters
- ----------
- x : array_like
- first input array
- y : array_like
- second input array
- dtype : dtype, optional
- Data-type to convert `x` and `y` to if given. Default is None.
-
- Returns
- -------
- nulp : array_like
- number of representable floating point numbers between each item in x
- and y.
-
- Examples
- --------
- # By definition, epsilon is the smallest number such as 1 + eps != 1, so
- # there should be exactly one ULP between 1 and 1 + eps
-    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
- 1.0
- """
- import numpy as np
- if dtype:
- x = np.array(x, dtype=dtype)
- y = np.array(y, dtype=dtype)
- else:
- x = np.array(x)
- y = np.array(y)
-
- t = np.common_type(x, y)
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- raise NotImplementedError("_nulp not implemented for complex array")
-
- x = np.array(x, dtype=t)
- y = np.array(y, dtype=t)
-
- if not x.shape == y.shape:
- raise ValueError("x and y do not have the same shape: %s - %s" %
- (x.shape, y.shape))
-
- def _diff(rx, ry, vdt):
- diff = np.array(rx-ry, dtype=vdt)
- return np.abs(diff)
-
- rx = integer_repr(x)
- ry = integer_repr(y)
- return _diff(rx, ry, t)
-
-
-def _integer_repr(x, vdt, comp):
- # Reinterpret binary representation of the float as sign-magnitude:
- # take into account two-complement representation
- # See also
- # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
- rx = x.view(vdt)
- if not (rx.size == 1):
- rx[rx < 0] = comp - rx[rx < 0]
- else:
- if rx < 0:
- rx = comp - rx
-
- return rx
-
-
-def integer_repr(x):
- """Return the signed-magnitude interpretation of the binary representation of
- x."""
- import numpy as np
- if x.dtype == np.float32:
- return _integer_repr(x, np.int32, np.int32(-2**31))
- elif x.dtype == np.float64:
- return _integer_repr(x, np.int64, np.int64(-2**63))
- else:
- raise ValueError("Unsupported dtype %s" % x.dtype)
-
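-# A small sketch of ``integer_repr``: adjacent floats map to adjacent
-# integers, which is what the ULP-based comparisons above rely on (the
-# variables are arbitrary).
-#
-#     >>> import numpy as np
-#     >>> x = np.float64(1.0)
-#     >>> y = np.float64(1.0) + np.finfo(np.float64).eps
-#     >>> int(integer_repr(y)) - int(integer_repr(x))
-#     1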
-
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
-class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
-
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
- "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
-
-class WarningManager(object):
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
-
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
-
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
- def __exit__(self):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-
-@contextlib.contextmanager
-def _assert_warns_context(warning_class, name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with suppress_warnings() as sup:
- l = sup.record(warning_class)
- yield
- if not len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("No warning raised" + name_str)
-
-
-def assert_warns(warning_class, *args, **kwargs):
- """
- Fail unless the given callable throws the specified warning.
-
- A warning of class warning_class should be thrown by the callable when
- invoked with arguments args and keyword arguments kwargs.
- If a different type of warning is thrown, it will not be caught.
-
- If called with all arguments other than the warning class omitted, may be
- used as a context manager:
-
- with assert_warns(SomeWarning):
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.4.0
-
- Parameters
- ----------
- warning_class : class
- The class defining the warning that `func` is expected to throw.
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_warns_context(warning_class)
-
- func = args[0]
- args = args[1:]
- with _assert_warns_context(warning_class, name=func.__name__):
- return func(*args, **kwargs)
-
-
-@contextlib.contextmanager
-def _assert_no_warnings_context(name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with warnings.catch_warnings(record=True) as l:
- warnings.simplefilter('always')
- yield
- if len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("Got warnings%s: %s" % (name_str, l))
-
-
-def assert_no_warnings(*args, **kwargs):
- """
- Fail if the given callable produces any warnings.
-
- If called with all arguments omitted, may be used as a context manager:
-
- with assert_no_warnings():
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_no_warnings_context()
-
- func = args[0]
- args = args[1:]
- with _assert_no_warnings_context(name=func.__name__):
- return func(*args, **kwargs)
-
-
-def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
- """
- generator producing data with different alignment and offsets
- to test simd vectorization
-
- Parameters
- ----------
- dtype : dtype
- data type to produce
- type : string
- 'unary': create data for unary operations, creates one input
- and output array
-    'binary': create data for binary operations, creates two input
- and output array
- max_size : integer
- maximum size of data to produce
-
- Returns
- -------
- if type is 'unary' yields one output, one input array and a message
- containing information on the data
- if type is 'binary' yields one output array, two input array and a message
- containing information on the data
-
- """
- ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
- bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
- for o in range(3):
- for s in range(o + 2, max(o + 3, max_size)):
- if type == 'unary':
- inp = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
- d = inp()
- yield d, d, ufmt % (o, o, s, dtype, 'in place')
- yield out[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'out of place')
- yield inp()[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'aliased')
- yield inp()[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'aliased')
- if type == 'binary':
- inp1 = lambda: arange(s, dtype=dtype)[o:]
- inp2 = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp1(), inp2(), bfmt % \
- (o, o, o, s, dtype, 'out of place')
- d = inp1()
- yield d, d, inp2(), bfmt % \
- (o, o, o, s, dtype, 'in place1')
- d = inp2()
- yield d, inp1(), d, bfmt % \
- (o, o, o, s, dtype, 'in place2')
- yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'out of place')
- yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'aliased')
-
-
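-# A brief sketch of how ``_gen_alignment_data`` is typically consumed; the
-# ufunc used here (``np.sqrt``) is only an example.
-#
-#     >>> import numpy as np
-#     >>> for out, inp, msg in _gen_alignment_data(dtype=np.float32,
-#     ...                                          type='unary', max_size=8):
-#     ...     _ = np.sqrt(inp, out=out)  # exercise a ufunc on offset views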
-
-@contextlib.contextmanager
-def tempdir(*args, **kwargs):
- """Context manager to provide a temporary test folder.
-
-    All arguments are passed on to the underlying tempfile.mkdtemp
- function.
-
- """
- tmpdir = mkdtemp(*args, **kwargs)
- try:
- yield tmpdir
- finally:
- shutil.rmtree(tmpdir)
-
-
-@contextlib.contextmanager
-def temppath(*args, **kwargs):
- """Context manager for temporary files.
-
- Context manager that returns the path to a closed temporary file. Its
- parameters are the same as for tempfile.mkstemp and are passed directly
- to that function. The underlying file is removed when the context is
- exited, so it should be closed at that time.
-
- Windows does not allow a temporary file to be opened if it is already
- open, so the underlying file must be closed after opening before it
- can be opened again.
-
- """
- fd, path = mkstemp(*args, **kwargs)
- os.close(fd)
- try:
- yield path
- finally:
- os.remove(path)
-
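-# A minimal sketch of the two helpers above (file suffix and contents are
-# arbitrary): both remove what they created when the block exits.
-#
-#     >>> with temppath(suffix='.txt') as path:
-#     ...     with open(path, 'w') as f:
-#     ...         nwritten = f.write('scratch data')
-#     >>> with tempdir() as folder:
-#     ...     assert os.path.isdir(folder)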
-
-class clear_and_catch_warnings(warnings.catch_warnings):
- """ Context manager that resets warning registry for catching warnings
-
- Warnings can be slippery, because, whenever a warning is triggered, Python
- adds a ``__warningregistry__`` member to the *calling* module. This makes
- it impossible to retrigger the warning in this module, whatever you put in
- the warnings filters. This context manager accepts a sequence of `modules`
- as a keyword argument to its constructor and:
-
- * stores and removes any ``__warningregistry__`` entries in given `modules`
- on entry;
- * resets ``__warningregistry__`` to its previous state on exit.
-
- This makes it possible to trigger any warning afresh inside the context
- manager without disturbing the state of warnings outside.
-
- For compatibility with Python 3.0, please consider all arguments to be
- keyword-only.
-
- Parameters
- ----------
- record : bool, optional
- Specifies whether warnings should be captured by a custom
- implementation of ``warnings.showwarning()`` and be appended to a list
- returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
- modules : sequence, optional
- Sequence of modules for which to reset warnings registry on entry and
- restore on exit. To work correctly, all 'ignore' filters should
- filter by one of these modules.
-
- Examples
- --------
- >>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
- ... warnings.simplefilter('always')
- ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
- ... # do something that raises a warning but ignore those in
- ... # np.core.fromnumeric
- """
- class_modules = ()
-
- def __init__(self, record=False, modules=()):
- self.modules = set(modules).union(self.class_modules)
- self._warnreg_copies = {}
- super(clear_and_catch_warnings, self).__init__(record=record)
-
- def __enter__(self):
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod_reg = mod.__warningregistry__
- self._warnreg_copies[mod] = mod_reg.copy()
- mod_reg.clear()
- return super(clear_and_catch_warnings, self).__enter__()
-
- def __exit__(self, *exc_info):
- super(clear_and_catch_warnings, self).__exit__(*exc_info)
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod.__warningregistry__.clear()
- if mod in self._warnreg_copies:
- mod.__warningregistry__.update(self._warnreg_copies[mod])
-
-
-class suppress_warnings(object):
- """
- Context manager and decorator doing much the same as
- ``warnings.catch_warnings``.
-
- However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
-
- This bug causes Python before 3.4 to not reliably show warnings again
- after they have been ignored once (even within catch_warnings). It
- means that no "ignore" filter can be used easily, since following
- tests might need to see the warning. Additionally it allows easier
- specificity for testing warnings and can be nested.
-
- Parameters
- ----------
- forwarding_rule : str, optional
- One of "always", "once", "module", or "location". Analogous to
- the usual warnings module filter mode, it is useful to reduce
- noise mostly on the outmost level. Unsuppressed and unrecorded
- warnings will be forwarded based on this rule. Defaults to "always".
-        "location" is equivalent to the warnings "default", matching by the
-        exact location the warning originated from.
-
- Notes
- -----
- Filters added inside the context manager will be discarded again
- when leaving it. Upon entering all filters defined outside a
- context will be applied automatically.
-
- When a recording filter is added, matching warnings are stored in the
- ``log`` attribute as well as in the list returned by ``record``.
-
- If filters are added and the ``module`` keyword is given, the
- warning registry of this module will additionally be cleared when
- applying it, entering the context, or exiting it. This could cause
- warnings to appear a second time after leaving the context if they
- were configured to be printed once (default) and were already
- printed before the context was entered.
-
- Nesting this context manager will work as expected when the
- forwarding rule is "always" (default). Unfiltered and unrecorded
- warnings will be passed out and be matched by the outer level.
- On the outmost level they will be printed (or caught by another
- warnings context). The forwarding rule argument can modify this
- behaviour.
-
- Like ``catch_warnings`` this context manager is not threadsafe.
-
- Examples
- --------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
-    >>> sup.filter(module=np.ma.core)  # module must match exactly
- >>> @sup
-    ... def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
- """
- def __init__(self, forwarding_rule="always"):
- self._entered = False
-
- # Suppressions are either instance or defined inside one with block:
- self._suppressions = []
-
- if forwarding_rule not in {"always", "module", "once", "location"}:
- raise ValueError("unsupported forwarding rule.")
- self._forwarding_rule = forwarding_rule
-
- def _clear_registries(self):
- if hasattr(warnings, "_filters_mutated"):
- # clearing the registry should not be necessary on new pythons,
- # instead the filters should be mutated.
- warnings._filters_mutated()
- return
- # Simply clear the registry, this should normally be harmless,
- # note that on new pythons it would be invalidated anyway.
- for module in self._tmp_modules:
- if hasattr(module, "__warningregistry__"):
- module.__warningregistry__.clear()
-
- def _filter(self, category=Warning, message="", module=None, record=False):
- if record:
- record = [] # The log where to store warnings
- else:
- record = None
- if self._entered:
- if module is None:
- warnings.filterwarnings(
- "always", category=category, message=message)
- else:
- module_regex = module.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=category, message=message,
- module=module_regex)
- self._tmp_modules.add(module)
- self._clear_registries()
-
- self._tmp_suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
- else:
- self._suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
-
- return record
-
- def filter(self, category=Warning, message="", module=None):
- """
- Add a new suppressing filter or apply it if the state is entered.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- self._filter(category=category, message=message, module=module,
- record=False)
-
- def record(self, category=Warning, message="", module=None):
- """
- Append a new recording filter or apply it if the state is entered.
-
- All warnings matching will be appended to the ``log`` attribute.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Returns
- -------
- log : list
- A list which will be filled with all matched warnings.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- return self._filter(category=category, message=message, module=module,
- record=True)
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("cannot enter suppress_warnings twice.")
-
- self._orig_show = warnings.showwarning
- self._filters = warnings.filters
- warnings.filters = self._filters[:]
-
- self._entered = True
- self._tmp_suppressions = []
- self._tmp_modules = set()
- self._forwarded = set()
-
- self.log = [] # reset global log (no need to keep same list)
-
- for cat, mess, _, mod, log in self._suppressions:
- if log is not None:
- del log[:] # clear the log
- if mod is None:
- warnings.filterwarnings(
- "always", category=cat, message=mess)
- else:
- module_regex = mod.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=cat, message=mess,
- module=module_regex)
- self._tmp_modules.add(mod)
- warnings.showwarning = self._showwarning
- self._clear_registries()
-
- return self
-
- def __exit__(self, *exc_info):
- warnings.showwarning = self._orig_show
- warnings.filters = self._filters
- self._clear_registries()
- self._entered = False
- del self._orig_show
- del self._filters
-
- def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
- for cat, _, pattern, mod, rec in (
- self._suppressions + self._tmp_suppressions)[::-1]:
- if (issubclass(category, cat) and
- pattern.match(message.args[0]) is not None):
- if mod is None:
- # Message and category match, either recorded or ignored
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
- # Use startswith, because warnings strips the c or o from
- # .pyc/.pyo files.
- elif mod.__file__.startswith(filename):
- # The message and module (filename) match
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
-
- # There is no filter in place, so pass to the outside handler
- # unless we should only pass it once
- if self._forwarding_rule == "always":
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno,
- *args, **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
- return
-
- if self._forwarding_rule == "once":
- signature = (message.args, category)
- elif self._forwarding_rule == "module":
- signature = (message.args, category, filename)
- elif self._forwarding_rule == "location":
- signature = (message.args, category, filename, lineno)
-
- if signature in self._forwarded:
- return
- self._forwarded.add(signature)
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno, *args,
- **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
-
- def __call__(self, func):
- """
- Function decorator to apply certain suppressions to a whole
- function.
- """
- @wraps(func)
- def new_func(*args, **kwargs):
- with self:
- return func(*args, **kwargs)
-
- return new_func
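
For orientation, a minimal usage sketch of the `suppress_warnings` helper removed in the hunk above (it stays importable as `numpy.testing.suppress_warnings`, see the `__all__` at the end of this diff). The `noisy` helper and the warning texts are placeholders, not part of this patch:

    import warnings
    from numpy.testing import suppress_warnings

    def noisy():
        warnings.warn("spam is deprecated", DeprecationWarning)
        warnings.warn("something else happened", UserWarning)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "spam")           # silence matching warnings
        log = sup.record(UserWarning, "something else")  # collect matching warnings
        noisy()
    assert len(log) == 1 and len(sup.log) == 1           # recorded, also on sup.log

    # Decorator form, mirroring the docstring example above.
    sup = suppress_warnings()
    sup.filter(DeprecationWarning, "spam")

    @sup
    def quiet():
        noisy()
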
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 5a0f977d9..e27a9b85b 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -6,8 +6,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
- config.add_subpackage('nose_tools')
- config.add_subpackage('pytest_tools')
+ config.add_subpackage('_private')
config.add_data_dir('tests')
return config
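
Folding the one-line change into the surrounding context of the hunk, the resulting function would read roughly as follows (a reconstruction for readability; it assumes nothing else in setup.py changes):

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('testing', parent_package, top_path)
        config.add_subpackage('_private')   # replaces nose_tools / pytest_tools
        config.add_data_dir('tests')
        return config
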
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index 62329ab7d..ea684140d 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -5,196 +5,212 @@ Test the decorators from ``testing.decorators``.
from __future__ import division, absolute_import, print_function
import warnings
+import pytest
-from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
- SkipTest, KnownFailureException)
+from numpy.testing import (
+ assert_, assert_raises, dec, SkipTest, KnownFailureException,
+ )
-def test_slow():
- @dec.slow
- def slow_func(x, y, z):
- pass
-
- assert_(slow_func.slow)
+try:
+ import nose
+except ImportError:
+ HAVE_NOSE = False
+else:
+ HAVE_NOSE = True
-def test_setastest():
- @dec.setastest()
- def f_default(a):
- pass
-
- @dec.setastest(True)
- def f_istest(a):
- pass
+@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
+class TestNoseDecorators(object):
+ # These tests are run in a class for simplicity while still
+    # getting a report on each, whether skipped or successful.
- @dec.setastest(False)
- def f_isnottest(a):
+ class DidntSkipException(Exception):
pass
- assert_(f_default.__test__)
- assert_(f_istest.__test__)
- assert_(not f_isnottest.__test__)
-
-
-class DidntSkipException(Exception):
- pass
-
-def test_skip_functions_hardcoded():
- @dec.skipif(True)
- def f1(x):
- raise DidntSkipException
-
- try:
- f1('a')
- except DidntSkipException:
- raise Exception('Failed to skip')
- except SkipTest().__class__:
- pass
+ def test_slow(self):
+ import nose
+ @dec.slow
+ def slow_func(x, y, z):
+ pass
- @dec.skipif(False)
- def f2(x):
- raise DidntSkipException
-
- try:
- f2('a')
- except DidntSkipException:
- pass
- except SkipTest().__class__:
- raise Exception('Skipped when not expected to')
+ assert_(slow_func.slow)
+ def test_setastest(self):
+ @dec.setastest()
+ def f_default(a):
+ pass
-def test_skip_functions_callable():
- def skip_tester():
- return skip_flag == 'skip me!'
+ @dec.setastest(True)
+ def f_istest(a):
+ pass
- @dec.skipif(skip_tester)
- def f1(x):
- raise DidntSkipException
+ @dec.setastest(False)
+ def f_isnottest(a):
+ pass
- try:
- skip_flag = 'skip me!'
- f1('a')
- except DidntSkipException:
- raise Exception('Failed to skip')
- except SkipTest().__class__:
- pass
+ assert_(f_default.__test__)
+ assert_(f_istest.__test__)
+ assert_(not f_isnottest.__test__)
- @dec.skipif(skip_tester)
- def f2(x):
- raise DidntSkipException
- try:
- skip_flag = 'five is right out!'
- f2('a')
- except DidntSkipException:
- pass
- except SkipTest().__class__:
- raise Exception('Skipped when not expected to')
+ def test_skip_functions_hardcoded(self):
+ @dec.skipif(True)
+ def f1(x):
+ raise self.DidntSkipException
+ try:
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
+ pass
-def test_skip_generators_hardcoded():
- @dec.knownfailureif(True, "This test is known to fail")
- def g1(x):
- for i in range(x):
- yield i
+ @dec.skipif(False)
+ def f2(x):
+ raise self.DidntSkipException
- try:
- for j in g1(10):
+ try:
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+
+ def test_skip_functions_callable(self):
+ def skip_tester():
+ return skip_flag == 'skip me!'
+
+ @dec.skipif(skip_tester)
+ def f1(x):
+ raise self.DidntSkipException
+
+ try:
+ skip_flag = 'skip me!'
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
pass
- except KnownFailureException().__class__:
- pass
- else:
- raise Exception('Failed to mark as known failure')
- @dec.knownfailureif(False, "This test is NOT known to fail")
- def g2(x):
- for i in range(x):
- yield i
- raise DidntSkipException('FAIL')
+ @dec.skipif(skip_tester)
+ def f2(x):
+ raise self.DidntSkipException
- try:
- for j in g2(10):
+ try:
+ skip_flag = 'five is right out!'
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+
+ def test_skip_generators_hardcoded(self):
+ @dec.knownfailureif(True, "This test is known to fail")
+ def g1(x):
+ for i in range(x):
+ yield i
+
+ try:
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
+ pass
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @dec.knownfailureif(False, "This test is NOT known to fail")
+ def g2(x):
+ for i in range(x):
+ yield i
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
pass
- except KnownFailureException().__class__:
- raise Exception('Marked incorrectly as known failure')
- except DidntSkipException:
- pass
-
-def test_skip_generators_callable():
- def skip_tester():
- return skip_flag == 'skip me!'
+ def test_skip_generators_callable(self):
+ def skip_tester():
+ return skip_flag == 'skip me!'
- @dec.knownfailureif(skip_tester, "This test is known to fail")
- def g1(x):
- for i in range(x):
- yield i
+ @dec.knownfailureif(skip_tester, "This test is known to fail")
+ def g1(x):
+ for i in range(x):
+ yield i
- try:
- skip_flag = 'skip me!'
- for j in g1(10):
+ try:
+ skip_flag = 'skip me!'
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
pass
- except KnownFailureException().__class__:
- pass
- else:
- raise Exception('Failed to mark as known failure')
-
- @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
- def g2(x):
- for i in range(x):
- yield i
- raise DidntSkipException('FAIL')
-
- try:
- skip_flag = 'do not skip'
- for j in g2(10):
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
+ def g2(x):
+ for i in range(x):
+ yield i
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ skip_flag = 'do not skip'
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
pass
- except KnownFailureException().__class__:
- raise Exception('Marked incorrectly as known failure')
- except DidntSkipException:
- pass
+ def test_deprecated(self):
+ @dec.deprecated(True)
+ def non_deprecated_func():
+ pass
-def test_deprecated():
- @dec.deprecated(True)
- def non_deprecated_func():
- pass
-
- @dec.deprecated()
- def deprecated_func():
- import warnings
- warnings.warn("TEST: deprecated func", DeprecationWarning)
-
- @dec.deprecated()
- def deprecated_func2():
- import warnings
- warnings.warn("AHHHH")
- raise ValueError
-
- @dec.deprecated()
- def deprecated_func3():
- import warnings
- warnings.warn("AHHHH")
-
- # marked as deprecated, but does not raise DeprecationWarning
- assert_raises(AssertionError, non_deprecated_func)
- # should be silent
- deprecated_func()
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always") # do not propagate unrelated warnings
- # fails if deprecated decorator just disables test. See #1453.
- assert_raises(ValueError, deprecated_func2)
- # warning is not a DeprecationWarning
- assert_raises(AssertionError, deprecated_func3)
-
-
-@dec.parametrize('base, power, expected',
- [(1, 1, 1),
- (2, 1, 2),
- (2, 2, 4)])
-def test_parametrize(base, power, expected):
- assert_(base**power == expected)
-
-
-if __name__ == '__main__':
- run_module_suite()
+ @dec.deprecated()
+ def deprecated_func():
+ import warnings
+ warnings.warn("TEST: deprecated func", DeprecationWarning)
+
+ @dec.deprecated()
+ def deprecated_func2():
+ import warnings
+ warnings.warn("AHHHH")
+ raise ValueError
+
+ @dec.deprecated()
+ def deprecated_func3():
+ import warnings
+ warnings.warn("AHHHH")
+
+ # marked as deprecated, but does not raise DeprecationWarning
+ assert_raises(AssertionError, non_deprecated_func)
+ # should be silent
+ deprecated_func()
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("always") # do not propagate unrelated warnings
+ # fails if deprecated decorator just disables test. See #1453.
+ assert_raises(ValueError, deprecated_func2)
+ # warning is not a DeprecationWarning
+ assert_raises(AssertionError, deprecated_func3)
+
+ def test_parametrize(self):
+ # dec.parametrize assumes that it is being run by nose. Because
+ # we are running under pytest, we need to explicitly check the
+ # results.
+ @dec.parametrize('base, power, expected',
+ [(1, 1, 1),
+ (2, 1, 2),
+ (2, 2, 4)])
+ def check_parametrize(base, power, expected):
+ assert_(base**power == expected)
+
+ count = 0
+ for test in check_parametrize():
+ test[0](*test[1:])
+ count += 1
+ assert_(count == 3)
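
For comparison only (not part of this patch): the same check written with pytest's native parametrization, which pytest collects and drives itself, so no manual loop over the generated cases is needed:

    import pytest
    from numpy.testing import assert_

    @pytest.mark.parametrize('base, power, expected',
                             [(1, 1, 1),
                              (2, 1, 2),
                              (2, 2, 4)])
    def test_parametrize_native(base, power, expected):
        assert_(base**power == expected)
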
diff --git a/numpy/testing/tests/test_doctesting.py b/numpy/testing/tests/test_doctesting.py
index 43f9fb6ce..b77cd93e0 100644
--- a/numpy/testing/tests/test_doctesting.py
+++ b/numpy/testing/tests/test_doctesting.py
@@ -3,6 +3,9 @@
"""
from __future__ import division, absolute_import, print_function
+#FIXME: None of these tests is run, because 'check' is not a recognized
+# testing prefix.
+
# try the #random directive on the output line
def check_random_directive():
'''
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 77fb974cf..0592e62f8 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -5,18 +5,18 @@ import sys
import os
import itertools
import textwrap
+import pytest
+import weakref
import numpy as np
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_array_less, build_err_msg,
- raises, assert_raises, assert_warns, assert_no_warnings,
- assert_allclose, assert_approx_equal,
- assert_array_almost_equal_nulp, assert_array_max_ulp,
- clear_and_catch_warnings, suppress_warnings, run_module_suite,
- assert_string_equal, assert_, tempdir, temppath,
+ assert_array_almost_equal, assert_array_less, build_err_msg, raises,
+ assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+ assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+ clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+ tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
-import unittest
class _GenericTest(object):
@@ -69,9 +69,9 @@ class _GenericTest(object):
self._test_equal([1, 2, 3], (1, 2, 3))
-class TestArrayEqual(_GenericTest, unittest.TestCase):
+class TestArrayEqual(_GenericTest):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_array_equal
def test_generic_rank1(self):
@@ -149,10 +149,10 @@ class TestArrayEqual(_GenericTest, unittest.TestCase):
with suppress_warnings() as sup:
l = sup.record(FutureWarning, message="elementwise == ")
self._test_not_equal(c, b)
- assert_(len(l) == 1)
+ assert_equal(len(l), 1)
-class TestBuildErrorMessage(unittest.TestCase):
+class TestBuildErrorMessage(object):
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
@@ -163,7 +163,7 @@ class TestBuildErrorMessage(unittest.TestCase):
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
'2.00003, 3.00004])')
- self.assertEqual(a, b)
+ assert_equal(a, b)
def test_build_err_msg_no_verbose(self):
x = np.array([1.00001, 2.00002, 3.00003])
@@ -172,7 +172,7 @@ class TestBuildErrorMessage(unittest.TestCase):
a = build_err_msg([x, y], err_msg, verbose=False)
b = '\nItems are not equal: There is a mismatch'
- self.assertEqual(a, b)
+ assert_equal(a, b)
def test_build_err_msg_custom_names(self):
x = np.array([1.00001, 2.00002, 3.00003])
@@ -183,7 +183,7 @@ class TestBuildErrorMessage(unittest.TestCase):
b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
'1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
'3.00004])')
- self.assertEqual(a, b)
+ assert_equal(a, b)
def test_build_err_msg_custom_precision(self):
x = np.array([1.000000001, 2.00002, 3.00003])
@@ -194,12 +194,12 @@ class TestBuildErrorMessage(unittest.TestCase):
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
'1.000000002, 2.00003 , 3.00004 ])')
- self.assertEqual(a, b)
+ assert_equal(a, b)
class TestEqual(TestArrayEqual):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_equal
def test_nan_items(self):
@@ -298,16 +298,16 @@ class TestEqual(TestArrayEqual):
x: array([1, 2])
y: matrix([[1, 2]])""")
try:
- self.assertEqual(msg, msg_reference)
+ assert_equal(msg, msg_reference)
except AssertionError:
- self.assertEqual(msg2, msg_reference)
+ assert_equal(msg2, msg_reference)
else:
raise AssertionError("Did not raise")
-class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
+class TestArrayAlmostEqual(_GenericTest):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_array_almost_equal
def test_closeness(self):
@@ -319,12 +319,12 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_simple(self):
@@ -333,7 +333,7 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
self._assert_func(x, y, decimal=3)
self._assert_func(x, y, decimal=4)
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(x, y, decimal=5))
def test_nan(self):
@@ -341,21 +341,21 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
aone = np.array([1])
ainf = np.array([np.inf])
self._assert_func(anan, anan)
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(anan, aone))
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(anan, ainf))
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(ainf, anan))
def test_inf(self):
a = np.array([[1., 2.], [3., 4.]])
b = a.copy()
a[0, 0] = np.inf
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(a, b))
b[0, 0] = -np.inf
- self.assertRaises(AssertionError,
+ assert_raises(AssertionError,
lambda: self._assert_func(a, b))
def test_subclass(self):
@@ -396,9 +396,9 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
self._assert_func(a, a)
-class TestAlmostEqual(_GenericTest, unittest.TestCase):
+class TestAlmostEqual(_GenericTest):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_almost_equal
def test_closeness(self):
@@ -410,30 +410,30 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(1.5, 0.0, decimal=0))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
- self.assertRaises(AssertionError,
- lambda: self._assert_func([1.5], [0.0], decimal=0))
+ assert_raises(AssertionError,
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_nan_item(self):
self._assert_func(np.nan, np.nan)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(np.nan, 1))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(np.nan, np.inf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(np.inf, np.nan))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.nan, 1))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.nan, np.inf))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.inf, np.nan))
def test_inf_item(self):
self._assert_func(np.inf, np.inf)
self._assert_func(-np.inf, -np.inf)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(np.inf, 1))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(-np.inf, np.inf))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.inf, 1))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(-np.inf, np.inf))
def test_simple_item(self):
self._test_not_equal(1, 2)
@@ -467,7 +467,7 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
self._assert_func(x, y, decimal=12)
except AssertionError as e:
# remove anything that's not the array string
- self.assertEqual(str(e).split('%)\n ')[1], b)
+ assert_equal(str(e).split('%)\n ')[1], b)
# with the default value of decimal digits, only the 3rd element differs
# note that we only check for the formatting of the arrays themselves
@@ -477,7 +477,7 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
self._assert_func(x, y)
except AssertionError as e:
# remove anything that's not the array string
- self.assertEqual(str(e).split('%)\n ')[1], b)
+ assert_equal(str(e).split('%)\n ')[1], b)
def test_matrix(self):
# Matrix slicing keeps things 2-D, while array does not necessarily.
@@ -509,9 +509,9 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
self._assert_func(a, a)
-class TestApproxEqual(unittest.TestCase):
+class TestApproxEqual(object):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_approx_equal
def test_simple_arrays(self):
@@ -520,8 +520,8 @@ class TestApproxEqual(unittest.TestCase):
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y, significant=7))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
def test_simple_items(self):
x = 1234.22
@@ -530,37 +530,31 @@ class TestApproxEqual(unittest.TestCase):
self._assert_func(x, y, significant=4)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y, significant=7))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
def test_nan_array(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, aone))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_items(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, aone))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
-class TestArrayAssertLess(unittest.TestCase):
+class TestArrayAssertLess(object):
- def setUp(self):
+ def setup(self):
self._assert_func = assert_array_less
def test_simple_arrays(self):
@@ -568,100 +562,79 @@ class TestArrayAssertLess(unittest.TestCase):
y = np.array([1.2, 2.3])
self._assert_func(x, y)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 2.3])
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank2(self):
x = np.array([[1.1, 2.2], [3.3, 4.4]])
y = np.array([[1.2, 2.3], [3.4, 4.5]])
self._assert_func(x, y)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([[1.0, 2.3], [3.4, 4.5]])
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank3(self):
x = np.ones(shape=(2, 2, 2))
y = np.ones(shape=(2, 2, 2))+1
self._assert_func(x, y)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
y[0, 0, 0] = 0
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_simple_items(self):
x = 1.1
y = 2.2
self._assert_func(x, y)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([2.2, 3.3])
self._assert_func(x, y)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(y, x))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 3.3])
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_nan_noncompare(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(aone, anan))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, aone))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_noncompare_array(self):
x = np.array([1.1, 2.2, 3.3])
anan = np.array(np.nan)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, anan))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
x = np.array([1.1, 2.2, np.nan])
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, anan))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(anan, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
y = np.array([1.0, 2.0, np.nan])
self._assert_func(y, x)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_inf_compare(self):
aone = np.array(1)
@@ -670,37 +643,27 @@ class TestArrayAssertLess(unittest.TestCase):
self._assert_func(aone, ainf)
self._assert_func(-ainf, aone)
self._assert_func(-ainf, ainf)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, aone))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(aone, -ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, -ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(-ainf, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
def test_inf_compare_array(self):
x = np.array([1.1, 2.2, np.inf])
ainf = np.array(np.inf)
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(ainf, x))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(x, -ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(-x, -ainf))
- self.assertRaises(AssertionError,
- lambda: self._assert_func(-ainf, -x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
self._assert_func(-ainf, x)
+@pytest.mark.skip(reason="The raises decorator depends on Nose")
+class TestRaises(object):
-class TestRaises(unittest.TestCase):
-
- def setUp(self):
+ def setup(self):
class MyException(Exception):
pass
@@ -732,7 +695,7 @@ class TestRaises(unittest.TestCase):
raise AssertionError("should have raised an AssertionError")
-class TestWarns(unittest.TestCase):
+class TestWarns(object):
def test_warn(self):
def f():
@@ -783,28 +746,27 @@ class TestWarns(unittest.TestCase):
raise AssertionError("wrong warning caught by assert_warn")
-class TestAssertAllclose(unittest.TestCase):
+class TestAssertAllclose(object):
def test_simple(self):
x = 1e-3
y = 1e-9
assert_allclose(x, y, atol=1)
- self.assertRaises(AssertionError, assert_allclose, x, y)
+ assert_raises(AssertionError, assert_allclose, x, y)
a = np.array([x, y, x, y])
b = np.array([x, y, x, x])
assert_allclose(a, b, atol=1)
- self.assertRaises(AssertionError, assert_allclose, a, b)
+ assert_raises(AssertionError, assert_allclose, a, b)
b[-1] = y * (1 + 1e-8)
assert_allclose(a, b)
- self.assertRaises(AssertionError, assert_allclose, a, b,
- rtol=1e-9)
+ assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
assert_allclose(6, 10, rtol=0.5)
- self.assertRaises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
+ assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
def test_min_int(self):
a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
@@ -819,7 +781,7 @@ class TestAssertAllclose(unittest.TestCase):
msg = ''
except AssertionError as exc:
msg = exc.args[0]
- self.assertTrue("mismatch 25.0%" in msg)
+ assert_("mismatch 25.0%" in msg)
def test_equal_nan(self):
a = np.array([np.nan])
@@ -830,8 +792,7 @@ class TestAssertAllclose(unittest.TestCase):
def test_not_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
- self.assertRaises(AssertionError, assert_allclose, a, b,
- equal_nan=False)
+ assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
def test_equal_nan_default(self):
# Make sure equal_nan default behavior remains unchanged. (All
@@ -845,7 +806,7 @@ class TestAssertAllclose(unittest.TestCase):
assert_allclose(a, b)
-class TestArrayAlmostEqualNulp(unittest.TestCase):
+class TestArrayAlmostEqualNulp(object):
def test_float64_pass(self):
# The number of units of least precision
@@ -873,13 +834,13 @@ class TestArrayAlmostEqualNulp(unittest.TestCase):
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- x, y, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- x, y, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
def test_float32_pass(self):
nulp = 5
@@ -903,13 +864,43 @@ class TestArrayAlmostEqualNulp(unittest.TestCase):
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- x, y, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- x, y, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ def test_float16_pass(self):
+ nulp = 5
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ def test_float16_fail(self):
+ nulp = 5
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
def test_complex128_pass(self):
nulp = 5
@@ -943,25 +934,25 @@ class TestArrayAlmostEqualNulp(unittest.TestCase):
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, x + y*1j, nulp)
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + x*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
y = x + x*eps*nulp
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, x + y*1j, nulp)
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + x*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
def test_complex64_pass(self):
nulp = 5
@@ -993,26 +984,26 @@ class TestArrayAlmostEqualNulp(unittest.TestCase):
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, x + y*1j, nulp)
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + x*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
y = x + x*eps*nulp
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, x + y*1j, nulp)
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + x*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
- self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
- xi, y + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
-class TestULP(unittest.TestCase):
+class TestULP(object):
def test_equal(self):
x = np.random.randn(10)
@@ -1051,24 +1042,24 @@ class TestULP(unittest.TestCase):
tiny = np.array([np.finfo(dt).tiny])
zero = np.array([np.PZERO]).astype(dt)
nzero = np.array([np.NZERO]).astype(dt)
- self.assertRaises(AssertionError,
- lambda: assert_array_max_ulp(nan, inf,
- maxulp=maxulp))
- self.assertRaises(AssertionError,
- lambda: assert_array_max_ulp(nan, big,
- maxulp=maxulp))
- self.assertRaises(AssertionError,
- lambda: assert_array_max_ulp(nan, tiny,
- maxulp=maxulp))
- self.assertRaises(AssertionError,
- lambda: assert_array_max_ulp(nan, zero,
- maxulp=maxulp))
- self.assertRaises(AssertionError,
- lambda: assert_array_max_ulp(nan, nzero,
- maxulp=maxulp))
-
-
-class TestStringEqual(unittest.TestCase):
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, inf,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, big,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, tiny,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, zero,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, nzero,
+ maxulp=maxulp))
+
+
+class TestStringEqual(object):
def test_simple(self):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
@@ -1080,22 +1071,32 @@ class TestStringEqual(unittest.TestCase):
else:
raise AssertionError("exception not raised")
- self.assertRaises(AssertionError,
- lambda: assert_string_equal("foo", "hello"))
+ assert_raises(AssertionError,
+ lambda: assert_string_equal("foo", "hello"))
-def assert_warn_len_equal(mod, n_in_context, py3_n_in_context=None):
+def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
mod_warns = mod.__warningregistry__
+ num_warns = len(mod_warns)
# Python 3.4 appears to clear any pre-existing warnings of the same type,
# when raising warnings inside a catch_warnings block. So, there is a
# warning generated by the tests within the context manager, but no
# previous warnings.
if 'version' in mod_warns:
- if py3_n_in_context is None:
- py3_n_in_context = n_in_context
- assert_equal(len(mod_warns) - 1, py3_n_in_context)
- else:
- assert_equal(len(mod_warns), n_in_context)
+ # Python 3 adds a 'version' entry to the registry,
+ # do not count it.
+ num_warns -= 1
+
+ # Behavior of warnings is Python version dependent. Adjust the
+ # expected result to compensate. In particular, Python 3.7 does
+ # not make an entry for ignored warnings.
+ if sys.version_info[:2] >= (3, 7):
+ if py37 is not None:
+ n_in_context = py37
+ elif sys.version_info[:2] >= (3, 4):
+ if py34 is not None:
+ n_in_context = py34
+ assert_equal(num_warns, n_in_context)
def _get_fresh_mod():
@@ -1104,6 +1105,8 @@ def _get_fresh_mod():
try:
my_mod.__warningregistry__.clear()
except AttributeError:
+        # will not have a __warningregistry__ unless a warning has been
+ # raised in the module at some point
pass
return my_mod
@@ -1117,21 +1120,23 @@ def test_clear_and_catch_warnings():
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
# Without specified modules, don't clear warnings during context
+ # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
- assert_warn_len_equal(my_mod, 1)
+ assert_warn_len_equal(my_mod, 1, py37=0)
# Confirm that specifying module keeps old warning, does not add new
with clear_and_catch_warnings(modules=[my_mod]):
warnings.simplefilter('ignore')
warnings.warn('Another warning')
- assert_warn_len_equal(my_mod, 1)
+ assert_warn_len_equal(my_mod, 1, py37=0)
# Another warning, no module spec does add to warnings dict, except on
# Python 3.4 (see comments in `assert_warn_len_equal`)
+ # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Another warning')
- assert_warn_len_equal(my_mod, 2, 1)
+ assert_warn_len_equal(my_mod, 2, py34=1, py37=0)
def test_suppress_warnings_module():
@@ -1148,6 +1153,7 @@ def test_suppress_warnings_module():
np.apply_along_axis(warn, 0, [0])
# Test module based warning suppression:
+ assert_warn_len_equal(my_mod, 0)
with suppress_warnings() as sup:
sup.record(UserWarning)
# suppress warning from other module (may have .pyc ending),
@@ -1157,10 +1163,9 @@ def test_suppress_warnings_module():
warn_other_module()
# Check that the suppression did test the file correctly (this module
# got filtered)
- assert_(len(sup.log) == 1)
- assert_(sup.log[0].message.args[0] == "Some warning")
-
- assert_warn_len_equal(my_mod, 0)
+ assert_equal(len(sup.log), 1)
+ assert_equal(sup.log[0].message.args[0], "Some warning")
+ assert_warn_len_equal(my_mod, 0, py37=0)
sup = suppress_warnings()
# Will have to be changed if apply_along_axis is moved:
sup.filter(module=my_mod)
@@ -1174,11 +1179,11 @@ def test_suppress_warnings_module():
assert_warn_len_equal(my_mod, 0)
# Without specified modules, don't clear warnings during context
+ # Python 3.7 does not add ignored warnings.
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
- assert_warn_len_equal(my_mod, 1)
-
+ assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_type():
# Initial state of module, no warnings
@@ -1202,10 +1207,11 @@ def test_suppress_warnings_type():
assert_warn_len_equal(my_mod, 0)
# Without specified modules, don't clear warnings during context
+ # Python 3.7 does not add ignored warnings.
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
- assert_warn_len_equal(my_mod, 1)
+ assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_decorate_no_record():
@@ -1220,7 +1226,7 @@ def test_suppress_warnings_decorate_no_record():
warnings.simplefilter("always")
         warn(UserWarning)  # should be suppressed
warn(RuntimeWarning)
- assert_(len(w) == 1)
+ assert_equal(len(w), 1)
def test_suppress_warnings_record():
@@ -1234,10 +1240,10 @@ def test_suppress_warnings_record():
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
- assert_(len(sup.log) == 2)
- assert_(len(log1) == 1)
- assert_(len(log2) == 1)
- assert_(log2[0].message.args[0] == 'Some other warning 2')
+ assert_equal(len(sup.log), 2)
+ assert_equal(len(log1), 1)
+    assert_equal(len(log2), 1)
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Do it again, with the same context to see if some warnings survived:
with sup:
@@ -1247,10 +1253,10 @@ def test_suppress_warnings_record():
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
- assert_(len(sup.log) == 2)
- assert_(len(log1) == 1)
- assert_(len(log2) == 1)
- assert_(log2[0].message.args[0] == 'Some other warning 2')
+ assert_equal(len(sup.log), 2)
+ assert_equal(len(log1), 1)
+ assert_equal(len(log2), 1)
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Test nested:
with suppress_warnings() as sup:
@@ -1259,8 +1265,8 @@ def test_suppress_warnings_record():
sup2.record(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
- assert_(len(sup2.log) == 1)
- assert_(len(sup.log) == 1)
+ assert_equal(len(sup2.log), 1)
+ assert_equal(len(sup.log), 1)
def test_suppress_warnings_forwarding():
@@ -1278,7 +1284,7 @@ def test_suppress_warnings_forwarding():
for i in range(2):
warnings.warn("Some warning")
- assert_(len(sup.log) == 2)
+ assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
@@ -1287,7 +1293,7 @@ def test_suppress_warnings_forwarding():
warnings.warn("Some warning")
warnings.warn("Some warning")
- assert_(len(sup.log) == 2)
+ assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
@@ -1297,7 +1303,7 @@ def test_suppress_warnings_forwarding():
warnings.warn("Some warning")
warn_other_module()
- assert_(len(sup.log) == 2)
+ assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
@@ -1307,7 +1313,7 @@ def test_suppress_warnings_forwarding():
warnings.warn("Some other warning")
warn_other_module()
- assert_(len(sup.log) == 2)
+ assert_equal(len(sup.log), 2)
def test_tempdir():
@@ -1357,5 +1363,74 @@ def test_clear_and_catch_warnings_inherit():
assert_equal(my_mod.__warningregistry__, {})
-if __name__ == '__main__':
- run_module_suite()
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles(object):
+ """ Test assert_no_gc_cycles """
+ def test_passes(self):
+ def no_cycle():
+ b = []
+ b.append([])
+ return b
+
+ with assert_no_gc_cycles():
+ no_cycle()
+
+ assert_no_gc_cycles(no_cycle)
+
+
+ def test_asserts(self):
+ def make_cycle():
+ a = []
+ a.append(a)
+ a.append(a)
+ return a
+
+ with assert_raises(AssertionError):
+ with assert_no_gc_cycles():
+ make_cycle()
+
+ with assert_raises(AssertionError):
+ assert_no_gc_cycles(make_cycle)
+
+
+ def test_fails(self):
+ """
+ Test that in cases where the garbage cannot be collected, we raise an
+ error, instead of hanging forever trying to clear it.
+ """
+
+ class ReferenceCycleInDel(object):
+ """
+ An object that not only contains a reference cycle, but creates new
+ cycles whenever it's garbage-collected and its __del__ runs
+ """
+ make_cycle = True
+
+ def __init__(self):
+ self.cycle = self
+
+ def __del__(self):
+ # break the current cycle so that `self` can be freed
+ self.cycle = None
+
+ if ReferenceCycleInDel.make_cycle:
+ # but create a new one so that the garbage collector has more
+ # work to do.
+ ReferenceCycleInDel()
+
+ try:
+ w = weakref.ref(ReferenceCycleInDel())
+ try:
+ with assert_raises(RuntimeError):
+ # this will be unable to get a baseline empty garbage
+ assert_no_gc_cycles(lambda: None)
+ except AssertionError:
+ # the above test is only necessary if the GC actually tried to free
+ # our object anyway, which python 2.7 does not.
+ if w() is not None:
+ pytest.skip("GC does not call __del__ on cyclic objects")
+ raise
+
+ finally:
+ # make sure that we stop creating reference cycles
+ ReferenceCycleInDel.make_cycle = False
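
The conversion applied throughout this file is mechanical; as an illustrative sketch (class and test names here are invented, not taken from the patch), a unittest-style class becomes a plain class that pytest collects:

    # before: unittest.TestCase with setUp and self.assert* helpers
    import unittest
    from numpy.testing import assert_array_equal, assert_raises

    class TestOldStyle(unittest.TestCase):
        def setUp(self):
            self._assert_func = assert_array_equal

        def test_mismatch(self):
            self.assertRaises(AssertionError,
                              lambda: self._assert_func([1], [2]))

    # after: plain class, lowercase setup(), numpy.testing assert helpers
    class TestNewStyle(object):
        def setup(self):
            self._assert_func = assert_array_equal

        def test_mismatch(self):
            assert_raises(AssertionError, lambda: self._assert_func([1], [2]))
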
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index a0218c4e6..184adcc74 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -3,9 +3,16 @@ Back compatibility utils module. It will import the appropriate
set of tools
"""
-import os
+from __future__ import division, absolute_import, print_function
-from .nose_tools.utils import *
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.utils is deprecated, "
+ "import from numpy.testing instead.",
+ ImportWarning, stacklevel=2)
+
+from ._private.utils import *
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
@@ -18,5 +25,5 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles'
]
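
In practice the shim behaves like this for downstream code (a sketch; it assumes this is the first import of the deprecated module in the process, and note that ImportWarning is ignored by default, hence the filter):

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        import numpy.testing.utils            # old location still works...
    # ...but w now holds the ImportWarning emitted at import time

    from numpy.testing import assert_equal    # the supported import going forward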