author     Pauli Virtanen <pav@iki.fi>    2009-10-02 19:32:38 +0000
committer  Pauli Virtanen <pav@iki.fi>    2009-10-02 19:32:38 +0000
commit     f1e3392d6d8813ed146ce1675f65a880634f727b (patch)
tree       59a0f843abb9102f56ba3046db75249264a7e5d5
parent     0f17bf706dd5c4b160ff4d1f15ae69ef933cfc43 (diff)
download   numpy-f1e3392d6d8813ed146ce1675f65a880634f727b.tar.gz
Docstring update: testing
-rw-r--r--  numpy/testing/decorators.py | 205
-rw-r--r--  numpy/testing/nosetester.py | 190
-rw-r--r--  numpy/testing/utils.py      | 141
3 files changed, 422 insertions(+), 114 deletions(-)
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index e337c35e2..afb52d3ea 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -1,79 +1,117 @@
-"""Decorators for labeling test objects
+"""
+Decorators for labeling and modifying behavior of test objects.

Decorators that merely return a modified version of the original
-function object are straightforward.  Decorators that return a new
+function object are straightforward. Decorators that return a new
function object need to use
-nose.tools.make_decorator(original_function)(decorator) in returning
-the decorator, in order to preserve metadata such as function name,
-setup and teardown functions and so on - see nose.tools for more
-information.
+::
+
+  nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.

"""
import warnings
import sys

def slow(t):
-    """Labels a test as 'slow'.
+    """
+    Label a test as 'slow'.

    The exact definition of a slow test is obviously both subjective and
    hardware-dependent, but in general any individual test that requires more
    than a second or two should be labeled as slow (the whole suite consits of
-    thousands of tests, so even a second is significant)."""
+    thousands of tests, so even a second is significant).
+
+    Parameters
+    ----------
+    t : callable
+        The test to label as slow.
+
+    Returns
+    -------
+    t : callable
+        The decorated test `t`.
+
+    Examples
+    --------
+    The `numpy.testing` module includes ``import decorators as dec``.
+    A test can be decorated as slow like this::
+
+      from numpy.testing import *
+
+      @dec.slow
+      def test_big(self):
+          print 'Big, slow test'
+
+    """

    t.slow = True
    return t

def setastest(tf=True):
-    ''' Signals to nose that this function is or is not a test
+    """
+    Signals to nose that this function is or is not a test.

    Parameters
    ----------
    tf : bool
-        If True specifies this is a test, not a test otherwise
+        If True, specifies that the decorated callable is a test.
+        If False, specifies that the decorated callable is not a test.
+        Default is True.
+
+    Notes
+    -----
+    This decorator can't use the nose namespace, because it can be
+    called from a non-test module. See also ``istest`` and ``nottest`` in
+    ``nose.tools``.

-    e.g
-    >>> from numpy.testing.decorators import setastest
-    >>> @setastest(False)
-    ... def func_with_test_in_name(arg1, arg2): pass
-    ...
-    >>>
+    Examples
+    --------
+    `setastest` can be used in the following way::

-    This decorator cannot use the nose namespace, because it can be
-    called from a non-test module. See also istest and nottest in
-    nose.tools
+      from numpy.testing.decorators import setastest
-    '''
+      @setastest(False)
+      def func_with_test_in_name(arg1, arg2):
+          pass
+
+    """
    def set_test(t):
        t.__test__ = tf
        return t
    return set_test

def skipif(skip_condition, msg=None):
-    ''' Make function raise SkipTest exception if skip_condition is true
+    """
+    Make function raise SkipTest exception if a given condition is true.
+
+    If the condition is a callable, it is used at runtime to dynamically
+    make the decision. This is useful for tests that may require costly
+    imports, to delay the cost until the test suite is actually executed.

    Parameters
    ----------
-    skip_condition : bool or callable.
-        Flag to determine whether to skip test. If the condition is a
-        callable, it is used at runtime to dynamically make the decision. This
-        is useful for tests that may require costly imports, to delay the cost
-        until the test suite is actually executed.
-    msg : string
-        Message to give on raising a SkipTest exception
-
-    Returns
-    -------
-    decorator : function
-        Decorator, which, when applied to a function, causes SkipTest
-        to be raised when the skip_condition was True, and the function
-        to be called normally otherwise.
+    skip_condition : bool or callable
+        Flag to determine whether to skip the decorated test.
+    msg : str, optional
+        Message to give on raising a SkipTest exception. Default is None.
+
+    Returns
+    -------
+    decorator : function
+        Decorator which, when applied to a function, causes SkipTest
+        to be raised when `skip_condition` is True, and the function
+        to be called normally otherwise.

    Notes
    -----
-    You will see from the code that we had to further decorate the
-    decorator with the nose.tools.make_decorator function in order to
-    transmit function name, and various other metadata.
-    '''
+    The decorator itself is decorated with the ``nose.tools.make_decorator``
+    function in order to transmit function name, and various other metadata.
+
+    """
    def skip_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
@@ -124,32 +162,35 @@ def skipif(skip_condition, msg=None):

def knownfailureif(fail_condition, msg=None):
-    ''' Make function raise KnownFailureTest exception if fail_condition is true
+    """
+    Make function raise KnownFailureTest exception if given condition is true.
+
+    If the condition is a callable, it is used at runtime to dynamically
+    make the decision. This is useful for tests that may require costly
+    imports, to delay the cost until the test suite is actually executed.

    Parameters
    ----------
-    fail_condition : bool or callable.
-        Flag to determine whether to mark test as known failure (True)
-        or not (False). If the condition is a callable, it is used at
-        runtime to dynamically make the decision. This is useful for
-        tests that may require costly imports, to delay the cost
-        until the test suite is actually executed.
-    msg : string
-        Message to give on raising a KnownFailureTest exception
-
-    Returns
-    -------
-    decorator : function
-        Decorator, which, when applied to a function, causes SkipTest
-        to be raised when the skip_condition was True, and the function
-        to be called normally otherwise.
+    fail_condition : bool or callable
+        Flag to determine whether to mark the decorated test as a known
+        failure (if True) or not (if False).
+    msg : str, optional
+        Message to give on raising a KnownFailureTest exception.
+        Default is None.
+
+    Returns
+    -------
+    decorator : function
+        Decorator, which, when applied to a function, causes SkipTest
+        to be raised when `skip_condition` is True, and the function
+        to be called normally otherwise.

    Notes
    -----
-    You will see from the code that we had to further decorate the
-    decorator with the nose.tools.make_decorator function in order to
-    transmit function name, and various other metadata.
-    '''
+    The decorator itself is decorated with the ``nose.tools.make_decorator``
+    function in order to transmit function name, and various other metadata.
+
+    """
    if msg is None:
        msg = 'Test skipped due to known failure'
@@ -177,7 +218,15 @@ def knownfailureif(fail_condition, msg=None):
# manager)

class WarningMessage(object):
-    """Holds the result of a single showwarning() call."""
+    """
+    Holds the result of a single showwarning() call.
+
+    Notes
+    -----
+    `WarningMessage` is copied from the Python 2.6 warnings module,
+    so it can be used in NumPy with older Python versions.
+
+    """

    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")
@@ -198,6 +247,27 @@ class WarningMessage(object):
                    self.filename, self.lineno, self.line))

class WarningManager:
+    """
+    A context manager that copies and restores the warnings filter upon
+    exiting the context.
+
+    The 'record' argument specifies whether warnings should be captured by a
+    custom implementation of ``warnings.showwarning()`` and be appended to a
+    list returned by the context manager. Otherwise None is returned by the
+    context manager. The objects appended to the list are arguments whose
+    attributes mirror the arguments to ``showwarning()``.
+
+    The 'module' argument is to specify an alternative module to the module
+    named 'warnings' and imported under that name. This argument is only useful
+    when testing the warnings module itself.
+
+    Notes
+    -----
+    `WarningManager` is a copy of the ``catch_warnings`` context manager
+    from the Python 2.6 warnings module, with slight modifications.
+    It is copied so it can be used in NumPy with older Python versions.
+
+    """
    def __init__(self, record=False, module=None):
        self._record = record
        if module is None:
@@ -229,28 +299,29 @@ class WarningManager:
        self._module.showwarning = self._showwarning

def deprecated(conditional=True):
-    """This decorator can be used to filter Deprecation Warning, to avoid
+    """
+    Filter deprecation warnings while running the test suite.
+
+    This decorator can be used to filter DeprecationWarning's, to avoid
    printing them during the test suite run, while checking that the test
    actually raises a DeprecationWarning.

    Parameters
    ----------
-    conditional : bool or callable.
+    conditional : bool or callable, optional
        Flag to determine whether to mark test as deprecated or not. If the
        condition is a callable, it is used at runtime to dynamically make the
-        decision.
+        decision. Default is True.

    Returns
    -------
    decorator : function
-        Decorator, which, when applied to a function, causes SkipTest
-        to be raised when the skip_condition was True, and the function
-        to be called normally otherwise.
+        The `deprecated` decorator itself.

    Notes
    -----
-
    .. versionadded:: 1.4.0
+
    """
    def deprecate_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
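For orientation, the decorators documented above are applied directly to test
functions. The following is an illustrative sketch only, not part of this
patch; the scipy-availability check is just a stand-in skip condition::

    import numpy as np
    from numpy.testing import dec

    def _missing_scipy():
        # Callable condition: evaluated only when the suite actually runs,
        # so the potentially costly import is deferred until then.
        try:
            import scipy
            return False
        except ImportError:
            return True

    @dec.slow
    def test_big_sum():
        # Labeled 'slow', so it is excluded from the default 'fast' label.
        assert np.arange(10**6).sum() == (10**6 - 1) * 10**6 // 2

    @dec.skipif(_missing_scipy, "scipy is not installed")
    def test_needs_scipy():
        import scipy
        assert scipy.__name__ == 'scipy'

    @dec.knownfailureif(True, "marked as a known failure for illustration")
    def test_known_shortcoming():
        assert 0 == 1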
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index c4b8063de..03dc71ca3 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -1,13 +1,27 @@
-''' Nose test running
+"""
+Nose test running.

-Implements test and bench functions for modules.
+This module implements ``test()`` and ``bench()`` functions for NumPy modules.

-'''
+"""
import os
import sys

def get_package_name(filepath):
-    """Given a path where a package is installed, determine its name"""
+    """
+    Given a path where a package is installed, determine its name.
+
+    Parameters
+    ----------
+    filepath : str
+        Path to a file. If the determination fails, "numpy" is returned.
+
+    Examples
+    --------
+    >>> np.testing.nosetester.get_package_name('nonsense')
+    'numpy'
+
+    """

    fullpath = filepath[:]
    pkg_name = []
@@ -93,21 +107,31 @@ def _docmethod(meth, testtype):

class NoseTester(object):
-    """ Nose test runner.
-
-    Usage: NoseTester(<package>).test()
-
-    <package> is package path or module Default for package is None. A
-    value of None finds the calling module path.
+    """
+    Nose test runner.

    This class is made available as numpy.testing.Tester, and a test function
-    is typically added to a package's __init__.py like so:
+    is typically added to a package's __init__.py like so::

-    >>> from numpy.testing import Tester
-    >>> test = Tester().test
+      from numpy.testing import Tester
+      test = Tester().test

    Calling this test function finds and runs all tests associated with the
-    package and all its subpackages.
+    package and all its sub-packages.
+
+    Attributes
+    ----------
+    package_path : str
+        Full path to the package to test.
+    package_name : str
+        Name of the package to test.
+
+    Parameters
+    ----------
+    package : module, str or None
+        The package to test. If a string, this should be the full path to
+        the package. If None (default), `package` is set to the module from
+        which `NoseTester` is initialized.

    """
@@ -180,16 +204,17 @@ class NoseTester(object):

    def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
                          doctests=False, coverage=False):
-        ''' Run tests for module using nose
+        """
+        Run tests for module using nose.

-        %(test_header)s
-        doctests : boolean
-            If True, run doctests in module, default False
-        coverage : boolean
-            If True, report coverage of NumPy code, default False
-            (Requires the coverage module:
-             http://nedbatchelder.com/code/modules/coverage.html)
-        '''
+        This method does the heavy lifting for the `test` method. It takes all
+        the same arguments, for details see `test`.
+
+        See Also
+        --------
+        test
+
+        """
        # if doctests is in the extra args, remove it and set the doctest
        # flag so the NumPy doctester is used instead
@@ -231,16 +256,61 @@ class NoseTester(object):

    def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
             coverage=False):
-        ''' Run tests for module using nose
+        """
+        Run tests for module using nose.

-        %(test_header)s
-        doctests : boolean
-            If True, run doctests in module, default False
-        coverage : boolean
-            If True, report coverage of NumPy code, default False
-            (Requires the coverage module:
-             http://nedbatchelder.com/code/modules/coverage.html)
-        '''
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the tests to run. This can be a string to pass to the
+            nosetests executable with the '-A' option, or one of
+            several special values.
+            Special values are:
+            'fast' - the default - which corresponds to the ``nosetests -A``
+            option of 'not slow'.
+            'full' - fast (as above) and slow tests as in the
+            'no -A' option to nosetests - this is the same as ''.
+            None or '' - run all tests.
+            attribute_identifier - string passed directly to nosetests as '-A'.
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+        doctests : bool, optional
+            If True, run doctests in module. Default is False.
+        coverage : bool, optional
+            If True, report coverage of NumPy code. Default is False.
+            (This requires the `coverage module:
+            <http://nedbatchelder.com/code/modules/coverage.html>`_).
+
+        Returns
+        -------
+        result : object
+            Returns the result of running the tests as a
+            ``nose.result.TextTestResult`` object.
+
+        Notes
+        -----
+        Each NumPy module exposes `test` in its namespace to run all tests for it.
+        For example, to run all tests for numpy.lib::
+
+          >>> np.lib.test()
+
+        Examples
+        --------
+        >>> result = np.lib.test()
+        Running unit tests for numpy.lib
+        ...
+        Ran 976 tests in 3.933s
+
+        OK
+
+        >>> result.errors
+        []
+        >>> result.knownfail
+        []
+
+        """

        # cap verbosity at 3 because nose becomes *very* verbose beyond that
        verbose = min(verbose, 3)
@@ -266,9 +336,61 @@
        return t.result

    def bench(self, label='fast', verbose=1, extra_argv=None):
-        ''' Run benchmarks for module using nose
+        """
+        Run benchmarks for module using nose.

-        %(test_header)s'''
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the tests to run. This can be a string to pass to the
+            nosetests executable with the '-A' option, or one of
+            several special values.
+            Special values are:
+            'fast' - the default - which corresponds to the ``nosetests -A``
+            option of 'not slow'.
+            'full' - fast (as above) and slow tests as in the
+            'no -A' option to nosetests - this is the same as ''.
+            None or '' - run all tests.
+            attribute_identifier - string passed directly to nosetests as '-A'.
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+
+        Returns
+        -------
+        success : bool
+            Returns True if running the benchmarks works, False if an error
+            occurred.
+
+        Notes
+        -----
+        Benchmarks are like tests, but have names starting with "bench" instead
+        of "test", and can be found under the "benchmarks" sub-directory of the
+        module.
+
+        Each NumPy module exposes `bench` in its namespace to run all benchmarks
+        for it.
+
+        Examples
+        --------
+        >>> success = np.lib.bench()
+        Running benchmarks for numpy.lib
+        ...
+        using 562341 items:
+        unique:
+        0.11
+        unique1d:
+        0.11
+        ratio: 1.0
+        nUnique: 56230 == 56230
+        ...
+        OK
+
+        >>> success
+        True
+
+        """
        print "Running benchmarks for %s" % self.package_name
        self._show_system_info()
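The usage pattern the `NoseTester` docstring describes looks like this in a
package's ``__init__.py`` (a sketch; ``mypackage`` is a hypothetical package
name used only for illustration)::

    # mypackage/__init__.py
    from numpy.testing import Tester

    test = Tester().test      # Tester() locates the calling module's path
    bench = Tester().bench

    # Callers can then run, for example:
    #   import mypackage
    #   result = mypackage.test('full', verbose=2)
    #   ok = mypackage.bench()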
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 23b8de1f7..71b5944b6 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -19,7 +19,15 @@ __all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
verbose = 0

def assert_(val, msg='') :
-    """Assert that works in release mode."""
+    """
+    Assert that works in release mode.
+
+    The Python built-in ``assert`` does not work when executing code in
+    optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+    For documentation on usage, refer to the Python documentation.
+
+    """
    if not val :
        raise AssertionError(msg)
@@ -299,6 +307,33 @@ def assert_equal(actual,desired,err_msg='',verbose=True):
        raise AssertionError(msg)

def print_assert_equal(test_string,actual,desired):
+    """
+    Test if two objects are equal, and print an error message if test fails.
+
+    The test is performed with ``actual == desired``.
+
+    Parameters
+    ----------
+    test_string : str
+        The message supplied to AssertionError.
+    actual : object
+        The object to test for equality against `desired`.
+    desired : object
+        The expected result.
+
+    Examples
+    --------
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+    Traceback (most recent call last):
+    ...
+    AssertionError: Test XYZ of func xyz failed
+    ACTUAL:
+    [0, 1]
+    DESIRED:
+    [0, 2]
+
+    """
    import pprint
    try:
        assert(actual == desired)
@@ -800,6 +835,31 @@ def runstring(astr, dict):
    exec astr in dict

def assert_string_equal(actual, desired):
+    """
+    Test if two strings are equal.
+
+    If the given strings are equal, `assert_string_equal` does nothing.
+    If they are not equal, an AssertionError is raised, and the diff
+    between the strings is shown.
+
+    Parameters
+    ----------
+    actual : str
+        The string to test for equality against the expected string.
+    desired : str
+        The expected string.
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc+ abcd?    +
+
+    """
    # delay import of difflib to reduce startup time
    import difflib
@@ -841,9 +901,27 @@ def assert_string_equal(actual, desired):

def rundocs(filename=None, raise_on_error=True):
-    """Run doc string tests found in file.
+    """
+    Run doctests found in the given file.
+
+    By default `rundocs` raises an AssertionError on failure.
+
+    Parameters
+    ----------
+    filename : str
+        The path to the file for which the doctests are run.
+    raise_on_error : bool
+        Whether to raise an AssertionError when a doctest fails. Default is
+        True.
+
+    Notes
+    -----
+    The doctests can be run by the user/developer by adding the ``doctests``
+    argument to the ``test()`` call. For example, to run all tests (including
+    doctests) for `numpy.lib`::
+
+      >>> np.lib.test(doctests=True)

-    By default raises AssertionError on failure.
    """
    import doctest, imp
    if filename is None:
@@ -893,20 +971,28 @@ def assert_raises(*args,**kwargs):
    return nose.tools.assert_raises(*args,**kwargs)

def decorate_methods(cls, decorator, testmatch=None):
-    ''' Apply decorator to all methods in class matching testmatch
+    """
+    Apply a decorator to all methods in a class matching a regular expression.
+
+    The given decorator is applied to all public methods of `cls` that are
+    matched by the regular expression `testmatch`
+    (``testmatch.search(methodname)``). Methods that are private, i.e. start
+    with an underscore, are ignored.

    Parameters
    ----------
    cls : class
-        Class to decorate methods for
+        Class whose methods to decorate.
    decorator : function
        Decorator to apply to methods
-    testmatch : compiled regexp or string to compile to regexp
-        Decorators are applied if testmatch.search(methodname)
-        is not None. Default value is
-        re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
-        (the default for nose)
-    '''
+    testmatch : compiled regexp or str, optional
+        The regular expression. Default value is None, in which case the
+        nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+        is used.
+        If `testmatch` is a string, it is compiled to a regular expression
+        first.
+
+    """
    if testmatch is None:
        testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
    else:
@@ -932,8 +1018,37 @@ def decorate_methods(cls, decorator, testmatch=None):

def measure(code_str,times=1,label=None):
-    """ Return elapsed time for executing code_str in the
-    namespace of the caller for given times.
+    """
+    Return elapsed time for executing code in the namespace of the caller.
+
+    The supplied code string is compiled with the Python builtin ``compile``.
+    The precision of the timing is 10 milli-seconds. If the code will execute
+    fast on this timescale, it can be executed many times to get reasonable
+    timing accuracy.
+
+    Parameters
+    ----------
+    code_str : str
+        The code to be timed.
+    times : int, optional
+        The number of times the code is executed. Default is 1. The code is
+        only compiled once.
+    label : str, optional
+        A label to identify `code_str` with. This is passed into ``compile``
+        as the second argument (for run-time error messages).
+
+    Returns
+    -------
+    elapsed : float
+        Total elapsed time in seconds for executing `code_str` `times` times.
+
+    Examples
+    --------
+    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
+    ...                            times=times)
+    >>> print "Time for a single execution : ", etime / times, "s"
+    Time for a single execution : 0.005 s
+
    """
    frame = sys._getframe(1)
    locs,globs = frame.f_locals,frame.f_globals
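Finally, a short sketch (again not part of this patch) exercising a few of the
utilities documented above; timings are machine-dependent, and the code string
passed to ``measure`` executes in the caller's namespace, so ``np`` must be
defined there::

    import numpy as np
    from numpy.testing import assert_string_equal, print_assert_equal, measure

    assert_string_equal('abc', 'abc')                            # passes silently
    print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])   # passes silently

    times = 10
    elapsed = measure('np.sqrt(np.arange(1000.0))', times=times, label='sqrt')
    # `elapsed` is the total wall-clock time for all `times` executions;
    # divide by `times` for a per-call estimate.
    per_call = elapsed / times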