Diffstat (limited to 'numpy/testing')
-rw-r--r--  numpy/testing/__init__.py            24
-rw-r--r--  numpy/testing/decorators.py          92
-rw-r--r--  numpy/testing/info.py                30
-rw-r--r--  numpy/testing/nosetester.py         140
-rw-r--r--  numpy/testing/nulltester.py          15
-rw-r--r--  numpy/testing/numpytest.py          548
-rw-r--r--  numpy/testing/parametric.py         300
-rw-r--r--  numpy/testing/pkgtester.py           27
-rw-r--r--  numpy/testing/tests/test_utils.py     7
-rw-r--r--  numpy/testing/utils.py               53
10 files changed, 322 insertions(+), 914 deletions(-)
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 87578fca0..fa448f298 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -1,5 +1,23 @@
-from info import __doc__
-from numpytest import *
+"""Common test support for all numpy test scripts.
+
+This single module should provide all the common functionality for numpy tests
+in a single location, so that test scripts can just import it and work right
+away.
+"""
+
+#import unittest
+from unittest import TestCase
+
+import decorators as dec
 from utils import *
-from parametric import ParametricTestCase
+
+try:
+    import nose
+    from nose.tools import raises
+except ImportError:
+    pass
+
+from numpytest import *
+
+from pkgtester import Tester
+test = Tester().test
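With this change in place, a test script can get everything it needs from one import. A minimal sketch (assuming nose is installed, since TestCase and the assert_* helpers come straight from the new __init__.py above):

    from numpy.testing import TestCase, dec, assert_array_equal

    class TestAdd(TestCase):
        def test_basic(self):
            # plain unittest-style test, collected by nose
            assert_array_equal([1, 2], [1, 2])

        @dec.slow
        def test_exhaustive(self):
            # labeled 'slow'; only selected by the 'full' label
            pass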
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
new file mode 100644
index 000000000..6573c2a43
--- /dev/null
+++ b/numpy/testing/decorators.py
@@ -0,0 +1,92 @@
+"""Decorators for labeling test objects
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+nose.tools.make_decorator(original_function)(decorator) in returning
+the decorator, in order to preserve metadata such as function name,
+setup and teardown functions and so on - see nose.tools for more
+information.
+
+"""
+
+try:
+ import nose
+except ImportError:
+ pass
+
+def slow(t):
+ """Labels a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consits of
+ thousands of tests, so even a second is significant)."""
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ ''' Signals to nose that this function is or is not a test
+
+ Parameters
+ ----------
+ tf : bool
+ If True specifies this is a test, not a test otherwise
+
+ e.g
+ >>> @setastest(False)
+ >>> def func_with_test_in_name(arg1, arg2): pass
+ ...
+ >>>
+
+ This decorator cannot use the nose namespace, because it can be
+ called from a non-test module. See also istest and nottest in
+ nose.tools
+
+ '''
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ ''' Make function raise SkipTest exception if skip_condition is true
+
+ Parameters
+ ---------
+ skip_condition : bool
+ Flag to determine whether to skip test (True) or not (False)
+ msg : string
+ Message to give on raising a SkipTest exception
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes SkipTest
+ to be raised when the skip_condition was True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ You will see from the code that we had to further decorate the
+ decorator with the nose.tools.make_decorator function in order to
+ transmit function name, and various other metadata.
+ '''
+ if msg is None:
+ msg = 'Test skipped due to test condition'
+ def skip_decorator(f):
+ def skipper(*args, **kwargs):
+ if skip_condition:
+ raise nose.SkipTest, msg
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(skipper)
+ return skip_decorator
+
+def skipknownfailure(f):
+ ''' Decorator to raise SkipTest for test known to fail
+ '''
+ def skipper(*args, **kwargs):
+ raise nose.SkipTest, 'This test is known to fail'
+ return nose.tools.make_decorator(f)(skipper)
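As a usage sketch for the three decorators above (the test functions are hypothetical; nose must be importable for skipif and skipknownfailure, since they reference nose when the test runs):

    import sys
    from numpy.testing import dec

    @dec.slow
    def test_big_problem():
        # excluded by the default 'fast' label (-A 'not slow')
        pass

    @dec.skipif(sys.platform == 'win32', 'POSIX-only behaviour')
    def test_posix_only():
        pass

    @dec.setastest(False)
    def test_shaped_helper():
        # despite the test_ name, nose will not collect this
        pass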
diff --git a/numpy/testing/info.py b/numpy/testing/info.py
deleted file mode 100644
index 8b09d8ed3..000000000
--- a/numpy/testing/info.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Numpy testing tools
-===================
-
-Numpy-style unit-testing
-------------------------
-
-  NumpyTest -- Numpy tests site manager
-  NumpyTestCase -- unittest.TestCase with measure method
-  IgnoreException -- raise when checking disabled feature, it'll be ignored
-  set_package_path -- prepend package build directory to path
-  set_local_path -- prepend local directory (to tests files) to path
-  restore_path -- restore path after set_package_path
-
-Utility functions
------------------
-
-  jiffies -- return 1/100ths of a second that the current process has used
-  memusage -- virtual memory size in bytes of the running python [linux]
-  rand -- array of random numbers from given shape
-  assert_equal -- assert equality
-  assert_almost_equal -- assert equality with decimal tolerance
-  assert_approx_equal -- assert equality with significant digits tolerance
-  assert_array_equal -- assert arrays equality
-  assert_array_almost_equal -- assert arrays equality with decimal tolerance
-  assert_array_less -- assert arrays less-ordering
-
-"""
-
-global_symbols = ['ScipyTest','NumpyTest']
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
new file mode 100644
index 000000000..e0a0aad3f
--- /dev/null
+++ b/numpy/testing/nosetester.py
@@ -0,0 +1,140 @@
+''' Nose test running
+
+Implements test and bench functions for modules.
+
+'''
+import os
+import sys
+import re
+
+import nose
+
+class NoseTester(object):
+    """ Nose test runner.
+
+    Usage: NoseTester(<package>).test()
+
+    <package> is the package path or module.  Default for package is
+    None; a value of None finds the calling module path.
+
+    The typical call is from a module __init__, and corresponds to this:
+
+    >>> test = NoseTester().test
+
+    In practice, because nose may not be importable, the __init__
+    files actually have:
+
+    >>> from scipy.testing.pkgtester import Tester
+    >>> test = Tester().test
+
+    The pkgtester module checks for the presence of nose on the path,
+    returning this class if nose is present, and a null class
+    otherwise.
+    """
+
+    def __init__(self, package=None):
+        ''' Test class init
+
+        Parameters
+        ----------
+        package : string or module
+            If string, gives full path to package.
+            If None, extract calling module path.
+            Default is None.
+        '''
+        if package is None:
+            f = sys._getframe(1)
+            package = f.f_locals.get('__file__', None)
+            assert package is not None
+            package = os.path.dirname(package)
+        elif isinstance(package, type(os)):
+            package = os.path.dirname(package.__file__)
+        self.package_path = package
+
+    def _add_doc(testtype):
+        ''' Decorator to add docstring to functions using test labels
+
+        Parameters
+        ----------
+        testtype : string
+            Type of test for function docstring
+        '''
+        def docit(func):
+            test_header = \
+        '''Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}
+            Identifies the %(testtype)s to run.  This can be a string to
+            pass to the nosetests executable with the '-A' option, or one
+            of several special values.
+            Special values are:
+            'fast' - the default - which corresponds to the
+                nosetests -A option of
+                'not slow'.
+            'full' - fast (as above) and slow %(testtype)s, as in
+                no -A option to nosetests - same as ''
+            None or '' - run all %(testtype)ss
+            attribute_identifier - string passed directly to
+                nosetests as '-A'
+        verbose : integer
+            verbosity value for test outputs, 1-10
+        extra_argv : list
+            List with any extra args to pass to nosetests''' \
+            % {'testtype': testtype}
+            func.__doc__ = func.__doc__ % {
+                'test_header': test_header}
+            return func
+        return docit
+
+    @_add_doc('(testtype)')
+    def _test_argv(self, label, verbose, extra_argv):
+        ''' Generate argv for nosetest command
+
+        %(test_header)s
+        '''
+        argv = [__file__, self.package_path, '-s']
+        if label and label != 'full':
+            if not isinstance(label, basestring):
+                raise TypeError, 'Selection label should be a string'
+            if label == 'fast':
+                label = 'not slow'
+            argv += ['-A', label]
+        argv += ['--verbosity', str(verbose)]
+        if extra_argv:
+            argv += extra_argv
+        return argv
+
+    @_add_doc('test')
+    def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
+             coverage=False):
+        ''' Run tests for module using nose
+
+        %(test_header)s
+        doctests : boolean
+            If True, run doctests in module, default False
+        '''
+        argv = self._test_argv(label, verbose, extra_argv)
+        if doctests:
+            argv += ['--with-doctest', '--doctest-tests']
+
+        if coverage:
+            argv += ['--cover-package=numpy', '--with-coverage',
+                     '--cover-tests', '--cover-inclusive', '--cover-erase']
+
+        # bypass these samples under distutils
+        argv += ['--exclude', 'f2py_ext']
+        argv += ['--exclude', 'f2py_f90_ext']
+        argv += ['--exclude', 'gen_ext']
+        argv += ['--exclude', 'pyrex_ext']
+        argv += ['--exclude', 'swig_ext']
+
+        nose.run(argv=argv)
+
+    @_add_doc('benchmark')
+    def bench(self, label='fast', verbose=1, extra_argv=None):
+        ''' Run benchmarks for module using nose
+
+        %(test_header)s'''
+        argv = self._test_argv(label, verbose, extra_argv)
+        argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+        nose.run(argv=argv)
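Driving the runner then looks like this (a sketch, assuming a package __init__ wires up test = Tester().test as shown in the class docstring above):

    import numpy
    numpy.test()                       # 'fast' label: nosetests -A 'not slow'
    numpy.test('full', verbose=2)      # also run tests labeled slow
    numpy.test(label='slow', doctests=True)  # attribute expression passed to -A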
diff --git a/numpy/testing/nulltester.py b/numpy/testing/nulltester.py
new file mode 100644
index 000000000..50d5484f6
--- /dev/null
+++ b/numpy/testing/nulltester.py
@@ -0,0 +1,15 @@
+''' Null tester to signal nose tests disabled
+
+Merely raises an error reporting the lack of the nose package, or a
+nose version number below requirements.
+
+See pkgtester, nosetester modules
+
+'''
+
+class NullTester(object):
+    def test(self, labels=None, *args, **kwargs):
+        raise ImportError, \
+              'Need nose >=0.10 for tests - see %s' % \
+              'http://somethingaboutorange.com/mrl/projects/nose'
+    bench = test
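On a system without a recent nose, the net effect is that the published test hook fails loudly rather than silently doing nothing; roughly:

    >>> import numpy
    >>> numpy.test()
    Traceback (most recent call last):
        ...
    ImportError: Need nose >=0.10 for tests - see http://somethingaboutorange.com/mrl/projects/nose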
diff --git a/numpy/testing/numpytest.py b/numpy/testing/numpytest.py
index 4792f035b..73002f92f 100644
--- a/numpy/testing/numpytest.py
+++ b/numpy/testing/numpytest.py
@@ -10,10 +10,7 @@ import traceback
 import warnings
 
 __all__ = ['set_package_path', 'set_local_path', 'restore_path',
-           'IgnoreException', 'NumpyTestCase', 'NumpyTest',
-           'ScipyTestCase', 'ScipyTest', # for backward compatibility
-           'importall',
-           ]
+           'IgnoreException', 'importall',]
 
 DEBUG=0
 from numpy.testing.utils import jiffies
@@ -113,95 +110,6 @@ class _dummy_stream:
         self.stream.flush()
 
-class NumpyTestCase (unittest.TestCase):
-
-    def measure(self, code_str, times=1):
-        """ Return elapsed time for executing code_str in the
-        namespace of the caller for given times.
-        """
-        frame = get_frame(1)
-        locs, globs = frame.f_locals, frame.f_globals
-        code = compile(code_str,
-                       'NumpyTestCase runner for ' + self.__class__.__name__,
-                       'exec')
-        i = 0
-        elapsed = jiffies()
-        while i < times:
-            i += 1
-            exec code in globs, locs
-        elapsed = jiffies() - elapsed
-        return 0.01*elapsed
-
-    def __call__(self, result=None):
-        if result is None or not hasattr(result, 'errors') \
-               or not hasattr(result, 'stream'):
-            return unittest.TestCase.__call__(self, result)
-
-        nof_errors = len(result.errors)
-        save_stream = result.stream
-        result.stream = _dummy_stream(save_stream)
-        unittest.TestCase.__call__(self, result)
-        if nof_errors != len(result.errors):
-            test, errstr = result.errors[-1][:2]
-            if isinstance(errstr, tuple):
-                errstr = str(errstr[0])
-            elif isinstance(errstr, str):
-                errstr = errstr.split('\n')[-2]
-            else:
-                # allow for proxy classes
-                errstr = str(errstr).split('\n')[-2]
-            l = len(result.stream.data)
-            if errstr.startswith('IgnoreException:'):
-                if l == 1:
-                    assert result.stream.data[-1] == 'E', \
-                           repr(result.stream.data)
-                    result.stream.data[-1] = 'i'
-                else:
-                    assert result.stream.data[-1] == 'ERROR\n', \
-                           repr(result.stream.data)
-                    result.stream.data[-1] = 'ignoring\n'
-                del result.errors[-1]
-        map(save_stream.write, result.stream.data)
-        save_stream.flush()
-        result.stream = save_stream
-
-    def warn(self, message):
-        from numpy.distutils.misc_util import yellow_text
-        print>>sys.stderr, yellow_text('Warning: %s' % (message))
-        sys.stderr.flush()
-    def info(self, message):
-        print>>sys.stdout, message
-        sys.stdout.flush()
-
-    def rundocs(self, filename=None):
-        """ Run doc string tests found in filename.
-        """
-        import doctest
-        if filename is None:
-            f = get_frame(1)
-            filename = f.f_globals['__file__']
-        name = os.path.splitext(os.path.basename(filename))[0]
-        path = [os.path.dirname(filename)]
-        file, pathname, description = imp.find_module(name, path)
-        try:
-            m = imp.load_module(name, file, pathname, description)
-        finally:
-            file.close()
-        if sys.version[:3] < '2.4':
-            doctest.testmod(m, verbose=False)
-        else:
-            tests = doctest.DocTestFinder().find(m)
-            runner = doctest.DocTestRunner(verbose=False)
-            for test in tests:
-                runner.run(test)
-        return
-
-class ScipyTestCase(NumpyTestCase):
-    def __init__(self, package=None):
-        warnings.warn("ScipyTestCase is now called NumpyTestCase; please update your code",
-                      DeprecationWarning, stacklevel=2)
-        NumpyTestCase.__init__(self, package)
-
 def _get_all_method_names(cls):
     names = dir(cls)
@@ -214,460 +122,6 @@ def _get_all_method_names(cls):
 # for debug build--check for memory leaks during the test.
-class _NumPyTextTestResult(unittest._TextTestResult):
-    def startTest(self, test):
-        unittest._TextTestResult.startTest(self, test)
-        if self.showAll:
-            N = len(sys.getobjects(0))
-            self._totnumobj = N
-            self._totrefcnt = sys.gettotalrefcount()
-        return
-
-    def stopTest(self, test):
-        if self.showAll:
-            N = len(sys.getobjects(0))
-            self.stream.write("objects: %d ===> %d; " % (self._totnumobj, N))
-            self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt,
-                                                         sys.gettotalrefcount()))
-        return
-
-class NumPyTextTestRunner(unittest.TextTestRunner):
-    def _makeResult(self):
-        return _NumPyTextTestResult(self.stream, self.descriptions, self.verbosity)
-
-
-class NumpyTest:
-    """ Numpy tests site manager.
-
-    Usage: NumpyTest(<package>).test(level=1,verbosity=1)
-
-    <package> is package name or its module object.
-
-    Package is supposed to contain a directory tests/ with test_*.py
-    files where * refers to the names of submodules.  See .rename()
-    method to redefine name mapping between test_*.py files and names of
-    submodules.  Pattern test_*.py can be overwritten by redefining
-    .get_testfile() method.
-
-    test_*.py files are supposed to define classes, derived from
-    NumpyTestCase or unittest.TestCase, with methods having names
-    starting with test or bench or check.  The names of TestCase classes
-    must have a prefix test.  This can be overwritten by redefining
-    .check_testcase_name() method.
-
-    And that is it!  No need to implement test or test_suite functions
-    in each .py file.
-
-    Old-style test_suite(level=1) hooks are also supported.
-    """
-    _check_testcase_name = re.compile(r'test.*|Test.*').match
-    def check_testcase_name(self, name):
-        """ Return True if name matches TestCase class.
-        """
-        return not not self._check_testcase_name(name)
-
-    testfile_patterns = ['test_%(modulename)s.py']
-    def get_testfile(self, module, verbosity=0):
-        """ Return path to module test file.
-        """
-        mstr = self._module_str
-        short_module_name = self._get_short_module_name(module)
-        d = os.path.split(module.__file__)[0]
-        test_dir = os.path.join(d, 'tests')
-        local_test_dir = os.path.join(os.getcwd(), 'tests')
-        if os.path.basename(os.path.dirname(local_test_dir)) \
-               == os.path.basename(os.path.dirname(test_dir)):
-            test_dir = local_test_dir
-        for pat in self.testfile_patterns:
-            fn = os.path.join(test_dir, pat % {'modulename': short_module_name})
-            if os.path.isfile(fn):
-                return fn
-        if verbosity > 1:
-            self.warn('No test file found in %s for module %s' \
-                      % (test_dir, mstr(module)))
-        return
-
-    def __init__(self, package=None):
-        if package is None:
-            from numpy.distutils.misc_util import get_frame
-            f = get_frame(1)
-            package = f.f_locals.get('__name__', f.f_globals.get('__name__', None))
-            assert package is not None
-        self.package = package
-        self._rename_map = {}
-
-    def rename(self, **kws):
-        """Apply renaming submodule test file test_<name>.py to
-        test_<newname>.py.
-
-        Usage: self.rename(name='newname') before calling the
-        self.test() method.
-
-        If 'newname' is None, then no tests will be executed for a given
-        module.
-        """
-        for k, v in kws.items():
-            self._rename_map[k] = v
-        return
-
-    def _module_str(self, module):
-        filename = module.__file__[-30:]
-        if filename != module.__file__:
-            filename = '...' + filename
-        return '<module %r from %r>' % (module.__name__, filename)
-
-    def _get_method_names(self, clsobj, level):
-        names = []
-        for mthname in _get_all_method_names(clsobj):
-            if mthname[:5] not in ['bench', 'check'] \
-               and mthname[:4] not in ['test']:
-                continue
-            mth = getattr(clsobj, mthname)
-            if type(mth) is not types.MethodType:
-                continue
-            d = mth.im_func.func_defaults
-            if d is not None:
-                mthlevel = d[0]
-            else:
-                mthlevel = 1
-            if level >= mthlevel:
-                if mthname not in names:
-                    names.append(mthname)
-        for base in clsobj.__bases__:
-            for n in self._get_method_names(base, level):
-                if n not in names:
-                    names.append(n)
-        return names
-
-    def _get_short_module_name(self, module):
-        d, f = os.path.split(module.__file__)
-        short_module_name = os.path.splitext(os.path.basename(f))[0]
-        if short_module_name == '__init__':
-            short_module_name = module.__name__.split('.')[-1]
-        short_module_name = self._rename_map.get(short_module_name, short_module_name)
-        return short_module_name
-
-    def _get_module_tests(self, module, level, verbosity):
-        mstr = self._module_str
-
-        short_module_name = self._get_short_module_name(module)
-        if short_module_name is None:
-            return []
-
-        test_file = self.get_testfile(module, verbosity)
-
-        if test_file is None:
-            return []
-
-        if not os.path.isfile(test_file):
-            if short_module_name[:5] == 'info_' \
-               and short_module_name[5:] == module.__name__.split('.')[-2]:
-                return []
-            if short_module_name in ['__cvs_version__', '__svn_version__']:
-                return []
-            if short_module_name[-8:] == '_version' \
-               and short_module_name[:-8] == module.__name__.split('.')[-2]:
-                return []
-            if verbosity > 1:
-                self.warn(test_file)
-                self.warn('   !! No test file %r found for %s' \
-                          % (os.path.basename(test_file), mstr(module)))
-            return []
-
-        if test_file in self.test_files:
-            return []
-
-        parent_module_name = '.'.join(module.__name__.split('.')[:-1])
-        test_module_name, ext = os.path.splitext(os.path.basename(test_file))
-        test_dir_module = parent_module_name + '.tests'
-        test_module_name = test_dir_module + '.' + test_module_name
-
-        if test_dir_module not in sys.modules:
-            sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
-        old_sys_path = sys.path[:]
-        try:
-            f = open(test_file, 'r')
-            test_module = imp.load_module(test_module_name, f,
-                                          test_file, ('.py', 'r', 1))
-            f.close()
-        except:
-            sys.path[:] = old_sys_path
-            self.warn('FAILURE importing tests for %s' % (mstr(module)))
-            output_exception(sys.stderr)
-            return []
-        sys.path[:] = old_sys_path
-
-        self.test_files.append(test_file)
-
-        return self._get_suite_list(test_module, level, module.__name__)
-
-    def _get_suite_list(self, test_module, level, module_name='__main__',
-                        verbosity=1):
-        suite_list = []
-        if hasattr(test_module, 'test_suite'):
-            suite_list.extend(test_module.test_suite(level)._tests)
-        for name in dir(test_module):
-            obj = getattr(test_module, name)
-            if type(obj) is not type(unittest.TestCase) \
-               or not issubclass(obj, unittest.TestCase) \
-               or not self.check_testcase_name(obj.__name__):
-                continue
-            for mthname in self._get_method_names(obj, level):
-                suite = obj(mthname)
-                if getattr(suite, 'isrunnable', lambda mthname: 1)(mthname):
-                    suite_list.append(suite)
-        matched_suite_list = [suite for suite in suite_list \
-                              if self.testcase_match(suite.id() \
-                                                     .replace('__main__.', ''))]
-        if verbosity >= 0:
-            self.info('  Found %s/%s tests for %s' \
-                      % (len(matched_suite_list), len(suite_list), module_name))
-        return matched_suite_list
-
-    def _test_suite_from_modules(self, this_package, level, verbosity):
-        package_name = this_package.__name__
-        modules = []
-        for name, module in sys.modules.items():
-            if not name.startswith(package_name) or module is None:
-                continue
-            if not hasattr(module, '__file__'):
-                continue
-            if os.path.basename(os.path.dirname(module.__file__)) == 'tests':
-                continue
-            modules.append((name, module))
-
-        modules.sort()
-        modules = [m[1] for m in modules]
-
-        self.test_files = []
-        suites = []
-        for module in modules:
-            suites.extend(self._get_module_tests(module, abs(level), verbosity))
-
-        suites.extend(self._get_suite_list(sys.modules[package_name],
-                                           abs(level), verbosity=verbosity))
-        return unittest.TestSuite(suites)
-
-    def _test_suite_from_all_tests(self, this_package, level, verbosity):
-        importall(this_package)
-        package_name = this_package.__name__
-
-        # Find all tests/ directories under the package
-        test_dirs_names = {}
-        for name, module in sys.modules.items():
-            if not name.startswith(package_name) or module is None:
-                continue
-            if not hasattr(module, '__file__'):
-                continue
-            d = os.path.dirname(module.__file__)
-            if os.path.basename(d) == 'tests':
-                continue
-            d = os.path.join(d, 'tests')
-            if not os.path.isdir(d):
-                continue
-            if d in test_dirs_names:
-                continue
-            test_dir_module = '.'.join(name.split('.')[:-1] + ['tests'])
-            test_dirs_names[d] = test_dir_module
-
-        test_dirs = test_dirs_names.keys()
-        test_dirs.sort()
-
-        # For each file in each tests/ directory with a test case in it,
-        # import the file, and add the test cases to our list
-        suite_list = []
-        testcase_match = re.compile(r'\s*class\s+\w+\s*\(.*TestCase').match
-        for test_dir in test_dirs:
-            test_dir_module = test_dirs_names[test_dir]
-
-            if test_dir_module not in sys.modules:
-                sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
-            for fn in os.listdir(test_dir):
-                base, ext = os.path.splitext(fn)
-                if ext != '.py':
-                    continue
-                f = os.path.join(test_dir, fn)
-
-                # check that file contains TestCase class definitions:
-                fid = open(f, 'r')
-                skip = True
-                for line in fid:
-                    if testcase_match(line):
-                        skip = False
-                        break
-                fid.close()
-                if skip:
-                    continue
-
-                # import the test file
-                n = test_dir_module + '.' + base
-                # in case test files import local modules
-                sys.path.insert(0, test_dir)
-                fo = None
-                try:
-                    try:
-                        fo = open(f)
-                        test_module = imp.load_module(n, fo, f,
-                                                      ('.py', 'U', 1))
-                    except Exception, msg:
-                        print 'Failed importing %s: %s' % (f, msg)
-                        continue
-                finally:
-                    if fo:
-                        fo.close()
-                    del sys.path[0]
-
-                suites = self._get_suite_list(test_module, level,
-                                              module_name=n,
-                                              verbosity=verbosity)
-                suite_list.extend(suites)
-
-        all_tests = unittest.TestSuite(suite_list)
-        return all_tests
-
-    def test(self, level=1, verbosity=1, all=True, sys_argv=[],
-             testcase_pattern='.*'):
-        """Run Numpy module test suite with level and verbosity.
-
-        level:
-          None --- do nothing, return None
-          < 0  --- scan for tests of level=abs(level),
-                   don't run them, return TestSuite-list
-          > 0  --- scan for tests of level, run them,
-                   return TestRunner
-          > 10 --- run all tests (same as specifying all=True)
-                   (backward compatibility).
-
-        verbosity:
-          >= 0 --- show information messages
-          > 1  --- show warnings on missing tests
-
-        all:
-          True (default) --- run all test files (like self.testall())
-          False --- only run test files associated with a module
-
-        sys_argv --- replacement of sys.argv[1:] during running
-                     tests.
-
-        testcase_pattern --- run only tests that match given pattern.
-
-        It is assumed (when all=False) that the package's test suite
-        follows the following convention: for each package module, there
-        exists a file <packagepath>/tests/test_<modulename>.py that
-        defines TestCase classes (with names having prefix 'test_') with
-        methods (with names having prefixes 'check_' or 'bench_'); each
-        of these methods is called when running unit tests.
-        """
-        if level is None: # Do nothing.
-            return
-
-        if isinstance(self.package, str):
-            exec 'import %s as this_package' % (self.package)
-        else:
-            this_package = self.package
-
-        self.testcase_match = re.compile(testcase_pattern).match
-
-        if all:
-            all_tests = self._test_suite_from_all_tests(this_package,
-                                                        level, verbosity)
-        else:
-            all_tests = self._test_suite_from_modules(this_package,
-                                                      level, verbosity)
-
-        if level < 0:
-            return all_tests
-
-        runner = unittest.TextTestRunner(verbosity=verbosity)
-        old_sys_argv = sys.argv[1:]
-        sys.argv[1:] = sys_argv
-        # Use the builtin displayhook.  If the tests are being run
-        # under IPython (for instance), any doctest test suites will
-        # fail otherwise.
-        old_displayhook = sys.displayhook
-        sys.displayhook = sys.__displayhook__
-        try:
-            r = runner.run(all_tests)
-        finally:
-            sys.displayhook = old_displayhook
-        sys.argv[1:] = old_sys_argv
-        return r
-
-    def testall(self, level=1, verbosity=1):
-        """ Run Numpy module test suite with level and verbosity.
-
-        level:
-          None --- do nothing, return None
-          < 0  --- scan for tests of level=abs(level),
-                   don't run them, return TestSuite-list
-          > 0  --- scan for tests of level, run them,
-                   return TestRunner
-
-        verbosity:
-          >= 0 --- show information messages
-          > 1  --- show warnings on missing tests
-
-        Different from the .test(..) method, this method looks for
-        TestCase classes in all files in the <packagedir>/tests/
-        directory, and no assumptions are made about the naming of the
-        TestCase classes or their methods.
-        """
-        return self.test(level=level, verbosity=verbosity, all=True)
-
-    def run(self):
-        """ Run Numpy module test suite with level and verbosity
-        taken from sys.argv.  Requires the optparse module.
-        """
-        try:
-            from optparse import OptionParser
-        except ImportError:
-            self.warn('Failed to import optparse module, ignoring.')
-            return self.test()
-        usage = r'usage: %prog [-v <verbosity>] [-l <level>]' \
-                r' [-s "<replacement of sys.argv[1:]>"]' \
-                r' [-t "<testcase pattern>"]'
-        parser = OptionParser(usage)
-        parser.add_option("-v", "--verbosity",
-                          action="store",
-                          dest="verbosity",
-                          default=1,
-                          type='int')
-        parser.add_option("-l", "--level",
-                          action="store",
-                          dest="level",
-                          default=1,
-                          type='int')
-        parser.add_option("-s", "--sys-argv",
-                          action="store",
-                          dest="sys_argv",
-                          default='',
-                          type='string')
-        parser.add_option("-t", "--testcase-pattern",
-                          action="store",
-                          dest="testcase_pattern",
-                          default=r'.*',
-                          type='string')
-        (options, args) = parser.parse_args()
-        return self.test(options.level, options.verbosity,
-                         sys_argv=shlex.split(options.sys_argv or ''),
-                         testcase_pattern=options.testcase_pattern)
-
-    def warn(self, message):
-        from numpy.distutils.misc_util import yellow_text
-        print>>sys.stderr, yellow_text('Warning: %s' % (message))
-        sys.stderr.flush()
-    def info(self, message):
-        print>>sys.stdout, message
-        sys.stdout.flush()
-
-class ScipyTest(NumpyTest):
-    def __init__(self, package=None):
-        warnings.warn("ScipyTest is now called NumpyTest; please update your code",
-                      DeprecationWarning, stacklevel=2)
-        NumpyTest.__init__(self, package)
-
 def importall(package):
     """
diff --git a/numpy/testing/parametric.py b/numpy/testing/parametric.py
deleted file mode 100644
index 43577d7d4..000000000
--- a/numpy/testing/parametric.py
+++ /dev/null
@@ -1,300 +0,0 @@
-"""Support for parametric tests in unittest.
-
-:Author: Fernando Perez
-
-Purpose
-=======
-
-Briefly, the main class in this module allows you to easily and cleanly
-(without the gross name-mangling hacks that are normally needed) to write
-unittest TestCase classes that have parametrized tests. That is, tests which
-consist of multiple sub-tests that scan for example a parameter range, but
-where you want each sub-test to:
-
-* count as a separate test in the statistics.
-
-* be run even if others in the group error out or fail.
-
-
-The class offers a simple name-based convention to create such tests (see
-simple example at the end), in one of two ways:
-
-* Each sub-test in a group can be run fully independently, with the
- setUp/tearDown methods being called each time.
-
-* The whole group can be run with setUp/tearDown being called only once for the
- group. This lets you conveniently reuse state that may be very expensive to
- compute for multiple tests. Be careful not to corrupt it!!!
-
-
-Caveats
-=======
-
-This code relies on implementation details of the unittest module (some key
-methods are heavily modified versions of those, after copying them in). So it
-may well break either if you make sophisticated use of the unittest APIs, or if
-unittest itself changes in the future. I have only tested this with Python
-2.5.
-
-"""
-__docformat__ = "restructuredtext en"
-
-import unittest
-
-class ParametricTestCase(unittest.TestCase):
- """TestCase subclass with support for parametric tests.
-
- Subclasses of this class can implement test methods that return a list of
- tests and arguments to call those with, to do parametric testing (often
- also called 'data driven' testing."""
-
- #: Prefix for tests with independent state. These methods will be run with
- #: a separate setUp/tearDown call for each test in the group.
- _indepParTestPrefix = 'testip'
-
- #: Prefix for tests with shared state. These methods will be run with
- #: a single setUp/tearDown call for the whole group. This is useful when
- #: writing a group of tests for which the setup is expensive and one wants
- #: to actually share that state. Use with care (especially be careful not
- #: to mutate the state you are using, which will alter later tests).
- _shareParTestPrefix = 'testsp'
-
- def exec_test(self,test,args,result):
- """Execute a single test. Returns a success boolean"""
-
- ok = False
- try:
- test(*args)
- ok = True
- except self.failureException:
- result.addFailure(self, self._exc_info())
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
-
- return ok
-
- def set_testMethodDoc(self,doc):
- self._testMethodDoc = doc
- self._TestCase__testMethodDoc = doc
-
- def get_testMethodDoc(self):
- return self._testMethodDoc
-
- testMethodDoc = property(fset=set_testMethodDoc, fget=get_testMethodDoc)
-
- def get_testMethodName(self):
- try:
- return getattr(self,"_testMethodName")
- except:
- return getattr(self,"_TestCase__testMethodName")
-
- testMethodName = property(fget=get_testMethodName)
-
- def run_test(self, testInfo,result):
- """Run one test with arguments"""
-
- test,args = testInfo[0],testInfo[1:]
-
- # Reset the doc attribute to be the docstring of this particular test,
- # so that in error messages it prints the actual test's docstring and
- # not that of the test factory.
- self.testMethodDoc = test.__doc__
- result.startTest(self)
- try:
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- return
-
- ok = self.exec_test(test,args,result)
-
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- ok = False
- if ok: result.addSuccess(self)
- finally:
- result.stopTest(self)
-
- def run_tests(self, tests,result):
- """Run many tests with a common setUp/tearDown.
-
- The entire set of tests is run with a single setUp/tearDown call."""
-
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.testsRun += 1
- result.addError(self, self._exc_info())
- return
-
- saved_doc = self.testMethodDoc
-
- try:
- # Run all the tests specified
- for testInfo in tests:
- test,args = testInfo[0],testInfo[1:]
-
- # Set the doc argument for this test. Note that even if we do
- # this, the fail/error tracebacks still print the docstring for
- # the parent factory, because they only generate the message at
- # the end of the run, AFTER we've restored it. There is no way
- # to tell the unittest system (without overriding a lot of
- # stuff) to extract this information right away, the logic is
- # hardcoded to pull it later, since unittest assumes it doesn't
- # change.
- self.testMethodDoc = test.__doc__
- result.startTest(self)
- ok = self.exec_test(test,args,result)
- if ok: result.addSuccess(self)
-
- finally:
- # Restore docstring info and run tearDown once only.
- self.testMethodDoc = saved_doc
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
-
- def run(self, result=None):
- """Test runner."""
-
- #print
- #print '*** run for method:',self._testMethodName # dbg
- #print '*** doc:',self._testMethodDoc # dbg
-
- if result is None: result = self.defaultTestResult()
-
- # Independent tests: each gets its own setup/teardown
- if self.testMethodName.startswith(self._indepParTestPrefix):
- for t in getattr(self,self.testMethodName)():
- self.run_test(t,result)
- # Shared-state test: single setup/teardown for all
- elif self.testMethodName.startswith(self._shareParTestPrefix):
- tests = getattr(self,self.testMethodName,'runTest')()
- self.run_tests(tests,result)
- # Normal unittest Test methods
- else:
- unittest.TestCase.run(self,result)
-
-#############################################################################
-# Quick and dirty interactive example/test
-if __name__ == '__main__':
-
- class ExampleTestCase(ParametricTestCase):
-
- #-------------------------------------------------------------------
- # An instrumented setUp method so we can see when it gets called and
- # how many times per instance
- counter = 0
-
- def setUp(self):
- self.counter += 1
- print 'setUp count: %2s for: %s' % (self.counter,
- self.testMethodDoc)
-
- #-------------------------------------------------------------------
- # A standard test method, just like in the unittest docs.
- def test_foo(self):
- """Normal test for feature foo."""
- pass
-
- #-------------------------------------------------------------------
- # Testing methods that need parameters. These can NOT be named test*,
- # since they would be picked up by unittest and called without
- # arguments. Instead, call them anything else (I use tst*) and then
- # load them via the factories below.
- def tstX(self,i):
- "Test feature X with parameters."
- print 'tstX, i=',i
- if i==1 or i==3:
- # Test fails
- self.fail('i is bad, bad: %s' % i)
-
- def tstY(self,i):
- "Test feature Y with parameters."
- print 'tstY, i=',i
- if i==1:
- # Force an error
- 1/0
-
- def tstXX(self,i,j):
- "Test feature XX with parameters."
- print 'tstXX, i=',i,'j=',j
- if i==1:
- # Test fails
- self.fail('i is bad, bad: %s' % i)
-
- def tstYY(self,i):
- "Test feature YY with parameters."
- print 'tstYY, i=',i
- if i==2:
- # Force an error
- 1/0
-
- def tstZZ(self):
- """Test feature ZZ without parameters, needs multiple runs.
-
- This could be a random test that you want to run multiple times."""
- pass
-
- #-------------------------------------------------------------------
- # Parametric test factories that create the test groups to call the
- # above tst* methods with their required arguments.
- def testip(self):
- """Independent parametric test factory.
-
- A separate setUp() call is made for each test returned by this
- method.
-
- You must return an iterable (list or generator is fine) containing
- tuples with the actual method to be called as the first argument,
- and the arguments for that call later."""
- return [(self.tstX,i) for i in range(5)]
-
- def testip2(self):
- """Another independent parametric test factory"""
- return [(self.tstY,i) for i in range(5)]
-
- def testip3(self):
- """Test factory combining different subtests.
-
- This one shows how to assemble calls to different tests."""
- return [(self.tstX,3),(self.tstX,9),(self.tstXX,4,10),
- (self.tstZZ,),(self.tstZZ,)]
-
- def testsp(self):
- """Shared parametric test factory
-
- A single setUp() call is made for all the tests returned by this
- method.
- """
- return [(self.tstXX,i,i+1) for i in range(5)]
-
- def testsp2(self):
- """Another shared parametric test factory"""
- return [(self.tstYY,i) for i in range(5)]
-
- def testsp3(self):
- """Another shared parametric test factory.
-
- This one simply calls the same test multiple times, without any
- arguments. Note that you must still return tuples, even if there
- are no arguments."""
- return [(self.tstZZ,) for i in range(10)]
-
-
- # This test class runs normally under unittest's default runner
- unittest.main()
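Under nose, which this commit adopts, the same per-parameter accounting is normally obtained with test generators instead of a TestCase subclass; a minimal sketch of the replacement idiom (hypothetical test file):

    def check_even(n):
        assert n % 2 == 0

    def test_evens():
        # nose collects, runs and counts each yielded call as a
        # separate test, and keeps going when one of them fails
        for n in [0, 2, 4, 6]:
            yield check_even, n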
diff --git a/numpy/testing/pkgtester.py b/numpy/testing/pkgtester.py
new file mode 100644
index 000000000..8b22955fa
--- /dev/null
+++ b/numpy/testing/pkgtester.py
@@ -0,0 +1,27 @@
+''' Define test function for scipy package
+
+This module tests for the presence of a usable version of nose.  If
+nose is present it returns a NoseTester, otherwise it returns a
+placeholder test routine reporting the lack of nose and the inability
+to run tests.  Typical use is in a module __init__:
+
+from scipy.testing.pkgtester import Tester
+test = Tester().test
+
+See the nosetester module for the test implementation.
+
+'''
+fine_nose = True
+try:
+    import nose
+except ImportError:
+    fine_nose = False
+else:
+    nose_version = nose.__versioninfo__
+    if nose_version[0] < 1 and nose_version[1] < 10:
+        fine_nose = False
+
+if fine_nose:
+    from numpy.testing.nosetester import NoseTester as Tester
+else:
+    from numpy.testing.nulltester import NullTester as Tester
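A quick sanity sketch of the version gate above (the __versioninfo__ tuples are illustrative):

    >>> def fine(v):
    ...     return not (v[0] < 1 and v[1] < 10)
    >>> [fine(v) for v in [(0, 9, 2), (0, 10, 0), (1, 0, 0)]]
    [False, True, True]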
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 6f40c778b..27cc4a809 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1,9 +1,7 @@
 import numpy as N
-from numpy.testing.utils import *
-
+from numpy.testing import *
 import unittest
-
 
 class _GenericTest(object):
     def _test_equal(self, a, b):
         self._assert_func(a, b)
@@ -163,5 +161,6 @@ class TestRaises(unittest.TestCase):
         else:
             raise AssertionError("should have raised an AssertionError")
 
+
 if __name__ == '__main__':
-    unittest.main()
+    nose.run(argv=['', __file__])
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 680b4f168..538014f33 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -10,8 +10,8 @@ import operator
 
 __all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
            'assert_array_equal', 'assert_array_less', 'assert_string_equal',
-           'assert_array_almost_equal', 'jiffies', 'memusage', 'rand',
-           'runstring', 'raises']
+           'assert_array_almost_equal', 'build_err_msg', 'jiffies', 'memusage',
+           'rand', 'rundocs', 'runstring']
 
 def rand(*args):
     """Returns an array of random numbers with the given shape.
@@ -295,32 +295,25 @@ def assert_string_equal(actual, desired):
     assert actual==desired, msg
 
-def raises(*exceptions):
-    """ Assert that a test function raises one of the specified exceptions to
-    pass.
+def rundocs(filename=None):
+    """ Run doc string tests found in filename.
     """
-    # FIXME: when we transition to nose, just use its implementation.  It's
-    # better.
-    def deco(function):
-        def f2(*args, **kwds):
-            try:
-                function(*args, **kwds)
-            except exceptions:
-                pass
-            except:
-                # Anything else.
-                raise
-            else:
-                raise AssertionError('%s() did not raise one of (%s)' %
-                    (function.__name__, ', '.join([e.__name__ for e in exceptions])))
-        try:
-            f2.__name__ = function.__name__
-        except TypeError:
-            # Python 2.3 does not permit this.
-            pass
-        f2.__dict__ = function.__dict__
-        f2.__doc__ = function.__doc__
-        f2.__module__ = function.__module__
-        return f2
-
-    return deco
+    import doctest, imp
+    if filename is None:
+        f = sys._getframe(1)
+        filename = f.f_globals['__file__']
+    name = os.path.splitext(os.path.basename(filename))[0]
+    path = [os.path.dirname(filename)]
+    file, pathname, description = imp.find_module(name, path)
+    try:
+        m = imp.load_module(name, file, pathname, description)
+    finally:
+        file.close()
+    if sys.version[:3] < '2.4':
+        doctest.testmod(m, verbose=False)
+    else:
+        tests = doctest.DocTestFinder().find(m)
+        runner = doctest.DocTestRunner(verbose=False)
+        for test in tests:
+            runner.run(test)
+    return
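A usage sketch for the relocated rundocs helper (the module path is hypothetical):

    from numpy.testing.utils import rundocs

    # run the doctests found in a module's source file
    rundocs('/path/to/mymodule.py')

    # or, with no argument, the doctests of the calling module
    rundocs()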