diff options
author | Eric Jones <eric@enthought.com> | 2002-02-19 10:35:37 +0000 |
---|---|---|
committer | Eric Jones <eric@enthought.com> | 2002-02-19 10:35:37 +0000 |
commit | 5ba533b4b7b4562c36df4d0a698efff872a98086 (patch) | |
tree | 41e8d30399c1f16db04af95e353afd16e7ad3596 /weave/tests/test_build_tools.py | |
parent | 04891cf9a4a66f2c64391eda97ecf38fff9d35c9 (diff) | |
download | numpy-5ba533b4b7b4562c36df4d0a698efff872a98086.tar.gz |
major overhaul to testing framework. module_xxx.test() now takes a 'level' argument to specify how thorough the testing should be. Level 1 is the least thorough, and only runs rapid tests (as specified by the test writer) on the module/package. level=10 is the most thorough testing. Any value between 1 and 10 can be used. I'm currently using 1, 5, and 10 for most of my tests, but others may wish to use more fine-grained settings.
Diffstat (limited to 'weave/tests/test_build_tools.py')
-rw-r--r-- | weave/tests/test_build_tools.py | 13 |
1 files changed, 7 insertions, 6 deletions
diff --git a/weave/tests/test_build_tools.py b/weave/tests/test_build_tools.py index e9a4a2c1d..9459675ea 100644 --- a/weave/tests/test_build_tools.py +++ b/weave/tests/test_build_tools.py @@ -64,16 +64,17 @@ class test_configure_sys_argv(unittest.TestCase): build_tools.restore_sys_argv() assert(pre_argv == sys.argv[:]) -def test_suite(): +def test_suite(level = 1): suites = [] - suites.append( unittest.makeSuite(test_configure_build_dir,'check_') ) - suites.append( unittest.makeSuite(test_configure_temp_dir,'check_') ) - suites.append( unittest.makeSuite(test_configure_sys_argv,'check_') ) + if level > 0: + suites.append( unittest.makeSuite(test_configure_build_dir,'check_') ) + suites.append( unittest.makeSuite(test_configure_temp_dir,'check_') ) + suites.append( unittest.makeSuite(test_configure_sys_argv,'check_') ) total_suite = unittest.TestSuite(suites) return total_suite -def test(): - all_tests = test_suite() +def test(level=10): + all_tests = test_suite(level) runner = unittest.TextTestRunner() runner.run(all_tests) return runner |