Diffstat (limited to 'lib/sqlalchemy/testing/plugin')
-rw-r--r--  lib/sqlalchemy/testing/plugin/noseplugin.py     443
-rw-r--r--  lib/sqlalchemy/testing/plugin/plugin_base.py    422
-rw-r--r--  lib/sqlalchemy/testing/plugin/pytestplugin.py   125
3 files changed, 578 insertions, 412 deletions
diff --git a/lib/sqlalchemy/testing/plugin/noseplugin.py b/lib/sqlalchemy/testing/plugin/noseplugin.py index 27a028cd4..a68eb88cf 100644 --- a/lib/sqlalchemy/testing/plugin/noseplugin.py +++ b/lib/sqlalchemy/testing/plugin/noseplugin.py @@ -6,276 +6,24 @@ """Enhance nose with extra options and behaviors for running SQLAlchemy tests. -When running ./sqla_nose.py, this module is imported relative to the -"plugins" package as a top level package by the sqla_nose.py runner, -so that the plugin can be loaded with the rest of nose including the coverage -plugin before any of SQLAlchemy itself is imported, so that coverage works. - -When third party libraries use this plugin, it can be imported -normally as "from sqlalchemy.testing.plugin import noseplugin". +Must be run via ./sqla_nose.py so that it is imported in the expected +way (e.g. as a package-less import). """ -from __future__ import absolute_import - import os -import sys -py3k = sys.version_info >= (3, 0) - -if py3k: - import configparser -else: - import ConfigParser as configparser from nose.plugins import Plugin -from nose import SkipTest -import sys -import re - -# late imports fixtures = None -engines = None -exclusions = None -warnings = None -profiling = None -assertions = None -requirements = None -config = None -util = None -file_config = None - - -logging = None -db = None -db_label = None -db_url = None -db_opts = {} -options = None -_existing_engine = None - - -def _log(option, opt_str, value, parser): - global logging - if not logging: - import logging - logging.basicConfig() - - if opt_str.endswith('-info'): - logging.getLogger(value).setLevel(logging.INFO) - elif opt_str.endswith('-debug'): - logging.getLogger(value).setLevel(logging.DEBUG) - - -def _list_dbs(*args): - print("Available --db options (use --dburi to override)") - for macro in sorted(file_config.options('db')): - print("%20s\t%s" % (macro, file_config.get('db', macro))) - sys.exit(0) - - -def _server_side_cursors(options, opt_str, value, parser): - db_opts['server_side_cursors'] = True - - -def _engine_strategy(options, opt_str, value, parser): - if value: - db_opts['strategy'] = value - -pre_configure = [] -post_configure = [] - - -def pre(fn): - pre_configure.append(fn) - return fn - - -def post(fn): - post_configure.append(fn) - return fn - - -@pre -def _setup_options(opt, file_config): - global options - options = opt - - -@pre -def _monkeypatch_cdecimal(options, file_config): - if options.cdecimal: - import cdecimal - sys.modules['decimal'] = cdecimal - - -@post -def _engine_uri(options, file_config): - global db_label, db_url - - if options.dburi: - db_url = options.dburi - db_label = db_url[:db_url.index(':')] - elif options.db: - db_label = options.db - db_url = None - - if db_url is None: - if db_label not in file_config.options('db'): - raise RuntimeError( - "Unknown URI specifier '%s'. Specify --dbs for known uris." 
- % db_label) - db_url = file_config.get('db', db_label) - - -@post -def _require(options, file_config): - if not(options.require or - (file_config.has_section('require') and - file_config.items('require'))): - return - - try: - import pkg_resources - except ImportError: - raise RuntimeError("setuptools is required for version requirements") - - cmdline = [] - for requirement in options.require: - pkg_resources.require(requirement) - cmdline.append(re.split('\s*(<!>=)', requirement, 1)[0]) - - if file_config.has_section('require'): - for label, requirement in file_config.items('require'): - if not label == db_label or label.startswith('%s.' % db_label): - continue - seen = [c for c in cmdline if requirement.startswith(c)] - if seen: - continue - pkg_resources.require(requirement) - - -@post -def _engine_pool(options, file_config): - if options.mockpool: - from sqlalchemy import pool - db_opts['poolclass'] = pool.AssertionPool - - -@post -def _create_testing_engine(options, file_config): - from sqlalchemy.testing import engines, config - from sqlalchemy import testing - global db - config.db = testing.db = db = engines.testing_engine(db_url, db_opts) - config.db.connect().close() - config.db_opts = db_opts - config.db_url = db_url - - -@post -def _prep_testing_database(options, file_config): - from sqlalchemy.testing import engines - from sqlalchemy import schema, inspect - - # also create alt schemas etc. here? - if options.dropfirst: - e = engines.utf8_engine() - inspector = inspect(e) - try: - view_names = inspector.get_view_names() - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute(schema._DropView(schema.Table(vname, schema.MetaData()))) - - try: - view_names = inspector.get_view_names(schema="test_schema") - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute(schema._DropView( - schema.Table(vname, - schema.MetaData(), schema="test_schema"))) - - for tname in reversed(inspector.get_table_names(order_by="foreign_key")): - e.execute(schema.DropTable(schema.Table(tname, schema.MetaData()))) - - for tname in reversed(inspector.get_table_names( - order_by="foreign_key", schema="test_schema")): - e.execute(schema.DropTable( - schema.Table(tname, schema.MetaData(), schema="test_schema"))) - - e.dispose() - - -@post -def _set_table_options(options, file_config): - from sqlalchemy.testing import schema - - table_options = schema.table_options - for spec in options.tableopts: - key, value = spec.split('=') - table_options[key] = value - - if options.mysql_engine: - table_options['mysql_engine'] = options.mysql_engine - - -@post -def _reverse_topological(options, file_config): - if options.reversetop: - from sqlalchemy.orm.util import randomize_unitofwork - randomize_unitofwork() - - -def _requirements_opt(options, opt_str, value, parser): - _setup_requirements(value) - -@post -def _requirements(options, file_config): - - requirement_cls = file_config.get('sqla_testing', "requirement_cls") - _setup_requirements(requirement_cls) - -def _setup_requirements(argument): - from sqlalchemy.testing import config - from sqlalchemy import testing - - if config.requirements is not None: - return - - modname, clsname = argument.split(":") - - # importlib.import_module() only introduced in 2.7, a little - # late - mod = __import__(modname) - for component in modname.split(".")[1:]: - mod = getattr(mod, component) - req_cls = getattr(mod, clsname) - config.requirements = testing.requires = req_cls(config) - - -@post -def _post_setup_options(opt, 
file_config): - from sqlalchemy.testing import config - config.options = options - config.file_config = file_config - - -@post -def _setup_profiling(options, file_config): - from sqlalchemy.testing import profiling - profiling._profile_stats = profiling.ProfileStatsFile( - file_config.get('sqla_testing', 'profile_file')) +# no package imports yet! this prevents us from tripping coverage +# too soon. +import imp +path = os.path.join(os.path.dirname(__file__), "plugin_base.py") +plugin_base = imp.load_source("plugin_base", path) class NoseSQLAlchemy(Plugin): - """ - Handles the setup and extra properties required for testing SQLAlchemy - """ enabled = True name = 'sqla_testing' @@ -284,74 +32,29 @@ class NoseSQLAlchemy(Plugin): def options(self, parser, env=os.environ): Plugin.options(self, parser, env) opt = parser.add_option - opt("--log-info", action="callback", type="string", callback=_log, - help="turn on info logging for <LOG> (multiple OK)") - opt("--log-debug", action="callback", type="string", callback=_log, - help="turn on debug logging for <LOG> (multiple OK)") - opt("--require", action="append", dest="require", default=[], - help="require a particular driver or module version (multiple OK)") - opt("--db", action="store", dest="db", default="default", - help="Use prefab database uri") - opt('--dbs', action='callback', callback=_list_dbs, - help="List available prefab dbs") - opt("--dburi", action="store", dest="dburi", - help="Database uri (overrides --db)") - opt("--dropfirst", action="store_true", dest="dropfirst", - help="Drop all tables in the target database first") - opt("--mockpool", action="store_true", dest="mockpool", - help="Use mock pool (asserts only one connection used)") - opt("--low-connections", action="store_true", dest="low_connections", - help="Use a low number of distinct connections - i.e. 
for Oracle TNS" - ) - opt("--enginestrategy", action="callback", type="string", - callback=_engine_strategy, - help="Engine strategy (plain or threadlocal, defaults to plain)") - opt("--reversetop", action="store_true", dest="reversetop", default=False, - help="Use a random-ordering set implementation in the ORM (helps " - "reveal dependency issues)") - opt("--requirements", action="callback", type="string", - callback=_requirements_opt, - help="requirements class for testing, overrides setup.cfg") - opt("--with-cdecimal", action="store_true", dest="cdecimal", default=False, - help="Monkeypatch the cdecimal library into Python 'decimal' for all tests") - opt("--unhashable", action="store_true", dest="unhashable", default=False, - help="Disallow SQLAlchemy from performing a hash() on mapped test objects.") - opt("--noncomparable", action="store_true", dest="noncomparable", default=False, - help="Disallow SQLAlchemy from performing == on mapped test objects.") - opt("--truthless", action="store_true", dest="truthless", default=False, - help="Disallow SQLAlchemy from truth-evaluating mapped test objects.") - opt("--serverside", action="callback", callback=_server_side_cursors, - help="Turn on server side cursors for PG") - opt("--mysql-engine", action="store", dest="mysql_engine", default=None, - help="Use the specified MySQL storage engine for all tables, default is " - "a db-default/InnoDB combo.") - opt("--table-option", action="append", dest="tableopts", default=[], - help="Add a dialect-specific table option, key=value") - opt("--write-profiles", action="store_true", dest="write_profiles", default=False, - help="Write/update profiling data.") - global file_config - file_config = configparser.ConfigParser() - file_config.read(['setup.cfg', 'test.cfg']) + + def make_option(name, **kw): + callback_ = kw.pop("callback", None) + if callback_: + def wrap_(option, opt_str, value, parser): + callback_(opt_str, value, parser) + kw["callback"] = wrap_ + opt(name, **kw) + + plugin_base.setup_options(make_option) + plugin_base.read_config() def configure(self, options, conf): - Plugin.configure(self, options, conf) - self.options = options - for fn in pre_configure: - fn(self.options, file_config) + super(NoseSQLAlchemy, self).configure(options, conf) + plugin_base.pre_begin(options) - def begin(self): - # Lazy setup of other options (post coverage) - for fn in post_configure: - fn(self.options, file_config) + plugin_base.set_coverage_flag(options.enable_plugin_coverage) - # late imports, has to happen after config as well - # as nose plugins like coverage - global util, fixtures, engines, exclusions, \ - assertions, warnings, profiling,\ - config - from sqlalchemy.testing import fixtures, engines, exclusions, \ - assertions, warnings, profiling, config - from sqlalchemy import util + global fixtures + from sqlalchemy.testing import fixtures + + def begin(self): + plugin_base.post_begin() def describeTest(self, test): return "" @@ -363,106 +66,22 @@ class NoseSQLAlchemy(Plugin): return False def wantClass(self, cls): - """Return true if you want the main test selector to collect - tests from this class, false if you don't, and None if you don't - care. 
- - :Parameters: - cls : class - The class being examined by the selector - - """ - if not issubclass(cls, fixtures.TestBase): - return False - elif cls.__name__.startswith('_'): - return False - else: - return True - - def _do_skips(self, cls): - from sqlalchemy.testing import config - if hasattr(cls, '__requires__'): - def test_suite(): - return 'ok' - test_suite.__name__ = cls.__name__ - for requirement in cls.__requires__: - check = getattr(config.requirements, requirement) - - if not check.enabled: - raise SkipTest( - check.reason if check.reason - else - ( - "'%s' unsupported on DB implementation '%s' == %s" % ( - cls.__name__, config.db.name, - config.db.dialect.server_version_info - ) - ) - ) - - if cls.__unsupported_on__: - spec = exclusions.db_spec(*cls.__unsupported_on__) - if spec(config.db): - raise SkipTest( - "'%s' unsupported on DB implementation '%s' == %s" % ( - cls.__name__, config.db.name, - config.db.dialect.server_version_info) - ) - - if getattr(cls, '__only_on__', None): - spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) - if not spec(config.db): - raise SkipTest( - "'%s' unsupported on DB implementation '%s' == %s" % ( - cls.__name__, config.db.name, - config.db.dialect.server_version_info) - ) - - if getattr(cls, '__skip_if__', False): - for c in getattr(cls, '__skip_if__'): - if c(): - raise SkipTest("'%s' skipped by %s" % ( - cls.__name__, c.__name__) - ) - - for db, op, spec in getattr(cls, '__excluded_on__', ()): - exclusions.exclude(db, op, spec, - "'%s' unsupported on DB %s version %s" % ( - cls.__name__, config.db.name, - exclusions._server_version(config.db))) + return plugin_base.want_class(cls) def beforeTest(self, test): - warnings.resetwarnings() - profiling._current_test = test.id() + plugin_base.before_test(test, test.id()) def afterTest(self, test): - engines.testing_reaper._after_test_ctx() - warnings.resetwarnings() - - def _setup_engine(self, ctx): - if getattr(ctx, '__engine_options__', None): - global _existing_engine - _existing_engine = config.db - config.db = engines.testing_engine(options=ctx.__engine_options__) - - def _restore_engine(self, ctx): - global _existing_engine - if _existing_engine is not None: - config.db = _existing_engine - _existing_engine = None + plugin_base.after_test(test) def startContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return - self._do_skips(ctx) - self._setup_engine(ctx) + plugin_base.start_test_class(ctx) def stopContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return - engines.testing_reaper._stop_test_ctx() - if not options.low_connections: - assertions.global_cleanup_assertions() - self._restore_engine(ctx) + plugin_base.stop_test_class(ctx) diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py new file mode 100644 index 000000000..50efdfcf0 --- /dev/null +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -0,0 +1,422 @@ +# plugin/plugin_base.py +# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Testing extensions. + +this module is designed to work as a testing-framework-agnostic library, +so that we can continue to support nose and also begin adding new functionality +via py.test. 
+ +""" + +from __future__ import absolute_import +from unittest import SkipTest +import sys +import re + +py3k = sys.version_info >= (3, 0) + +if py3k: + import configparser +else: + import ConfigParser as configparser + + +# late imports +fixtures = None +engines = None +exclusions = None +warnings = None +profiling = None +assertions = None +requirements = None +config = None +testing = None +util = None +file_config = None + + +logging = None +db_opts = {} +options = None + +def setup_options(make_option): + make_option("--log-info", action="callback", type="string", callback=_log, + help="turn on info logging for <LOG> (multiple OK)") + make_option("--log-debug", action="callback", type="string", callback=_log, + help="turn on debug logging for <LOG> (multiple OK)") + make_option("--db", action="append", type="string", dest="db", + help="Use prefab database uri. Multiple OK, " + "first one is run by default.") + make_option('--dbs', action='callback', callback=_list_dbs, + help="List available prefab dbs") + make_option("--dburi", action="append", type="string", dest="dburi", + help="Database uri. Multiple OK, first one is run by default.") + make_option("--dropfirst", action="store_true", dest="dropfirst", + help="Drop all tables in the target database first") + make_option("--mockpool", action="store_true", dest="mockpool", + help="Use mock pool (asserts only one connection used)") + make_option("--low-connections", action="store_true", dest="low_connections", + help="Use a low number of distinct connections - i.e. for Oracle TNS" + ) + make_option("--reversetop", action="store_true", dest="reversetop", default=False, + help="Use a random-ordering set implementation in the ORM (helps " + "reveal dependency issues)") + make_option("--requirements", action="callback", type="string", + callback=_requirements_opt, + help="requirements class for testing, overrides setup.cfg") + make_option("--with-cdecimal", action="store_true", dest="cdecimal", default=False, + help="Monkeypatch the cdecimal library into Python 'decimal' for all tests") + make_option("--serverside", action="callback", callback=_server_side_cursors, + help="Turn on server side cursors for PG") + make_option("--mysql-engine", action="store", dest="mysql_engine", default=None, + help="Use the specified MySQL storage engine for all tables, default is " + "a db-default/InnoDB combo.") + make_option("--tableopts", action="append", dest="tableopts", default=[], + help="Add a dialect-specific table option, key=value") + make_option("--write-profiles", action="store_true", dest="write_profiles", default=False, + help="Write/update profiling data.") + +def read_config(): + global file_config + file_config = configparser.ConfigParser() + file_config.read(['setup.cfg', 'test.cfg']) + +def pre_begin(opt): + """things to set up early, before coverage might be setup.""" + global options + options = opt + for fn in pre_configure: + fn(options, file_config) + +def set_coverage_flag(value): + options.has_coverage = value + +def post_begin(): + """things to set up later, once we know coverage is running.""" + # Lazy setup of other options (post coverage) + for fn in post_configure: + fn(options, file_config) + + # late imports, has to happen after config as well + # as nose plugins like coverage + global util, fixtures, engines, exclusions, \ + assertions, warnings, profiling,\ + config, testing + from sqlalchemy import testing + from sqlalchemy.testing import fixtures, engines, exclusions, \ + assertions, warnings, profiling, config + 
from sqlalchemy import util + + +def _log(opt_str, value, parser): + global logging + if not logging: + import logging + logging.basicConfig() + + if opt_str.endswith('-info'): + logging.getLogger(value).setLevel(logging.INFO) + elif opt_str.endswith('-debug'): + logging.getLogger(value).setLevel(logging.DEBUG) + + +def _list_dbs(*args): + print("Available --db options (use --dburi to override)") + for macro in sorted(file_config.options('db')): + print("%20s\t%s" % (macro, file_config.get('db', macro))) + sys.exit(0) + + +def _server_side_cursors(opt_str, value, parser): + db_opts['server_side_cursors'] = True + +def _requirements_opt(opt_str, value, parser): + _setup_requirements(value) + + +pre_configure = [] +post_configure = [] + + +def pre(fn): + pre_configure.append(fn) + return fn + + +def post(fn): + post_configure.append(fn) + return fn + + +@pre +def _setup_options(opt, file_config): + global options + options = opt + + +@pre +def _monkeypatch_cdecimal(options, file_config): + if options.cdecimal: + import cdecimal + sys.modules['decimal'] = cdecimal + + +@post +def _engine_uri(options, file_config): + from sqlalchemy.testing import engines, config + from sqlalchemy import testing + + if options.dburi: + db_urls = list(options.dburi) + else: + db_urls = [] + + if options.db: + for db_token in options.db: + for db in re.split(r'[,\s]+', db_token): + if db not in file_config.options('db'): + raise RuntimeError( + "Unknown URI specifier '%s'. Specify --dbs for known uris." + % db) + else: + db_urls.append(file_config.get('db', db)) + + if not db_urls: + db_urls.append(file_config.get('db', 'default')) + + for db_url in db_urls: + eng = engines.testing_engine(db_url, db_opts) + eng.connect().close() + config.Config.register(eng, db_opts, options, file_config, testing) + + config.db_opts = db_opts + + +@post +def _engine_pool(options, file_config): + if options.mockpool: + from sqlalchemy import pool + db_opts['poolclass'] = pool.AssertionPool + +@post +def _requirements(options, file_config): + + requirement_cls = file_config.get('sqla_testing', "requirement_cls") + _setup_requirements(requirement_cls) + +def _setup_requirements(argument): + from sqlalchemy.testing import config + from sqlalchemy import testing + + if config.requirements is not None: + return + + modname, clsname = argument.split(":") + + # importlib.import_module() only introduced in 2.7, a little + # late + mod = __import__(modname) + for component in modname.split(".")[1:]: + mod = getattr(mod, component) + req_cls = getattr(mod, clsname) + + config.requirements = testing.requires = req_cls() + +@post +def _prep_testing_database(options, file_config): + from sqlalchemy.testing import config + from sqlalchemy import schema, inspect + + if options.dropfirst: + for cfg in config.Config.all_configs(): + e = cfg.db + inspector = inspect(e) + try: + view_names = inspector.get_view_names() + except NotImplementedError: + pass + else: + for vname in view_names: + e.execute(schema._DropView(schema.Table(vname, schema.MetaData()))) + + if config.requirements.schemas.enabled_for_config(cfg): + try: + view_names = inspector.get_view_names(schema="test_schema") + except NotImplementedError: + pass + else: + for vname in view_names: + e.execute(schema._DropView( + schema.Table(vname, + schema.MetaData(), schema="test_schema"))) + + for tname in reversed(inspector.get_table_names(order_by="foreign_key")): + e.execute(schema.DropTable(schema.Table(tname, schema.MetaData()))) + + if 
config.requirements.schemas.enabled_for_config(cfg): + for tname in reversed(inspector.get_table_names( + order_by="foreign_key", schema="test_schema")): + e.execute(schema.DropTable( + schema.Table(tname, schema.MetaData(), schema="test_schema"))) + + +@post +def _set_table_options(options, file_config): + from sqlalchemy.testing import schema + + table_options = schema.table_options + for spec in options.tableopts: + key, value = spec.split('=') + table_options[key] = value + + if options.mysql_engine: + table_options['mysql_engine'] = options.mysql_engine + + +@post +def _reverse_topological(options, file_config): + if options.reversetop: + from sqlalchemy.orm.util import randomize_unitofwork + randomize_unitofwork() + + +@post +def _post_setup_options(opt, file_config): + from sqlalchemy.testing import config + config.options = options + config.file_config = file_config + + +@post +def _setup_profiling(options, file_config): + from sqlalchemy.testing import profiling + profiling._profile_stats = profiling.ProfileStatsFile( + file_config.get('sqla_testing', 'profile_file')) + + +def want_class(cls): + if not issubclass(cls, fixtures.TestBase): + return False + elif cls.__name__.startswith('_'): + return False + else: + return True + +def generate_sub_tests(cls, module): + if getattr(cls, '__multiple__', False): + for cfg in config.Config.all_configs(): + name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver) + subcls = type( + name, + (cls, ), + { + "__only_on__": (cfg.db.name, cfg.db.driver), + "__multiple__": False} + ) + setattr(module, name, subcls) + yield subcls + else: + yield cls + + +def start_test_class(cls): + _do_skips(cls) + _setup_engine(cls) + +def stop_test_class(cls): + engines.testing_reaper._stop_test_ctx() + if not options.low_connections: + assertions.global_cleanup_assertions() + _restore_engine() + +def _restore_engine(): + config._current.reset(testing) + +def _setup_engine(cls): + if getattr(cls, '__engine_options__', None): + eng = engines.testing_engine(options=cls.__engine_options__) + config._current.push_engine(eng, testing) + +def before_test(test, id_): + warnings.resetwarnings() + profiling._current_test = id_ + +def after_test(test): + engines.testing_reaper._after_test_ctx() + warnings.resetwarnings() + +def _do_skips(cls): + all_configs = set(config.Config.all_configs()) + reasons = [] + + if hasattr(cls, '__requires__'): + requirements = config.requirements + for config_obj in list(all_configs): + for requirement in cls.__requires__: + check = getattr(requirements, requirement) + + if check.predicate(config_obj): + all_configs.remove(config_obj) + if check.reason: + reasons.append(check.reason) + break + + if cls.__unsupported_on__: + spec = exclusions.db_spec(*cls.__unsupported_on__) + for config_obj in list(all_configs): + if spec(config_obj): + all_configs.remove(config_obj) + + if getattr(cls, '__only_on__', None): + spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) + for config_obj in list(all_configs): + if not spec(config_obj): + all_configs.remove(config_obj) + + + if getattr(cls, '__skip_if__', False): + for c in getattr(cls, '__skip_if__'): + if c(): + raise SkipTest("'%s' skipped by %s" % ( + cls.__name__, c.__name__) + ) + + for db_spec, op, spec in getattr(cls, '__excluded_on__', ()): + for config_obj in list(all_configs): + if exclusions.skip_if( + exclusions.SpecPredicate(db_spec, op, spec) + ).predicate(config_obj): + all_configs.remove(config_obj) + + + if not all_configs: + raise SkipTest( + "'%s' unsupported on DB 
implementation %s%s" % ( + cls.__name__, + ", ".join("'%s' = %s" % ( + config_obj.db.name, + config_obj.db.dialect.server_version_info) + for config_obj in config.Config.all_configs() + ), + ", ".join(reasons) + ) + ) + elif hasattr(cls, '__prefer__'): + non_preferred = set() + spec = exclusions.db_spec(*util.to_list(cls.__prefer__)) + for config_obj in all_configs: + if not spec(config_obj): + non_preferred.add(config_obj) + if all_configs.difference(non_preferred): + all_configs.difference_update(non_preferred) + + if config._current not in all_configs: + _setup_config(all_configs.pop(), cls) + +def _setup_config(config_obj, ctx): + config._current.push(config_obj, testing) + diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py new file mode 100644 index 000000000..352cbbd5f --- /dev/null +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -0,0 +1,125 @@ +import pytest +import argparse +import inspect +from . import plugin_base +import collections + +def pytest_addoption(parser): + group = parser.getgroup("sqlalchemy") + + def make_option(name, **kw): + callback_ = kw.pop("callback", None) + if callback_: + class CallableAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + callback_(option_string, values, parser) + kw["action"] = CallableAction + + group.addoption(name, **kw) + + plugin_base.setup_options(make_option) + plugin_base.read_config() + +def pytest_configure(config): + plugin_base.pre_begin(config.option) + + plugin_base.set_coverage_flag(bool(getattr(config.option, "cov_source", False))) + + plugin_base.post_begin() + + +def pytest_collection_modifyitems(session, config, items): + # look for all those classes that specify __multiple__ and + # expand them out into per-database test cases. + + # this is much easier to do within pytest_pycollect_makeitem, however + # pytest is iterating through cls.__dict__ as makeitem is + # called which causes a "dictionary changed size" error on py3k. + # I'd submit a pullreq for them to turn it into a list first, but + # it's to suit the rather odd use case here which is that we are adding + # new classes to a module on the fly. + + rebuilt_items = collections.defaultdict(list) + test_classes = set(item.parent for item in items) + for test_class in test_classes: + for sub_cls in plugin_base.generate_sub_tests(test_class.cls, test_class.parent.module): + if sub_cls is not test_class.cls: + list_ = rebuilt_items[test_class.cls] + + for inst in pytest.Class(sub_cls.__name__, + parent=test_class.parent.parent).collect(): + list_.extend(inst.collect()) + + newitems = [] + for item in items: + if item.parent.cls in rebuilt_items: + newitems.extend(rebuilt_items[item.parent.cls]) + rebuilt_items[item.parent.cls][:] = [] + else: + newitems.append(item) + + # seems like the functions attached to a test class aren't sorted already? + # is that true and why's that? 
(when using unittest, they're sorted) + items[:] = sorted(newitems, key=lambda item: ( + item.parent.parent.parent.name, + item.parent.parent.name, + item.name + ) + ) + + + +def pytest_pycollect_makeitem(collector, name, obj): + + if inspect.isclass(obj) and plugin_base.want_class(obj): + return pytest.Class(name, parent=collector) + elif inspect.isfunction(obj) and \ + name.startswith("test_") and \ + isinstance(collector, pytest.Instance): + return pytest.Function(name, parent=collector) + else: + return [] + +_current_class = None + +def pytest_runtest_setup(item): + # here we seem to get called only based on what we collected + # in pytest_collection_modifyitems. So to do class-based stuff + # we have to tear that out. + global _current_class + + if not isinstance(item, pytest.Function): + return + + # ... so we're doing a little dance here to figure it out... + if item.parent.parent is not _current_class: + + class_setup(item.parent.parent) + _current_class = item.parent.parent + + # this is needed for the class-level, to ensure that the + # teardown runs after the class is completed with its own + # class-level teardown... + item.parent.parent.addfinalizer(lambda: class_teardown(item.parent.parent)) + + test_setup(item) + +def pytest_runtest_teardown(item): + # ...but this works better as the hook here rather than + # using a finalizer, as the finalizer seems to get in the way + # of the test reporting failures correctly (you get a bunch of + # py.test assertion stuff instead) + test_teardown(item) + +def test_setup(item): + id_ = "%s.%s:%s" % (item.parent.module.__name__, item.parent.name, item.name) + plugin_base.before_test(item, id_) + +def test_teardown(item): + plugin_base.after_test(item) + +def class_setup(item): + plugin_base.start_test_class(item.cls) + +def class_teardown(item): + plugin_base.stop_test_class(item.cls) |
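
The core of this refactor is that plugin_base.setup_options() registers every option through a single make_option(name, **kw) callable using optparse-style (opt_str, value, parser) callbacks, and each frontend adapts that call: noseplugin wraps the callback to drop optparse's leading option argument, while pytestplugin converts the callback keyword into an argparse.Action subclass. Below is a minimal, standalone sketch of that adapter pattern; the option names, the _log stand-in, and the type-stripping in the argparse frontend are illustrative assumptions, not the real plugin code.

    # Sketch of the framework-agnostic option registration used by this commit.
    # Not the actual SQLAlchemy code; names here are stand-ins.
    import argparse
    import optparse


    def _log(opt_str, value, parser):
        # stand-in for plugin_base._log; just records what was requested
        print("would enable logging for %r via %s" % (value, opt_str))


    def setup_options(make_option):
        # framework-agnostic registration, in the style of plugin_base.setup_options()
        make_option("--log-info", action="callback", type="string", callback=_log,
                    help="turn on info logging for <LOG>")
        make_option("--dropfirst", action="store_true", dest="dropfirst",
                    help="Drop all tables in the target database first")


    def nose_style_parser():
        # what noseplugin does: optparse passes (option, opt_str, value, parser)
        # to callbacks, so wrap the callback to drop the leading `option` argument
        parser = optparse.OptionParser()

        def make_option(name, **kw):
            callback_ = kw.pop("callback", None)
            if callback_:
                def wrap_(option, opt_str, value, parser):
                    callback_(opt_str, value, parser)
                kw["callback"] = wrap_
            parser.add_option(name, **kw)

        setup_options(make_option)
        return parser


    def pytest_style_parser():
        # what pytestplugin does: argparse has no "callback" action, so convert
        # the callback keyword into a custom argparse.Action subclass
        parser = argparse.ArgumentParser()

        def make_option(name, **kw):
            callback_ = kw.pop("callback", None)
            # plain argparse wants a callable type, not optparse's "string";
            # the real py.test parser handles this itself
            kw.pop("type", None)
            if callback_:
                class CallableAction(argparse.Action):
                    def __call__(self, parser, namespace, values,
                                 option_string=None):
                        callback_(option_string, values, parser)
                kw["action"] = CallableAction
            parser.add_argument(name, **kw)

        setup_options(make_option)
        return parser


    if __name__ == "__main__":
        nose_style_parser().parse_args(["--log-info", "sqlalchemy.engine"])
        pytest_style_parser().parse_args(["--log-info", "sqlalchemy.engine"])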
