From 9e621d18aa84bf20640283e1e7a4abd17af47df9 Mon Sep 17 00:00:00 2001 From: Jack Zhou Date: Tue, 29 Jul 2014 11:49:52 -0700 Subject: Added documentation about interaction between `subqueryload` and LIMIT/OFFSET. --- doc/build/faq.rst | 84 ++++++++++++++++++++++++++++++++++++++++++++++ doc/build/orm/loading.rst | 15 +++++++++ doc/build/orm/tutorial.rst | 6 ++++ 3 files changed, 105 insertions(+) diff --git a/doc/build/faq.rst b/doc/build/faq.rst index 0c8314cb5..d642d1de8 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -622,6 +622,90 @@ The same idea applies to all the other arguments, such as ``foreign_keys``:: foo = relationship(Dest, foreign_keys=[foo_id, bar_id]) +.. _faq_subqueryload_sort: + +Why must I always ``ORDER BY`` a unique column when using ``subqueryload``? +---------------------------------------------------------------------------- + +The SQL standard prescribes that RDBMSs are free to return rows in any order it +deems appropriate, if no ``ORDER BY`` clause is specified. This even extends to +the case where the ``ORDER BY`` clause is not unique across all rows, i.e. rows +with the same value in the ``ORDER BY`` column(s) will not necessarily be +returned in a deterministic order. + +SQLAlchemy implements :func:`.orm.subqueryload` by issuing a separate query +(where the table specified in the relationship is joined to the original query) +and then attempting to match up the results in Python. This works fine +normally: + +.. sourcecode:: python+sql + + >>> session.query(User).options(subqueryload(User.addresses)).all() + {opensql}# the "main" query + SELECT users.id AS users_id + FROM users + {stop} + {opensql}# the "load" query issued by subqueryload + SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id + FROM (SELECT users.id AS users_id + FROM users) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id + +Notice how the main query is a subquery in the load query. When an +``OFFSET``/``LIMIT`` is involved, however, things get a bit tricky: + +.. sourcecode:: python+sql + + >>> user = session.query(User).options(subqueryload(User.addresses)).first() + {opensql}# the "main" query + SELECT users.id AS users_id + FROM users + LIMIT 1 + {stop} + {opensql}# the "load" query issued by subqueryload + SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id + FROM (SELECT users.id AS users_id + FROM users + LIMIT 1) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id + +The main query is still a subquery in the load query, but *it may return a +different set of results in the second query from the first* because it does +not have a deterministic sort order! Depending on database internals, there is +a chance we may get the following resultset for the two queries:: + + +--------+ + |users_id| + +--------+ + | 1| + +--------+ + + +------------+-----------------+---------------+ + |addresses_id|addresses_user_id|anon_1_users_id| + +------------+-----------------+---------------+ + | 3| 2| 2| + +------------+-----------------+---------------+ + | 4| 2| 2| + +------------+-----------------+---------------+ + +From SQLAlchemy's point of view, it didn't get any addresses back for user 1, +so ``user.addresses`` is empty. Oops. + +The solution to this problem is to always specify a deterministic sort order, +so that the main query always returns the same set of rows. 
This generally +means that you should :meth:`.Query.order_by` on a unique column on the table, +usually the primary key:: + + session.query(User).options(subqueryload(User.addresses)).order_by(User.id).first() + +You can get away with not doing a sort if the ``OFFSET``/``LIMIT`` does not +throw away any rows at all, but it's much simpler to remember to always ``ORDER +BY`` the primary key:: + + session.query(User).options(subqueryload(User.addresses)).filter(User.id == 1).first() + +Note that :func:`.joinedload` does not suffer from the same problem because +only one query is ever issued, so the load query cannot be different from the +main query. + Performance =========== diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 6c2fac004..27846b9b2 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -120,6 +120,21 @@ query options: # set children to load eagerly with a second statement session.query(Parent).options(subqueryload('children')).all() +.. _subquery_loading_tips: + +Subquery Loading Tips +^^^^^^^^^^^^^^^^^^^^^ + +If you have ``LIMIT`` or ``OFFSET`` in your query, you **must** ``ORDER BY`` a +unique column, generally the primary key of your table, in order to ensure +correct results (see :ref:`faq_subqueryload_sort`):: + + # incorrect + session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first() + + # correct + session.query(User).options(subqueryload(User.addresses)).order_by(User.name, User.id).first() + Loading Along Paths ------------------- diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst index f90dc48d2..e75eda1ee 100644 --- a/doc/build/orm/tutorial.rst +++ b/doc/build/orm/tutorial.rst @@ -1703,6 +1703,12 @@ very easy to use: >>> jack.addresses [, ] +.. warning:: + + If you use :func:`.subqueryload`, you should generally + :meth:`.Query.order_by` on a unique column in order to ensure correct + results. See :ref:`subquery_loading_tips`. + Joined Load ------------- -- cgit v1.2.1 From 191fd3e27e3ef90190f8315c33ba6eb97aeaf5d2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 14 Aug 2014 15:38:30 -0400 Subject: - proof of concept --- lib/sqlalchemy/orm/events.py | 9 +++++ lib/sqlalchemy/orm/persistence.py | 81 +++++++++++++++++++++------------------ lib/sqlalchemy/orm/session.py | 34 ++++++++++++++++ lib/sqlalchemy/orm/unitofwork.py | 28 +++++++++++++- 4 files changed, 113 insertions(+), 39 deletions(-) diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index aa99673ba..097726c62 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1453,6 +1453,15 @@ class SessionEvents(event.Events): """ + def before_bulk_save(self, session, flush_context, objects): + """""" + + def after_bulk_save(self, session, flush_context, objects): + """""" + + def after_bulk_save_postexec(self, session, flush_context, objects): + """""" + def after_begin(self, session, transaction, connection): """Execute after a transaction is begun on a connection diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 9d39c39b0..511a324be 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -23,7 +23,9 @@ from ..sql import expression from . import loading -def save_obj(base_mapper, states, uowtransaction, single=False): +def save_obj( + base_mapper, states, uowtransaction, single=False, + bookkeeping=True): """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects. 
@@ -43,13 +45,14 @@ def save_obj(base_mapper, states, uowtransaction, single=False): states_to_insert, states_to_update = _organize_states_for_save( base_mapper, states, - uowtransaction) + uowtransaction, bookkeeping) cached_connections = _cached_connection_dict(base_mapper) for table, mapper in base_mapper._sorted_tables.items(): insert = _collect_insert_commands(base_mapper, uowtransaction, - table, states_to_insert) + table, states_to_insert, + bookkeeping) update = _collect_update_commands(base_mapper, uowtransaction, table, states_to_update) @@ -65,7 +68,8 @@ def save_obj(base_mapper, states, uowtransaction, single=False): mapper, table, insert) _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update) + states_to_insert, states_to_update, + bookkeeping) def post_update(base_mapper, states, uowtransaction, post_update_cols): @@ -121,7 +125,8 @@ def delete_obj(base_mapper, states, uowtransaction): mapper.dispatch.after_delete(mapper, connection, state) -def _organize_states_for_save(base_mapper, states, uowtransaction): +def _organize_states_for_save( + base_mapper, states, uowtransaction, bookkeeping): """Make an initial pass across a set of states for INSERT or UPDATE. @@ -158,7 +163,7 @@ def _organize_states_for_save(base_mapper, states, uowtransaction): # no instance_key attached to it), and another instance # with the same identity key already exists as persistent. # convert to an UPDATE if so. - if not has_identity and \ + if bookkeeping and not has_identity and \ instance_key in uowtransaction.session.identity_map: instance = \ uowtransaction.session.identity_map[instance_key] @@ -230,7 +235,7 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction): def _collect_insert_commands(base_mapper, uowtransaction, table, - states_to_insert): + states_to_insert, bookkeeping): """Identify sets of values to use in INSERT statements for a list of states. @@ -261,12 +266,12 @@ def _collect_insert_commands(base_mapper, uowtransaction, table, value = state_dict.get(prop.key, None) if value is None: - if col in pks: + if bookkeeping and col in pks: has_all_pks = False elif col.default is None and \ col.server_default is None: params[col.key] = value - elif col.server_default is not None and \ + elif bookkeeping and col.server_default is not None and \ mapper.base_mapper.eager_defaults: has_all_defaults = False @@ -756,7 +761,8 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, def _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update): + states_to_insert, states_to_update, + bookkeeping): """finalize state on states that have been inserted or updated, including calling after_insert/after_update events. @@ -765,33 +771,34 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, instance_key, row_switch in states_to_insert + \ states_to_update: - if mapper._readonly_props: - readonly = state.unmodified_intersection( - [p.key for p in mapper._readonly_props - if p.expire_on_flush or p.key not in state.dict] - ) - if readonly: - state._expire_attributes(state.dict, readonly) - - # if eager_defaults option is enabled, load - # all expired cols. Else if we have a version_id_col, make sure - # it isn't expired. 
- toload_now = [] - - if base_mapper.eager_defaults: - toload_now.extend(state._unloaded_non_object) - elif mapper.version_id_col is not None and \ - mapper.version_id_generator is False: - prop = mapper._columntoproperty[mapper.version_id_col] - if prop.key in state.unloaded: - toload_now.extend([prop.key]) - - if toload_now: - state.key = base_mapper._identity_key_from_state(state) - loading.load_on_ident( - uowtransaction.session.query(base_mapper), - state.key, refresh_state=state, - only_load_props=toload_now) + if bookkeeping: + if mapper._readonly_props: + readonly = state.unmodified_intersection( + [p.key for p in mapper._readonly_props + if p.expire_on_flush or p.key not in state.dict] + ) + if readonly: + state._expire_attributes(state.dict, readonly) + + # if eager_defaults option is enabled, load + # all expired cols. Else if we have a version_id_col, make sure + # it isn't expired. + toload_now = [] + + if base_mapper.eager_defaults: + toload_now.extend(state._unloaded_non_object) + elif mapper.version_id_col is not None and \ + mapper.version_id_generator is False: + prop = mapper._columntoproperty[mapper.version_id_col] + if prop.key in state.unloaded: + toload_now.extend([prop.key]) + + if toload_now: + state.key = base_mapper._identity_key_from_state(state) + loading.load_on_ident( + uowtransaction.session.query(base_mapper), + state.key, refresh_state=state, + only_load_props=toload_now) # call after_XXX extensions if not has_identity: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 036045dba..2455c803a 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2033,6 +2033,40 @@ class Session(_SessionClassMethods): with util.safe_reraise(): transaction.rollback(_capture_exception=True) + def bulk_save(self, objects): + self._flushing = True + flush_context = UOWTransaction(self) + + if self.dispatch.before_bulk_save: + self.dispatch.before_bulk_save( + self, flush_context, objects) + + flush_context.transaction = transaction = self.begin( + subtransactions=True) + try: + self._warn_on_events = True + try: + flush_context.bulk_save(objects) + finally: + self._warn_on_events = False + + self.dispatch.after_bulk_save( + self, flush_context, objects + ) + + flush_context.finalize_flush_changes() + + self.dispatch.after_bulk_save_postexec( + self, flush_context, objects) + + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + finally: + self._flushing = False + def is_modified(self, instance, include_collections=True, passive=True): """Return ``True`` if the given instance has locally diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index 71e61827b..8df24e95a 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -16,6 +16,7 @@ organizes them in order of dependency, and executes. from .. import util, event from ..util import topological from . import attributes, persistence, util as orm_util +import itertools def track_cascade_events(descriptor, prop): @@ -379,14 +380,37 @@ class UOWTransaction(object): execute() method has succeeded and the transaction has been committed. 
""" + if not self.states: + return + states = set(self.states) isdel = set( s for (s, (isdelete, listonly)) in self.states.items() if isdelete ) other = states.difference(isdel) - self.session._remove_newly_deleted(isdel) - self.session._register_newly_persistent(other) + if isdel: + self.session._remove_newly_deleted(isdel) + if other: + self.session._register_newly_persistent(other) + + def bulk_save(self, objects): + for (base_mapper, in_session), states in itertools.groupby( + (attributes.instance_state(obj) for obj in objects), + lambda state: + ( + state.mapper.base_mapper, + state.key is self.session.hash_key + )): + + persistence.save_obj( + base_mapper, list(states), self, bookkeeping=in_session) + + if in_session: + self.states.update( + (state, (False, False)) + for state in states + ) class IterateMappersMixin(object): -- cgit v1.2.1 From 6bc676f56d57d5ea4dc298f63d0e3a77c0f4a4a1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 14 Aug 2014 17:44:58 -0400 Subject: dev --- doc/build/faq.rst | 81 +++++++++++++++++++++++++++++++-------- lib/sqlalchemy/orm/persistence.py | 59 +++++++++++++++------------- lib/sqlalchemy/orm/session.py | 23 ++++++++--- lib/sqlalchemy/orm/state.py | 15 ++++++++ lib/sqlalchemy/orm/unitofwork.py | 10 ++--- 5 files changed, 135 insertions(+), 53 deletions(-) diff --git a/doc/build/faq.rst b/doc/build/faq.rst index 3dc81026b..b777f908f 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -907,10 +907,12 @@ methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: classics-MacBook-Pro:sqlalchemy classic$ python test.py - SQLAlchemy ORM: Total time for 100000 records 14.3528850079 secs - SQLAlchemy ORM pk given: Total time for 100000 records 10.0164160728 secs - SQLAlchemy Core: Total time for 100000 records 0.775382995605 secs - sqlite3: Total time for 100000 records 0.676795005798 sec + SQLAlchemy ORM: Total time for 100000 records 12.4703581333 secs + SQLAlchemy ORM pk given: Total time for 100000 records 7.32723999023 secs + SQLAlchemy ORM bulk_save_objects(): Total time for 100000 records 3.43464708328 secs + SQLAlchemy ORM bulk_save_mappings(): Total time for 100000 records 2.37040805817 secs + SQLAlchemy Core: Total time for 100000 records 0.495043992996 secs + sqlite3: Total time for 100000 records 0.508063077927 sec We can reduce the time by a factor of three using recent versions of `Pypy `_:: @@ -933,11 +935,13 @@ Script:: DBSession = scoped_session(sessionmaker()) engine = None + class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) + def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'): global engine engine = create_engine(dbname, echo=False) @@ -946,69 +950,114 @@ Script:: Base.metadata.drop_all(engine) Base.metadata.create_all(engine) + def test_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() - for i in range(n): + for i in xrange(n): customer = Customer() customer.name = 'NAME ' + str(i) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() - print("SQLAlchemy ORM: Total time for " + str(n) + - " records " + str(time.time() - t0) + " secs") + print( + "SQLAlchemy ORM: Total time for " + str(n) + + " records " + str(time.time() - t0) + " secs") + def test_sqlalchemy_orm_pk_given(n=100000): init_sqlalchemy() t0 = time.time() - for i in range(n): + for i in xrange(n): customer = Customer(id=i+1, name="NAME " + str(i)) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() 
DBSession.commit() - print("SQLAlchemy ORM pk given: Total time for " + str(n) + + print( + "SQLAlchemy ORM pk given: Total time for " + str(n) + + " records " + str(time.time() - t0) + " secs") + + + def test_sqlalchemy_orm_bulk_save(n=100000): + init_sqlalchemy() + t0 = time.time() + n1 = n + while n1 > 0: + n1 = n1 - 10000 + DBSession.bulk_save_objects( + [ + Customer(name="NAME " + str(i)) + for i in xrange(min(10000, n1)) + ] + ) + DBSession.commit() + print( + "SQLAlchemy ORM bulk_save_objects(): Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") + + def test_sqlalchemy_orm_bulk_save_mappings(n=100000): + init_sqlalchemy() + t0 = time.time() + DBSession.bulk_save_mappings( + Customer, + [ + dict(name="NAME " + str(i)) + for i in xrange(n) + ] + ) + DBSession.commit() + print( + "SQLAlchemy ORM bulk_save_mappings(): Total time for " + str(n) + + " records " + str(time.time() - t0) + " secs") + + def test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() engine.execute( Customer.__table__.insert(), - [{"name": 'NAME ' + str(i)} for i in range(n)] + [{"name": 'NAME ' + str(i)} for i in xrange(n)] ) - print("SQLAlchemy Core: Total time for " + str(n) + + print( + "SQLAlchemy Core: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") + def init_sqlite3(dbname): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute("DROP TABLE IF EXISTS customer") - c.execute("CREATE TABLE customer (id INTEGER NOT NULL, " - "name VARCHAR(255), PRIMARY KEY(id))") + c.execute( + "CREATE TABLE customer (id INTEGER NOT NULL, " + "name VARCHAR(255), PRIMARY KEY(id))") conn.commit() return conn + def test_sqlite3(n=100000, dbname='sqlite3.db'): conn = init_sqlite3(dbname) c = conn.cursor() t0 = time.time() - for i in range(n): + for i in xrange(n): row = ('NAME ' + str(i),) c.execute("INSERT INTO customer (name) VALUES (?)", row) conn.commit() - print("sqlite3: Total time for " + str(n) + + print( + "sqlite3: Total time for " + str(n) + " records " + str(time.time() - t0) + " sec") if __name__ == '__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) + test_sqlalchemy_orm_bulk_save(100000) + test_sqlalchemy_orm_bulk_save_mappings(100000) test_sqlalchemy_core(100000) test_sqlite3(100000) - Sessions / Queries =================== diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 511a324be..64c8440c4 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -18,7 +18,7 @@ import operator from itertools import groupby from .. import sql, util, exc as sa_exc, schema from . import attributes, sync, exc as orm_exc, evaluator -from .base import _state_mapper, state_str, _attr_as_key +from .base import state_str, _attr_as_key from ..sql import expression from . 
import loading @@ -65,7 +65,8 @@ def save_obj( if insert: _emit_insert_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, insert) + mapper, table, insert, + bookkeeping) _finalize_insert_update_commands(base_mapper, uowtransaction, states_to_insert, states_to_update, @@ -140,13 +141,16 @@ def _organize_states_for_save( states_to_insert = [] states_to_update = [] + instance_key = None for state, dict_, mapper, connection in _connections_for_states( base_mapper, uowtransaction, states): has_identity = bool(state.key) - instance_key = state.key or mapper._identity_key_from_state(state) + + if bookkeeping: + instance_key = state.key or mapper._identity_key_from_state(state) row_switch = None @@ -188,12 +192,12 @@ def _organize_states_for_save( if not has_identity and not row_switch: states_to_insert.append( (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) + has_identity, row_switch) ) else: states_to_update.append( (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) + has_identity, row_switch) ) return states_to_insert, states_to_update @@ -242,7 +246,8 @@ def _collect_insert_commands(base_mapper, uowtransaction, table, """ insert = [] for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_insert: + row_switch in states_to_insert: + if table not in mapper._pks_by_table: continue @@ -265,13 +270,13 @@ def _collect_insert_commands(base_mapper, uowtransaction, table, prop = mapper._columntoproperty[col] value = state_dict.get(prop.key, None) - if value is None: - if bookkeeping and col in pks: + if bookkeeping and value is None: + if col in pks: has_all_pks = False elif col.default is None and \ col.server_default is None: params[col.key] = value - elif bookkeeping and col.server_default is not None and \ + elif col.server_default is not None and \ mapper.base_mapper.eager_defaults: has_all_defaults = False @@ -301,7 +306,7 @@ def _collect_update_commands(base_mapper, uowtransaction, update = [] for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_update: + row_switch in states_to_update: if table not in mapper._pks_by_table: continue @@ -567,7 +572,8 @@ def _emit_update_statements(base_mapper, uowtransaction, def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, insert): + cached_connections, mapper, table, insert, + bookkeeping): """Emit INSERT statements corresponding to value lists collected by _collect_insert_commands().""" @@ -593,19 +599,20 @@ def _emit_insert_statements(base_mapper, uowtransaction, c = cached_connections[connection].\ execute(statement, multiparams) - for (state, state_dict, params, mapper_rec, - conn, value_params, has_all_pks, has_all_defaults), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - c, - last_inserted_params, - value_params) + if bookkeeping: + for (state, state_dict, params, mapper_rec, + conn, value_params, has_all_pks, has_all_defaults), \ + last_inserted_params in \ + zip(records, c.context.compiled_parameters): + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + last_inserted_params, + value_params) else: if not has_all_defaults and base_mapper.eager_defaults: @@ -768,7 +775,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, """ for state, state_dict, mapper, connection, has_identity, \ - 
instance_key, row_switch in states_to_insert + \ + row_switch in states_to_insert + \ states_to_update: if bookkeeping: @@ -871,7 +878,7 @@ def _connections_for_states(base_mapper, uowtransaction, states): if connection_callable: connection = connection_callable(base_mapper, state.obj()) - mapper = _state_mapper(state) + mapper = state.manager.mapper yield state, state.dict, mapper, connection diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 2455c803a..546355611 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -482,7 +482,7 @@ class Session(_SessionClassMethods): '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', 'close', 'commit', 'connection', 'delete', 'execute', 'expire', 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', - 'is_modified', + 'is_modified', 'bulk_save_objects', 'bulk_save_mappings', 'merge', 'query', 'refresh', 'rollback', 'scalar') @@ -2033,31 +2033,42 @@ class Session(_SessionClassMethods): with util.safe_reraise(): transaction.rollback(_capture_exception=True) - def bulk_save(self, objects): + def bulk_save_objects(self, objects): + self._bulk_save((attributes.instance_state(obj) for obj in objects)) + + def bulk_save_mappings(self, mapper, mappings): + mapper = class_mapper(mapper) + + self._bulk_save(( + statelib.MappingState(mapper, mapping) + for mapping in mappings) + ) + + def _bulk_save(self, states): self._flushing = True flush_context = UOWTransaction(self) if self.dispatch.before_bulk_save: self.dispatch.before_bulk_save( - self, flush_context, objects) + self, flush_context, states) flush_context.transaction = transaction = self.begin( subtransactions=True) try: self._warn_on_events = True try: - flush_context.bulk_save(objects) + flush_context.bulk_save(states) finally: self._warn_on_events = False self.dispatch.after_bulk_save( - self, flush_context, objects + self, flush_context, states ) flush_context.finalize_flush_changes() self.dispatch.after_bulk_save_postexec( - self, flush_context, objects) + self, flush_context, states) transaction.commit() diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index fe8ccd222..e941bc1a4 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -580,6 +580,21 @@ class InstanceState(interfaces.InspectionAttr): state._strong_obj = None +class MappingState(InstanceState): + committed_state = {} + callables = {} + + def __init__(self, mapper, mapping): + self.class_ = mapper.class_ + self.manager = mapper.class_manager + self.modified = True + self._dict = mapping + + @property + def dict(self): + return self._dict + + class AttributeState(object): """Provide an inspection interface corresponding to a particular attribute on a particular mapped object. 
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index 8df24e95a..bc8a0f556 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -394,9 +394,9 @@ class UOWTransaction(object): if other: self.session._register_newly_persistent(other) - def bulk_save(self, objects): - for (base_mapper, in_session), states in itertools.groupby( - (attributes.instance_state(obj) for obj in objects), + def bulk_save(self, states): + for (base_mapper, in_session), states_ in itertools.groupby( + states, lambda state: ( state.mapper.base_mapper, @@ -404,12 +404,12 @@ class UOWTransaction(object): )): persistence.save_obj( - base_mapper, list(states), self, bookkeeping=in_session) + base_mapper, list(states_), self, bookkeeping=in_session) if in_session: self.states.update( (state, (False, False)) - for state in states + for state in states_ ) -- cgit v1.2.1 From 591f2e4ed2d455cb2c5b9ece43d79fde4b109510 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 14 Aug 2014 19:47:23 -0400 Subject: - change to be represented as two very fast bulk_insert() and bulk_update() methods --- doc/build/faq.rst | 37 ++---- lib/sqlalchemy/orm/events.py | 9 +- lib/sqlalchemy/orm/persistence.py | 255 ++++++++++++++++++++++++++------------ lib/sqlalchemy/orm/session.py | 57 ++++----- lib/sqlalchemy/orm/state.py | 15 --- lib/sqlalchemy/orm/unitofwork.py | 22 +--- 6 files changed, 223 insertions(+), 172 deletions(-) diff --git a/doc/build/faq.rst b/doc/build/faq.rst index b777f908f..487f5b953 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -907,12 +907,11 @@ methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: classics-MacBook-Pro:sqlalchemy classic$ python test.py - SQLAlchemy ORM: Total time for 100000 records 12.4703581333 secs - SQLAlchemy ORM pk given: Total time for 100000 records 7.32723999023 secs - SQLAlchemy ORM bulk_save_objects(): Total time for 100000 records 3.43464708328 secs - SQLAlchemy ORM bulk_save_mappings(): Total time for 100000 records 2.37040805817 secs - SQLAlchemy Core: Total time for 100000 records 0.495043992996 secs - sqlite3: Total time for 100000 records 0.508063077927 sec + SQLAlchemy ORM: Total time for 100000 records 12.0471920967 secs + SQLAlchemy ORM pk given: Total time for 100000 records 7.06283402443 secs + SQLAlchemy ORM bulk_save_objects(): Total time for 100000 records 0.856323003769 secs + SQLAlchemy Core: Total time for 100000 records 0.485800027847 secs + sqlite3: Total time for 100000 records 0.487842082977 sec We can reduce the time by a factor of three using recent versions of `Pypy `_:: @@ -980,15 +979,16 @@ Script:: " records " + str(time.time() - t0) + " secs") - def test_sqlalchemy_orm_bulk_save(n=100000): + def test_sqlalchemy_orm_bulk_insert(n=100000): init_sqlalchemy() t0 = time.time() n1 = n while n1 > 0: n1 = n1 - 10000 - DBSession.bulk_save_objects( + DBSession.bulk_insert_mappings( + Customer, [ - Customer(name="NAME " + str(i)) + dict(name="NAME " + str(i)) for i in xrange(min(10000, n1)) ] ) @@ -998,22 +998,6 @@ Script:: " records " + str(time.time() - t0) + " secs") - def test_sqlalchemy_orm_bulk_save_mappings(n=100000): - init_sqlalchemy() - t0 = time.time() - DBSession.bulk_save_mappings( - Customer, - [ - dict(name="NAME " + str(i)) - for i in xrange(n) - ] - ) - DBSession.commit() - print( - "SQLAlchemy ORM bulk_save_mappings(): Total time for " + str(n) + - " records " + str(time.time() - t0) + " secs") - - def 
test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() @@ -1052,8 +1036,7 @@ Script:: if __name__ == '__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) - test_sqlalchemy_orm_bulk_save(100000) - test_sqlalchemy_orm_bulk_save_mappings(100000) + test_sqlalchemy_orm_bulk_insert(100000) test_sqlalchemy_core(100000) test_sqlite3(100000) diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 097726c62..37ea3071b 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1453,13 +1453,16 @@ class SessionEvents(event.Events): """ - def before_bulk_save(self, session, flush_context, objects): + def before_bulk_insert(self, session, flush_context, mapper, mappings): """""" - def after_bulk_save(self, session, flush_context, objects): + def after_bulk_insert(self, session, flush_context, mapper, mappings): """""" - def after_bulk_save_postexec(self, session, flush_context, objects): + def before_bulk_update(self, session, flush_context, mapper, mappings): + """""" + + def after_bulk_update(self, session, flush_context, mapper, mappings): """""" def after_begin(self, session, transaction, connection): diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 64c8440c4..a8d4bd695 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -23,9 +23,104 @@ from ..sql import expression from . import loading +def bulk_insert(mapper, mappings, uowtransaction): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + if uowtransaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_insert()") + + connection = uowtransaction.transaction.connection(base_mapper) + + for table, sub_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(sub_mapper): + continue + + to_translate = dict( + (mapper._columntoproperty[col].key, col.key) + for col in mapper._cols_by_table[table] + ) + has_version_generator = mapper.version_id_generator is not False and \ + mapper.version_id_col is not None + multiparams = [] + for mapping in mappings: + params = dict( + (k, mapping.get(v)) for k, v in to_translate.items() + ) + if has_version_generator: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator(None) + multiparams.append(params) + + statement = base_mapper._memo(('insert', table), table.insert) + cached_connections[connection].execute(statement, multiparams) + + +def bulk_update(mapper, mappings, uowtransaction): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + if uowtransaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_update()") + + connection = uowtransaction.transaction.connection(base_mapper) + + for table, sub_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(sub_mapper): + continue + + needs_version_id = sub_mapper.version_id_col is not None and \ + table.c.contains_column(sub_mapper.version_id_col) + + def update_stmt(): + return _update_stmt_for_mapper(sub_mapper, table, needs_version_id) + + statement = base_mapper._memo(('update', table), update_stmt) + + pks = mapper._pks_by_table[table] + to_translate = dict( + (mapper._columntoproperty[col].key, col._label + if col in pks else col.key) + for col in mapper._cols_by_table[table] + ) + + for colnames, sub_mappings 
in groupby( + mappings, + lambda mapping: sorted(tuple(mapping.keys()))): + + multiparams = [] + for mapping in sub_mappings: + params = dict( + (to_translate[k], v) for k, v in mapping.items() + ) + multiparams.append(params) + + c = cached_connections[connection].execute(statement, multiparams) + + rows = c.rowcount + + if connection.dialect.supports_sane_rowcount: + if rows != len(multiparams): + raise orm_exc.StaleDataError( + "UPDATE statement on table '%s' expected to " + "update %d row(s); %d were matched." % + (table.description, len(multiparams), rows)) + + elif needs_version_id: + util.warn("Dialect %s does not support updated rowcount " + "- versioning cannot be verified." % + c.dialect.dialect_description, + stacklevel=12) + + def save_obj( - base_mapper, states, uowtransaction, single=False, - bookkeeping=True): + base_mapper, states, uowtransaction, single=False): """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects. @@ -45,14 +140,13 @@ def save_obj( states_to_insert, states_to_update = _organize_states_for_save( base_mapper, states, - uowtransaction, bookkeeping) + uowtransaction) cached_connections = _cached_connection_dict(base_mapper) for table, mapper in base_mapper._sorted_tables.items(): insert = _collect_insert_commands(base_mapper, uowtransaction, - table, states_to_insert, - bookkeeping) + table, states_to_insert) update = _collect_update_commands(base_mapper, uowtransaction, table, states_to_update) @@ -65,12 +159,11 @@ def save_obj( if insert: _emit_insert_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, insert, - bookkeeping) + mapper, table, insert) - _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update, - bookkeeping) + _finalize_insert_update_commands( + base_mapper, uowtransaction, + states_to_insert, states_to_update) def post_update(base_mapper, states, uowtransaction, post_update_cols): @@ -126,8 +219,7 @@ def delete_obj(base_mapper, states, uowtransaction): mapper.dispatch.after_delete(mapper, connection, state) -def _organize_states_for_save( - base_mapper, states, uowtransaction, bookkeeping): +def _organize_states_for_save(base_mapper, states, uowtransaction): """Make an initial pass across a set of states for INSERT or UPDATE. @@ -149,8 +241,7 @@ def _organize_states_for_save( has_identity = bool(state.key) - if bookkeeping: - instance_key = state.key or mapper._identity_key_from_state(state) + instance_key = state.key or mapper._identity_key_from_state(state) row_switch = None @@ -167,7 +258,7 @@ def _organize_states_for_save( # no instance_key attached to it), and another instance # with the same identity key already exists as persistent. # convert to an UPDATE if so. - if bookkeeping and not has_identity and \ + if not has_identity and \ instance_key in uowtransaction.session.identity_map: instance = \ uowtransaction.session.identity_map[instance_key] @@ -239,7 +330,7 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction): def _collect_insert_commands(base_mapper, uowtransaction, table, - states_to_insert, bookkeeping): + states_to_insert): """Identify sets of values to use in INSERT statements for a list of states. 
@@ -270,7 +361,7 @@ def _collect_insert_commands(base_mapper, uowtransaction, table, prop = mapper._columntoproperty[col] value = state_dict.get(prop.key, None) - if bookkeeping and value is None: + if value is None: if col in pks: has_all_pks = False elif col.default is None and \ @@ -481,6 +572,28 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, return delete +def _update_stmt_for_mapper(mapper, table, needs_version_id): + clause = sql.and_() + + for col in mapper._pks_by_table[table]: + clause.clauses.append(col == sql.bindparam(col._label, + type_=col.type)) + + if needs_version_id: + clause.clauses.append( + mapper.version_id_col == sql.bindparam( + mapper.version_id_col._label, + type_=mapper.version_id_col.type)) + + stmt = table.update(clause) + if mapper.base_mapper.eager_defaults: + stmt = stmt.return_defaults() + elif mapper.version_id_col is not None: + stmt = stmt.return_defaults(mapper.version_id_col) + + return stmt + + def _emit_update_statements(base_mapper, uowtransaction, cached_connections, mapper, table, update): """Emit UPDATE statements corresponding to value lists collected @@ -490,25 +603,7 @@ def _emit_update_statements(base_mapper, uowtransaction, table.c.contains_column(mapper.version_id_col) def update_stmt(): - clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - if needs_version_id: - clause.clauses.append( - mapper.version_id_col == sql.bindparam( - mapper.version_id_col._label, - type_=mapper.version_id_col.type)) - - stmt = table.update(clause) - if mapper.base_mapper.eager_defaults: - stmt = stmt.return_defaults() - elif mapper.version_id_col is not None: - stmt = stmt.return_defaults(mapper.version_id_col) - - return stmt + return _update_stmt_for_mapper(mapper, table, needs_version_id) statement = base_mapper._memo(('update', table), update_stmt) @@ -572,8 +667,7 @@ def _emit_update_statements(base_mapper, uowtransaction, def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, insert, - bookkeeping): + cached_connections, mapper, table, insert): """Emit INSERT statements corresponding to value lists collected by _collect_insert_commands().""" @@ -599,20 +693,19 @@ def _emit_insert_statements(base_mapper, uowtransaction, c = cached_connections[connection].\ execute(statement, multiparams) - if bookkeeping: - for (state, state_dict, params, mapper_rec, - conn, value_params, has_all_pks, has_all_defaults), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - c, - last_inserted_params, - value_params) + for (state, state_dict, params, mapper_rec, + conn, value_params, has_all_pks, has_all_defaults), \ + last_inserted_params in \ + zip(records, c.context.compiled_parameters): + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + last_inserted_params, + value_params) else: if not has_all_defaults and base_mapper.eager_defaults: @@ -768,8 +861,7 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, def _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update, - bookkeeping): + states_to_insert, states_to_update): """finalize state on states that have been inserted or updated, including calling after_insert/after_update events. 
@@ -778,34 +870,33 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, row_switch in states_to_insert + \ states_to_update: - if bookkeeping: - if mapper._readonly_props: - readonly = state.unmodified_intersection( - [p.key for p in mapper._readonly_props - if p.expire_on_flush or p.key not in state.dict] - ) - if readonly: - state._expire_attributes(state.dict, readonly) - - # if eager_defaults option is enabled, load - # all expired cols. Else if we have a version_id_col, make sure - # it isn't expired. - toload_now = [] - - if base_mapper.eager_defaults: - toload_now.extend(state._unloaded_non_object) - elif mapper.version_id_col is not None and \ - mapper.version_id_generator is False: - prop = mapper._columntoproperty[mapper.version_id_col] - if prop.key in state.unloaded: - toload_now.extend([prop.key]) - - if toload_now: - state.key = base_mapper._identity_key_from_state(state) - loading.load_on_ident( - uowtransaction.session.query(base_mapper), - state.key, refresh_state=state, - only_load_props=toload_now) + if mapper._readonly_props: + readonly = state.unmodified_intersection( + [p.key for p in mapper._readonly_props + if p.expire_on_flush or p.key not in state.dict] + ) + if readonly: + state._expire_attributes(state.dict, readonly) + + # if eager_defaults option is enabled, load + # all expired cols. Else if we have a version_id_col, make sure + # it isn't expired. + toload_now = [] + + if base_mapper.eager_defaults: + toload_now.extend(state._unloaded_non_object) + elif mapper.version_id_col is not None and \ + mapper.version_id_generator is False: + prop = mapper._columntoproperty[mapper.version_id_col] + if prop.key in state.unloaded: + toload_now.extend([prop.key]) + + if toload_now: + state.key = base_mapper._identity_key_from_state(state) + loading.load_on_ident( + uowtransaction.session.query(base_mapper), + state.key, refresh_state=state, + only_load_props=toload_now) # call after_XXX extensions if not has_identity: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 546355611..3199a4332 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -20,6 +20,7 @@ from .base import ( _class_to_mapper, _state_mapper, object_state, _none_set, state_str, instance_str ) +import itertools from .unitofwork import UOWTransaction from . 
import state as statelib import sys @@ -482,7 +483,8 @@ class Session(_SessionClassMethods): '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', 'close', 'commit', 'connection', 'delete', 'execute', 'expire', 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', - 'is_modified', 'bulk_save_objects', 'bulk_save_mappings', + 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings', + 'bulk_update_mappings', 'merge', 'query', 'refresh', 'rollback', 'scalar') @@ -2034,42 +2036,41 @@ class Session(_SessionClassMethods): transaction.rollback(_capture_exception=True) def bulk_save_objects(self, objects): - self._bulk_save((attributes.instance_state(obj) for obj in objects)) + for (mapper, isupdate), states in itertools.groupby( + (attributes.instance_state(obj) for obj in objects), + lambda state: (state.mapper, state.key is not None) + ): + if isupdate: + self.bulk_update_mappings(mapper, (s.dict for s in states)) + else: + self.bulk_insert_mappings(mapper, (s.dict for s in states)) - def bulk_save_mappings(self, mapper, mappings): - mapper = class_mapper(mapper) + def bulk_insert_mappings(self, mapper, mappings): + self._bulk_save_mappings(mapper, mappings, False) - self._bulk_save(( - statelib.MappingState(mapper, mapping) - for mapping in mappings) - ) + def bulk_update_mappings(self, mapper, mappings): + self._bulk_save_mappings(mapper, mappings, True) - def _bulk_save(self, states): + def _bulk_save_mappings(self, mapper, mappings, isupdate): + mapper = _class_to_mapper(mapper) self._flushing = True flush_context = UOWTransaction(self) - if self.dispatch.before_bulk_save: - self.dispatch.before_bulk_save( - self, flush_context, states) - flush_context.transaction = transaction = self.begin( subtransactions=True) try: - self._warn_on_events = True - try: - flush_context.bulk_save(states) - finally: - self._warn_on_events = False - - self.dispatch.after_bulk_save( - self, flush_context, states - ) - - flush_context.finalize_flush_changes() - - self.dispatch.after_bulk_save_postexec( - self, flush_context, states) - + if isupdate: + self.dispatch.before_bulk_update( + self, flush_context, mapper, mappings) + flush_context.bulk_update(mapper, mappings) + self.dispatch.after_bulk_update( + self, flush_context, mapper, mappings) + else: + self.dispatch.before_bulk_insert( + self, flush_context, mapper, mappings) + flush_context.bulk_insert(mapper, mappings) + self.dispatch.after_bulk_insert( + self, flush_context, mapper, mappings) transaction.commit() except: diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index e941bc1a4..fe8ccd222 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -580,21 +580,6 @@ class InstanceState(interfaces.InspectionAttr): state._strong_obj = None -class MappingState(InstanceState): - committed_state = {} - callables = {} - - def __init__(self, mapper, mapping): - self.class_ = mapper.class_ - self.manager = mapper.class_manager - self.modified = True - self._dict = mapping - - @property - def dict(self): - return self._dict - - class AttributeState(object): """Provide an inspection interface corresponding to a particular attribute on a particular mapped object. 
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index bc8a0f556..b3a1519c5 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -394,23 +394,11 @@ class UOWTransaction(object): if other: self.session._register_newly_persistent(other) - def bulk_save(self, states): - for (base_mapper, in_session), states_ in itertools.groupby( - states, - lambda state: - ( - state.mapper.base_mapper, - state.key is self.session.hash_key - )): - - persistence.save_obj( - base_mapper, list(states_), self, bookkeeping=in_session) - - if in_session: - self.states.update( - (state, (False, False)) - for state in states_ - ) + def bulk_insert(self, mapper, mappings): + persistence.bulk_insert(mapper, mappings, self) + + def bulk_update(self, mapper, mappings): + persistence.bulk_update(mapper, mappings, self) class IterateMappersMixin(object): -- cgit v1.2.1 From 8773307257550e86801217f2b77d47047718807a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 15 Aug 2014 18:22:08 -0400 Subject: - refine this enough so that _collect_insert_commands() seems to be more than twice as fast now (.039 vs. .091); bulk_insert() and bulk_update() do their own collection but now both call into _emit_insert_statements() / _emit_update_statements(); the approach seems to have no impact on insert speed, still .85 for the insert test --- lib/sqlalchemy/orm/mapper.py | 35 ++++++ lib/sqlalchemy/orm/persistence.py | 259 +++++++++++++++++++------------------- 2 files changed, 161 insertions(+), 133 deletions(-) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 06ec2bf14..fc15769cd 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1892,6 +1892,41 @@ class Mapper(InspectionAttr): """ + @_memoized_configured_property + def _col_to_propkey(self): + return dict( + ( + table, + [ + (col, self._columntoproperty[col].key) + for col in columns + ] + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _pk_keys_by_table(self): + return dict( + ( + table, + frozenset([col.key for col in pks]) + ) + for table, pks in self._pks_by_table.items() + ) + + @_memoized_configured_property + def _server_default_cols(self): + return dict( + ( + table, + frozenset([ + col for col in columns + if col.server_default is not None]) + ) + for table, columns in self._cols_by_table.items() + ) + @property def selectable(self): """The :func:`.select` construct this :class:`.Mapper` selects from diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index a8d4bd695..782d94dc8 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -34,29 +34,35 @@ def bulk_insert(mapper, mappings, uowtransaction): "not supported in bulk_insert()") connection = uowtransaction.transaction.connection(base_mapper) - + value_params = {} for table, sub_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(sub_mapper): continue - to_translate = dict( - (mapper._columntoproperty[col].key, col.key) - for col in mapper._cols_by_table[table] - ) has_version_generator = mapper.version_id_generator is not False and \ mapper.version_id_col is not None - multiparams = [] + + records = [] for mapping in mappings: params = dict( - (k, mapping.get(v)) for k, v in to_translate.items() + (col.key, mapping[propkey]) + for col, propkey in mapper._col_to_propkey[table] + if propkey in mapping ) + if has_version_generator: params[mapper.version_id_col.key] = \ 
mapper.version_id_generator(None) - multiparams.append(params) - statement = base_mapper._memo(('insert', table), table.insert) - cached_connections[connection].execute(statement, multiparams) + records.append( + (None, None, params, sub_mapper, + connection, value_params, True, True) + ) + + _emit_insert_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, records, + bookkeeping=False) def bulk_update(mapper, mappings, uowtransaction): @@ -71,52 +77,41 @@ def bulk_update(mapper, mappings, uowtransaction): connection = uowtransaction.transaction.connection(base_mapper) + value_params = {} for table, sub_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(sub_mapper): continue - needs_version_id = sub_mapper.version_id_col is not None and \ - table.c.contains_column(sub_mapper.version_id_col) - - def update_stmt(): - return _update_stmt_for_mapper(sub_mapper, table, needs_version_id) - - statement = base_mapper._memo(('update', table), update_stmt) + label_pks = mapper._pks_by_table[table] + if mapper.version_id_col is not None: + label_pks = label_pks.union([mapper.version_id_col]) - pks = mapper._pks_by_table[table] to_translate = dict( - (mapper._columntoproperty[col].key, col._label - if col in pks else col.key) - for col in mapper._cols_by_table[table] + (propkey, col._label if col in label_pks else col.key) + for col, propkey in mapper._col_to_propkey[table] ) - for colnames, sub_mappings in groupby( - mappings, - lambda mapping: sorted(tuple(mapping.keys()))): - - multiparams = [] - for mapping in sub_mappings: - params = dict( - (to_translate[k], v) for k, v in mapping.items() - ) - multiparams.append(params) - - c = cached_connections[connection].execute(statement, multiparams) + records = [] + for mapping in mappings: + params = dict( + (to_translate[k], v) for k, v in mapping.items() + ) - rows = c.rowcount + if mapper.version_id_generator is not False and \ + mapper.version_id_col is not None and \ + mapper.version_id_col.key not in params: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator( + params[mapper.version_id_col._label]) - if connection.dialect.supports_sane_rowcount: - if rows != len(multiparams): - raise orm_exc.StaleDataError( - "UPDATE statement on table '%s' expected to " - "update %d row(s); %d were matched." % - (table.description, len(multiparams), rows)) + records.append( + (None, None, params, sub_mapper, connection, value_params) + ) - elif needs_version_id: - util.warn("Dialect %s does not support updated rowcount " - "- versioning cannot be verified." 
% - c.dialect.dialect_description, - stacklevel=12) + _emit_update_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, records, + bookkeeping=False) def save_obj( @@ -342,39 +337,36 @@ def _collect_insert_commands(base_mapper, uowtransaction, table, if table not in mapper._pks_by_table: continue - pks = mapper._pks_by_table[table] - params = {} value_params = {} - - has_all_pks = True - has_all_defaults = True - has_version_id_generator = mapper.version_id_generator is not False \ - and mapper.version_id_col is not None - for col in mapper._cols_by_table[table]: - if has_version_id_generator and col is mapper.version_id_col: - val = mapper.version_id_generator(None) - params[col.key] = val + for col, propkey in mapper._col_to_propkey[table]: + if propkey in state_dict: + value = state_dict[propkey] + if isinstance(value, sql.ClauseElement): + value_params[col.key] = value + elif value is not None or ( + not col.primary_key and + not col.server_default and + not col.default): + params[col.key] = value else: - # pull straight from the dict for - # pending objects - prop = mapper._columntoproperty[col] - value = state_dict.get(prop.key, None) + if not col.server_default \ + and not col.default and not col.primary_key: + params[col.key] = None - if value is None: - if col in pks: - has_all_pks = False - elif col.default is None and \ - col.server_default is None: - params[col.key] = value - elif col.server_default is not None and \ - mapper.base_mapper.eager_defaults: - has_all_defaults = False + has_all_pks = mapper._pk_keys_by_table[table].issubset(params) - elif isinstance(value, sql.ClauseElement): - value_params[col] = value - else: - params[col.key] = value + if base_mapper.eager_defaults: + has_all_defaults = mapper._server_default_cols[table].\ + issubset(params) + else: + has_all_defaults = True + + if mapper.version_id_generator is not False \ + and mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator(None) insert.append((state, state_dict, params, mapper, connection, value_params, has_all_pks, @@ -572,30 +564,9 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, return delete -def _update_stmt_for_mapper(mapper, table, needs_version_id): - clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - if needs_version_id: - clause.clauses.append( - mapper.version_id_col == sql.bindparam( - mapper.version_id_col._label, - type_=mapper.version_id_col.type)) - - stmt = table.update(clause) - if mapper.base_mapper.eager_defaults: - stmt = stmt.return_defaults() - elif mapper.version_id_col is not None: - stmt = stmt.return_defaults(mapper.version_id_col) - - return stmt - - def _emit_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update): + cached_connections, mapper, table, update, + bookkeeping=True): """Emit UPDATE statements corresponding to value lists collected by _collect_update_commands().""" @@ -603,7 +574,25 @@ def _emit_update_statements(base_mapper, uowtransaction, table.c.contains_column(mapper.version_id_col) def update_stmt(): - return _update_stmt_for_mapper(mapper, table, needs_version_id) + clause = sql.and_() + + for col in mapper._pks_by_table[table]: + clause.clauses.append(col == sql.bindparam(col._label, + type_=col.type)) + + if needs_version_id: + clause.clauses.append( + 
mapper.version_id_col == sql.bindparam( + mapper.version_id_col._label, + type_=mapper.version_id_col.type)) + + stmt = table.update(clause) + if mapper.base_mapper.eager_defaults: + stmt = stmt.return_defaults() + elif mapper.version_id_col is not None: + stmt = stmt.return_defaults(mapper.version_id_col) + + return stmt statement = base_mapper._memo(('update', table), update_stmt) @@ -624,15 +613,16 @@ def _emit_update_statements(base_mapper, uowtransaction, c = connection.execute( statement.values(value_params), params) - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) rows += c.rowcount else: multiparams = [rec[2] for rec in records] @@ -640,17 +630,18 @@ def _emit_update_statements(base_mapper, uowtransaction, execute(statement, multiparams) rows += c.rowcount - for state, state_dict, params, mapper, \ - connection, value_params in records: - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) + if bookkeeping: + for state, state_dict, params, mapper, \ + connection, value_params in records: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) if connection.dialect.supports_sane_rowcount: if rows != len(records): @@ -667,7 +658,8 @@ def _emit_update_statements(base_mapper, uowtransaction, def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, insert): + cached_connections, mapper, table, insert, + bookkeeping=True): """Emit INSERT statements corresponding to value lists collected by _collect_insert_commands().""" @@ -676,11 +668,11 @@ def _emit_insert_statements(base_mapper, uowtransaction, for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \ records in groupby(insert, lambda rec: (rec[4], - list(rec[2].keys()), + tuple(sorted(rec[2].keys())), bool(rec[5]), rec[6], rec[7]) ): - if \ + if not bookkeeping or \ ( has_all_defaults or not base_mapper.eager_defaults @@ -693,19 +685,20 @@ def _emit_insert_statements(base_mapper, uowtransaction, c = cached_connections[connection].\ execute(statement, multiparams) - for (state, state_dict, params, mapper_rec, - conn, value_params, has_all_pks, has_all_defaults), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - c, - last_inserted_params, - value_params) + if bookkeeping: + for (state, state_dict, params, mapper_rec, + conn, value_params, has_all_pks, has_all_defaults), \ + last_inserted_params in \ + zip(records, c.context.compiled_parameters): + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + last_inserted_params, + value_params) else: if not has_all_defaults and base_mapper.eager_defaults: -- cgit v1.2.1 From 84cca0e28660b5d35c35195aa57c89b094fa897d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 18 Aug 2014 18:30:14 -0400 Subject: dev --- lib/sqlalchemy/orm/persistence.py | 47 +++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 8d3e90cf4..f9e7eda28 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ 
-34,26 +34,18 @@ def bulk_insert(mapper, mappings, uowtransaction): "not supported in bulk_insert()") connection = uowtransaction.transaction.connection(base_mapper) - value_params = {} for table, sub_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(sub_mapper): continue - has_version_generator = mapper.version_id_generator is not False and \ - mapper.version_id_col is not None - records = [] - for mapping in mappings: - params = dict( - (col.key, mapping[propkey]) - for col, propkey in mapper._col_to_propkey[table] - if propkey in mapping - ) - - if has_version_generator: - params[mapper.version_id_col.key] = \ - mapper.version_id_generator(None) - + for ( + state, state_dict, params, mapper, + connection, value_params, has_all_pks, + has_all_defaults) in _collect_insert_commands(table, ( + (None, mapping, sub_mapper, connection) + for mapping in mappings) + ): records.append( (None, None, params, sub_mapper, connection, value_params, True, True) @@ -82,13 +74,13 @@ def bulk_update(mapper, mappings, uowtransaction): if not mapper.isa(sub_mapper): continue - label_pks = mapper._pks_by_table[table] + label_pks = sub_mapper._pks_by_table[table] if mapper.version_id_col is not None: label_pks = label_pks.union([mapper.version_id_col]) to_translate = dict( (propkey, col._label if col in label_pks else col.key) - for col, propkey in mapper._col_to_propkey[table] + for propkey, col in sub_mapper._propkey_to_col[table].items() ) records = [] @@ -350,7 +342,7 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction): yield state, dict_, mapper, bool(state.key), connection -def _collect_insert_commands(table, states_to_insert): +def _collect_insert_commands(table, states_to_insert, bulk=False): """Identify sets of values to use in INSERT statements for a list of states. 
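
The mappings consumed by this bulk INSERT path are plain dictionaries keyed on mapped attribute names, rather than instrumented object states.  A rough sketch of the session-level call this is building toward, assuming the ``Customer`` mapping and ``engine`` defined in the performance examples later in this series, would be::

    from sqlalchemy.orm import Session

    session = Session(bind=engine)
    session.bulk_insert_mappings(Customer, [
        dict(
            name='customer name %d' % i,
            description='customer description %d' % i)
        for i in range(10000)
    ])
    session.commit()
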
@@ -374,17 +366,20 @@ def _collect_insert_commands(table, states_to_insert): else: params[col.key] = value - for colkey in mapper._insert_cols_as_none[table].\ - difference(params).difference(value_params): - params[colkey] = None + if not bulk: + for colkey in mapper._insert_cols_as_none[table].\ + difference(params).difference(value_params): + params[colkey] = None - has_all_pks = mapper._pk_keys_by_table[table].issubset(params) + has_all_pks = mapper._pk_keys_by_table[table].issubset(params) - if mapper.base_mapper.eager_defaults: - has_all_defaults = mapper._server_default_cols[table].\ - issubset(params) + if mapper.base_mapper.eager_defaults: + has_all_defaults = mapper._server_default_cols[table].\ + issubset(params) + else: + has_all_defaults = True else: - has_all_defaults = True + has_all_defaults = has_all_pks = True if mapper.version_id_generator is not False \ and mapper.version_id_col is not None and \ -- cgit v1.2.1 From a251001f24e819f1ebc525948437563f52a3a226 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 18 Aug 2014 18:52:53 -0400 Subject: dev --- lib/sqlalchemy/orm/persistence.py | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index f9e7eda28..145a7783a 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -34,26 +34,25 @@ def bulk_insert(mapper, mappings, uowtransaction): "not supported in bulk_insert()") connection = uowtransaction.transaction.connection(base_mapper) - for table, sub_mapper in base_mapper._sorted_tables.items(): - if not mapper.isa(sub_mapper): + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): continue - records = [] - for ( - state, state_dict, params, mapper, - connection, value_params, has_all_pks, - has_all_defaults) in _collect_insert_commands(table, ( - (None, mapping, sub_mapper, connection) + records = ( + (None, None, params, super_mapper, + connection, value_params, True, True) + for + state, state_dict, params, mp, + conn, value_params, has_all_pks, + has_all_defaults in _collect_insert_commands(table, ( + (None, mapping, super_mapper, connection) for mapping in mappings) - ): - records.append( - (None, None, params, sub_mapper, - connection, value_params, True, True) ) + ) _emit_insert_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, records, + super_mapper, table, records, bookkeeping=False) @@ -70,17 +69,17 @@ def bulk_update(mapper, mappings, uowtransaction): connection = uowtransaction.transaction.connection(base_mapper) value_params = {} - for table, sub_mapper in base_mapper._sorted_tables.items(): - if not mapper.isa(sub_mapper): + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): continue - label_pks = sub_mapper._pks_by_table[table] + label_pks = super_mapper._pks_by_table[table] if mapper.version_id_col is not None: label_pks = label_pks.union([mapper.version_id_col]) to_translate = dict( (propkey, col._label if col in label_pks else col.key) - for propkey, col in sub_mapper._propkey_to_col[table].items() + for propkey, col in super_mapper._propkey_to_col[table].items() ) records = [] @@ -97,12 +96,12 @@ def bulk_update(mapper, mappings, uowtransaction): params[mapper.version_id_col._label]) records.append( - (None, None, params, sub_mapper, connection, value_params) + (None, None, params, super_mapper, connection, value_params) ) 
_emit_update_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, records, + super_mapper, table, records, bookkeeping=False) -- cgit v1.2.1 From 91959122e0a12943e5ff9399024c65ad4d7489e1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Aug 2014 14:24:56 -0400 Subject: - refinements --- lib/sqlalchemy/orm/events.py | 12 ----- lib/sqlalchemy/orm/mapper.py | 4 ++ lib/sqlalchemy/orm/persistence.py | 107 +++++++++++++++++++++++++------------- lib/sqlalchemy/orm/session.py | 29 ++++------- lib/sqlalchemy/orm/unitofwork.py | 6 --- 5 files changed, 86 insertions(+), 72 deletions(-) diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 37ea3071b..aa99673ba 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1453,18 +1453,6 @@ class SessionEvents(event.Events): """ - def before_bulk_insert(self, session, flush_context, mapper, mappings): - """""" - - def after_bulk_insert(self, session, flush_context, mapper, mappings): - """""" - - def before_bulk_update(self, session, flush_context, mapper, mappings): - """""" - - def after_bulk_update(self, session, flush_context, mapper, mappings): - """""" - def after_begin(self, session, transaction, connection): """Execute after a transaction is begun on a connection diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 89c092b58..b98fbda42 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -2366,6 +2366,10 @@ class Mapper(InspectionAttr): def _primary_key_props(self): return [self._columntoproperty[col] for col in self.primary_key] + @_memoized_configured_property + def _primary_key_propkeys(self): + return set([prop.key for prop in self._primary_key_props]) + def _get_state_attr_by_column( self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NEVER_SET): diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 145a7783a..9c0008925 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -23,17 +23,22 @@ from ..sql import expression from . 
import loading -def bulk_insert(mapper, mappings, uowtransaction): +def _bulk_insert(mapper, mappings, session_transaction, isstates): base_mapper = mapper.base_mapper cached_connections = _cached_connection_dict(base_mapper) - if uowtransaction.session.connection_callable: + if session_transaction.session.connection_callable: raise NotImplementedError( "connection_callable / per-instance sharding " "not supported in bulk_insert()") - connection = uowtransaction.transaction.connection(base_mapper) + if isstates: + mappings = [state.dict for state in mappings] + else: + mappings = list(mappings) + + connection = session_transaction.connection(base_mapper) for table, super_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(super_mapper): continue @@ -45,61 +50,55 @@ def bulk_insert(mapper, mappings, uowtransaction): state, state_dict, params, mp, conn, value_params, has_all_pks, has_all_defaults in _collect_insert_commands(table, ( - (None, mapping, super_mapper, connection) - for mapping in mappings) + (None, mapping, mapper, connection) + for mapping in mappings), + bulk=True ) ) - _emit_insert_statements(base_mapper, uowtransaction, + _emit_insert_statements(base_mapper, None, cached_connections, super_mapper, table, records, bookkeeping=False) -def bulk_update(mapper, mappings, uowtransaction): +def _bulk_update(mapper, mappings, session_transaction, isstates): base_mapper = mapper.base_mapper cached_connections = _cached_connection_dict(base_mapper) - if uowtransaction.session.connection_callable: + def _changed_dict(mapper, state): + return dict( + (k, v) + for k, v in state.dict.items() if k in state.committed_state or k + in mapper._primary_key_propkeys + ) + + if isstates: + mappings = [_changed_dict(mapper, state) for state in mappings] + else: + mappings = list(mappings) + + if session_transaction.session.connection_callable: raise NotImplementedError( "connection_callable / per-instance sharding " "not supported in bulk_update()") - connection = uowtransaction.transaction.connection(base_mapper) + connection = session_transaction.connection(base_mapper) value_params = {} + for table, super_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(super_mapper): continue - label_pks = super_mapper._pks_by_table[table] - if mapper.version_id_col is not None: - label_pks = label_pks.union([mapper.version_id_col]) - - to_translate = dict( - (propkey, col._label if col in label_pks else col.key) - for propkey, col in super_mapper._propkey_to_col[table].items() + records = ( + (None, None, params, super_mapper, connection, value_params) + for + params in _collect_bulk_update_commands(mapper, table, mappings) ) - records = [] - for mapping in mappings: - params = dict( - (to_translate[k], v) for k, v in mapping.items() - ) - - if mapper.version_id_generator is not False and \ - mapper.version_id_col is not None and \ - mapper.version_id_col.key not in params: - params[mapper.version_id_col.key] = \ - mapper.version_id_generator( - params[mapper.version_id_col._label]) - - records.append( - (None, None, params, super_mapper, connection, value_params) - ) - - _emit_update_statements(base_mapper, uowtransaction, + _emit_update_statements(base_mapper, None, cached_connections, super_mapper, table, records, bookkeeping=False) @@ -360,7 +359,7 @@ def _collect_insert_commands(table, states_to_insert, bulk=False): col = propkey_to_col[propkey] if value is None: continue - elif isinstance(value, sql.ClauseElement): + elif not bulk and isinstance(value, sql.ClauseElement): 
value_params[col.key] = value else: params[col.key] = value @@ -481,6 +480,44 @@ def _collect_update_commands(uowtransaction, table, states_to_update): state, state_dict, params, mapper, connection, value_params) +def _collect_bulk_update_commands(mapper, table, mappings): + label_pks = mapper._pks_by_table[table] + if mapper.version_id_col is not None: + label_pks = label_pks.union([mapper.version_id_col]) + + to_translate = dict( + (propkey, col.key if col not in label_pks else col._label) + for propkey, col in mapper._propkey_to_col[table].items() + ) + + for mapping in mappings: + params = dict( + (to_translate[k], mapping[k]) for k in to_translate + if k in mapping and k not in mapper._primary_key_propkeys + ) + + if not params: + continue + + try: + params.update( + (to_translate[k], mapping[k]) for k in + mapper._primary_key_propkeys.intersection(to_translate) + ) + except KeyError as ke: + raise orm_exc.FlushError( + "Can't update table using NULL for primary " + "key attribute: %s" % ke) + + if mapper.version_id_generator is not False and \ + mapper.version_id_col is not None and \ + mapper.version_id_col.key not in params: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator( + params[mapper.version_id_col._label]) + + yield params + def _collect_post_update_commands(base_mapper, uowtransaction, table, states_to_update, post_update_cols): diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 3199a4332..968868e84 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -21,6 +21,7 @@ from .base import ( _none_set, state_str, instance_str ) import itertools +from . import persistence from .unitofwork import UOWTransaction from . import state as statelib import sys @@ -2040,37 +2041,27 @@ class Session(_SessionClassMethods): (attributes.instance_state(obj) for obj in objects), lambda state: (state.mapper, state.key is not None) ): - if isupdate: - self.bulk_update_mappings(mapper, (s.dict for s in states)) - else: - self.bulk_insert_mappings(mapper, (s.dict for s in states)) + self._bulk_save_mappings(mapper, states, isupdate, True) def bulk_insert_mappings(self, mapper, mappings): - self._bulk_save_mappings(mapper, mappings, False) + self._bulk_save_mappings(mapper, mappings, False, False) def bulk_update_mappings(self, mapper, mappings): - self._bulk_save_mappings(mapper, mappings, True) + self._bulk_save_mappings(mapper, mappings, True, False) - def _bulk_save_mappings(self, mapper, mappings, isupdate): + def _bulk_save_mappings(self, mapper, mappings, isupdate, isstates): mapper = _class_to_mapper(mapper) self._flushing = True - flush_context = UOWTransaction(self) - flush_context.transaction = transaction = self.begin( + transaction = self.begin( subtransactions=True) try: if isupdate: - self.dispatch.before_bulk_update( - self, flush_context, mapper, mappings) - flush_context.bulk_update(mapper, mappings) - self.dispatch.after_bulk_update( - self, flush_context, mapper, mappings) + persistence._bulk_update( + mapper, mappings, transaction, isstates) else: - self.dispatch.before_bulk_insert( - self, flush_context, mapper, mappings) - flush_context.bulk_insert(mapper, mappings) - self.dispatch.after_bulk_insert( - self, flush_context, mapper, mappings) + persistence._bulk_insert( + mapper, mappings, transaction, isstates) transaction.commit() except: diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index b3a1519c5..05265b13f 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ 
b/lib/sqlalchemy/orm/unitofwork.py @@ -394,12 +394,6 @@ class UOWTransaction(object): if other: self.session._register_newly_persistent(other) - def bulk_insert(self, mapper, mappings): - persistence.bulk_insert(mapper, mappings, self) - - def bulk_update(self, mapper, mappings): - persistence.bulk_update(mapper, mappings, self) - class IterateMappersMixin(object): def _mappers(self, uow): -- cgit v1.2.1 From fcea5c86d3a9097caa04e2e35fa6404a3ef32044 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Aug 2014 18:26:11 -0400 Subject: - rename mapper._primary_key_props to mapper._identity_key_props - ensure bulk update is using all PK cols for all tables --- lib/sqlalchemy/orm/mapper.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 63d23e31d..31c17e69e 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1244,7 +1244,7 @@ class Mapper(InspectionAttr): self._readonly_props = set( self._columntoproperty[col] for col in self._columntoproperty - if self._columntoproperty[col] not in self._primary_key_props and + if self._columntoproperty[col] not in self._identity_key_props and (not hasattr(col, 'table') or col.table not in self._cols_by_table)) @@ -2359,19 +2359,23 @@ class Mapper(InspectionAttr): manager[prop.key]. impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET) - for prop in self._primary_key_props + for prop in self._identity_key_props ] @_memoized_configured_property - def _primary_key_props(self): - # TODO: this should really be called "identity key props", - # as it does not necessarily include primary key columns within - # individual tables + def _identity_key_props(self): return [self._columntoproperty[col] for col in self.primary_key] + @_memoized_configured_property + def _all_pk_props(self): + collection = set() + for table in self.tables: + collection.update(self._pks_by_table[table]) + return collection + @_memoized_configured_property def _primary_key_propkeys(self): - return set([prop.key for prop in self._primary_key_props]) + return set([prop.key for prop in self._all_pk_props]) def _get_state_attr_by_column( self, state, dict_, column, -- cgit v1.2.1 From d006e9cc2a84a05b46c480ad0c4b429036470e79 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 20 Aug 2014 14:59:16 -0400 Subject: - skip these methods --- test/orm/test_session.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 186b7a781..b2c8b5f02 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -1585,7 +1585,9 @@ class SessionInterface(fixtures.TestBase): raises_('refresh', user_arg) instance_methods = self._public_session_methods() \ - - self._class_methods + - self._class_methods - set([ + 'bulk_update_mappings', 'bulk_insert_mappings', + 'bulk_save_objects']) eq_(watchdog, instance_methods, watchdog.symmetric_difference(instance_methods)) -- cgit v1.2.1 From cb8f5c010b396dd83bdc1e4408787383f3c41d05 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 20 Aug 2014 16:16:47 -0400 Subject: - test for postfetch->sync.populate() having importance during an UPDATE at the per-table level --- test/orm/test_naturalpks.py | 74 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py index a4e982f84..709e1c0b1 100644 --- a/test/orm/test_naturalpks.py +++ b/test/orm/test_naturalpks.py @@ 
-1205,3 +1205,77 @@ class JoinedInheritanceTest(fixtures.MappedTest): eq_(e1.boss_name, 'pointy haired') eq_(e2.boss_name, 'pointy haired') + + +class JoinedInheritancePKOnFKTest(fixtures.MappedTest): + """Test cascades of pk->non-pk/fk on joined table inh.""" + + # mssql doesn't allow ON UPDATE on self-referential keys + __unsupported_on__ = ('mssql',) + + __requires__ = 'skip_mysql_on_windows', + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + fk_args = _backend_specific_fk_args() + + Table( + 'person', metadata, + Column('name', String(50), primary_key=True), + Column('type', String(50), nullable=False), + test_needs_fk=True) + + Table( + 'engineer', metadata, + Column('id', Integer, primary_key=True), + Column( + 'person_name', String(50), + ForeignKey('person.name', **fk_args)), + Column('primary_language', String(50)), + test_needs_fk=True + ) + + @classmethod + def setup_classes(cls): + + class Person(cls.Comparable): + pass + + class Engineer(Person): + pass + + def _test_pk(self, passive_updates): + Person, person, Engineer, engineer = ( + self.classes.Person, self.tables.person, + self.classes.Engineer, self.tables.engineer) + + mapper( + Person, person, polymorphic_on=person.c.type, + polymorphic_identity='person', passive_updates=passive_updates) + mapper( + Engineer, engineer, inherits=Person, + polymorphic_identity='engineer') + + sess = sa.orm.sessionmaker()() + + e1 = Engineer(name='dilbert', primary_language='java') + sess.add(e1) + sess.commit() + e1.name = 'wally' + e1.primary_language = 'c++' + + sess.flush() + + eq_(e1.person_name, 'wally') + + sess.expire_all() + eq_(e1.primary_language, "c++") + + @testing.requires.on_update_cascade + def test_pk_passive(self): + self._test_pk(True) + + #@testing.requires.non_updating_cascade + def test_pk_nonpassive(self): + self._test_pk(False) -- cgit v1.2.1 From db70b6e79e263c137f4d282c9c600417636afa25 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 20 Aug 2014 17:15:20 -0400 Subject: - that's it, feature is finished, needs tests --- lib/sqlalchemy/orm/persistence.py | 195 +++++++++++++++++--------------------- 1 file changed, 89 insertions(+), 106 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index c2750eeb3..aa10da9f4 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -15,7 +15,7 @@ in unitofwork.py. """ import operator -from itertools import groupby +from itertools import groupby, chain from .. import sql, util, exc as sa_exc, schema from . 
import attributes, sync, exc as orm_exc, evaluator from .base import state_str, _attr_as_key @@ -86,17 +86,16 @@ def _bulk_update(mapper, mappings, session_transaction, isstates): connection = session_transaction.connection(base_mapper) - value_params = {} - for table, super_mapper in base_mapper._sorted_tables.items(): if not mapper.isa(super_mapper): continue - records = ( - (None, None, params, super_mapper, connection, value_params) - for - params in _collect_bulk_update_commands(mapper, table, mappings) - ) + records = _collect_update_commands(None, table, ( + (None, mapping, mapper, connection, + (mapping[mapper._version_id_prop.key] + if mapper._version_id_prop else None)) + for mapping in mappings + ), bulk=True) _emit_update_statements(base_mapper, None, cached_connections, @@ -158,17 +157,16 @@ def save_obj( _finalize_insert_update_commands( base_mapper, uowtransaction, - ( - (state, state_dict, mapper, connection, False) - for state, state_dict, mapper, connection in states_to_insert - ) - ) - _finalize_insert_update_commands( - base_mapper, uowtransaction, - ( - (state, state_dict, mapper, connection, True) - for state, state_dict, mapper, connection, - update_version_id in states_to_update + chain( + ( + (state, state_dict, mapper, connection, False) + for state, state_dict, mapper, connection in states_to_insert + ), + ( + (state, state_dict, mapper, connection, True) + for state, state_dict, mapper, connection, + update_version_id in states_to_update + ) ) ) @@ -394,7 +392,9 @@ def _collect_insert_commands(table, states_to_insert, bulk=False): has_all_defaults) -def _collect_update_commands(uowtransaction, table, states_to_update): +def _collect_update_commands( + uowtransaction, table, states_to_update, + bulk=False): """Identify sets of values to use in UPDATE statements for a list of states. 
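
For the bulk UPDATE path, each dictionary needs to carry the primary key value(s) that locate the row being updated, in addition to the attributes being changed; a missing primary key raises the "NULL for primary key" error seen above.  A minimal sketch of the intended call, again assuming the ``Customer`` mapping and ``Session`` from the examples, might be::

    session = Session(bind=engine)
    session.bulk_update_mappings(Customer, [
        dict(id=i, description='updated description %d' % i)
        for i in range(1, 1001)
    ])
    session.commit()
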
@@ -414,23 +414,32 @@ def _collect_update_commands(uowtransaction, table, states_to_update): pks = mapper._pks_by_table[table] - params = {} value_params = {} propkey_to_col = mapper._propkey_to_col[table] - for propkey in set(propkey_to_col).intersection(state.committed_state): - value = state_dict[propkey] - col = propkey_to_col[propkey] - - if not state.manager[propkey].impl.is_equal( - value, state.committed_state[propkey]): - if isinstance(value, sql.ClauseElement): - value_params[col] = value - else: - params[col.key] = value + if bulk: + params = dict( + (propkey_to_col[propkey].key, state_dict[propkey]) + for propkey in + set(propkey_to_col).intersection(state_dict) + ) + else: + params = {} + for propkey in set(propkey_to_col).intersection( + state.committed_state): + value = state_dict[propkey] + col = propkey_to_col[propkey] + + if not state.manager[propkey].impl.is_equal( + value, state.committed_state[propkey]): + if isinstance(value, sql.ClauseElement): + value_params[col] = value + else: + params[col.key] = value - if update_version_id is not None: + if update_version_id is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: col = mapper.version_id_col params[col._label] = update_version_id @@ -442,24 +451,33 @@ def _collect_update_commands(uowtransaction, table, states_to_update): if not (params or value_params): continue - pk_params = {} - for col in pks: - propkey = mapper._columntoproperty[col].key - history = state.manager[propkey].impl.get_history( - state, state_dict, attributes.PASSIVE_OFF) - - if history.added: - if not history.deleted or \ - ("pk_cascaded", state, col) in \ - uowtransaction.attributes: - pk_params[col._label] = history.added[0] - params.pop(col.key, None) + if bulk: + pk_params = dict( + (propkey_to_col[propkey]._label, state_dict.get(propkey)) + for propkey in + set(propkey_to_col). 
+ intersection(mapper._pk_keys_by_table[table]) + ) + else: + pk_params = {} + for col in pks: + propkey = mapper._columntoproperty[col].key + + history = state.manager[propkey].impl.get_history( + state, state_dict, attributes.PASSIVE_OFF) + + if history.added: + if not history.deleted or \ + ("pk_cascaded", state, col) in \ + uowtransaction.attributes: + pk_params[col._label] = history.added[0] + params.pop(col.key, None) + else: + # else, use the old value to locate the row + pk_params[col._label] = history.deleted[0] + params[col.key] = history.added[0] else: - # else, use the old value to locate the row - pk_params[col._label] = history.deleted[0] - params[col.key] = history.added[0] - else: - pk_params[col._label] = history.unchanged[0] + pk_params[col._label] = history.unchanged[0] if params or value_params: if None in pk_params.values(): @@ -471,44 +489,6 @@ def _collect_update_commands(uowtransaction, table, states_to_update): state, state_dict, params, mapper, connection, value_params) -def _collect_bulk_update_commands(mapper, table, mappings): - label_pks = mapper._pks_by_table[table] - if mapper.version_id_col is not None: - label_pks = label_pks.union([mapper.version_id_col]) - - to_translate = dict( - (propkey, col.key if col not in label_pks else col._label) - for propkey, col in mapper._propkey_to_col[table].items() - ) - - for mapping in mappings: - params = dict( - (to_translate[k], mapping[k]) for k in to_translate - if k in mapping and k not in mapper._primary_key_propkeys - ) - - if not params: - continue - - try: - params.update( - (to_translate[k], mapping[k]) for k in - mapper._primary_key_propkeys.intersection(to_translate) - ) - except KeyError as ke: - raise orm_exc.FlushError( - "Can't update table using NULL for primary " - "key attribute: %s" % ke) - - if mapper.version_id_generator is not False and \ - mapper.version_id_col is not None and \ - mapper.version_id_col.key not in params: - params[mapper.version_id_col.key] = \ - mapper.version_id_generator( - params[mapper.version_id_col._label]) - - yield params - def _collect_post_update_commands(base_mapper, uowtransaction, table, states_to_update, post_update_cols): @@ -569,7 +549,7 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, "key value") if update_version_id is not None and \ - table.c.contains_column(mapper.version_id_col): + mapper.version_id_col in mapper._cols_by_table[table]: params[mapper.version_id_col.key] = update_version_id yield params, connection @@ -581,7 +561,7 @@ def _emit_update_statements(base_mapper, uowtransaction, by _collect_update_commands().""" needs_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) + mapper.version_id_col in mapper._cols_by_table[table] def update_stmt(): clause = sql.and_() @@ -610,9 +590,9 @@ def _emit_update_statements(base_mapper, uowtransaction, records in groupby( update, lambda rec: ( - rec[4], - tuple(sorted(rec[2])), - bool(rec[5]))): + rec[4], # connection + set(rec[2]), # set of parameter keys + bool(rec[5]))): # whether or not we have "value" parameters rows = 0 records = list(records) @@ -692,12 +672,14 @@ def _emit_insert_statements(base_mapper, uowtransaction, statement = base_mapper._memo(('insert', table), table.insert) for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \ - records in groupby(insert, - lambda rec: (rec[4], - tuple(sorted(rec[2].keys())), - bool(rec[5]), - rec[6], rec[7]) - ): + records in groupby( + insert, + lambda rec: ( + rec[4], # 
connection + set(rec[2]), # parameter keys + bool(rec[5]), # whether we have "value" parameters + rec[6], + rec[7])): if not bookkeeping or \ ( has_all_defaults @@ -785,7 +767,10 @@ def _emit_post_update_statements(base_mapper, uowtransaction, # also group them into common (connection, cols) sets # to support executemany(). for key, grouper in groupby( - update, lambda rec: (rec[1], sorted(rec[0])) + update, lambda rec: ( + rec[1], # connection + set(rec[0]) # parameter keys + ) ): connection = key[0] multiparams = [params for params, conn in grouper] @@ -799,7 +784,7 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, by _collect_delete_commands().""" need_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) + mapper.version_id_col in mapper._cols_by_table[table] def delete_stmt(): clause = sql.and_() @@ -821,12 +806,9 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, statement = base_mapper._memo(('delete', table), delete_stmt) for connection, recs in groupby( delete, - lambda rec: rec[1] + lambda rec: rec[1] # connection ): - del_objects = [ - params - for params, connection in recs - ] + del_objects = [params for params, connection in recs] connection = cached_connections[connection] @@ -931,7 +913,8 @@ def _postfetch(mapper, uowtransaction, table, postfetch_cols = result.context.postfetch_cols returning_cols = result.context.returning_cols - if mapper.version_id_col is not None: + if mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] if returning_cols: -- cgit v1.2.1 From ccfd26d96916cc7953f1fefa8abed53d4f696c4c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Sep 2014 19:23:09 -0400 Subject: - add options to get back pk defaults for inserts. times spent start getting barely different... --- lib/sqlalchemy/orm/persistence.py | 37 ++++++++++++++++++++++++++----------- lib/sqlalchemy/orm/session.py | 16 +++++++++------- 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 198eeb46f..2a697a6f9 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -23,7 +23,8 @@ from ..sql import expression from . 
import loading -def _bulk_insert(mapper, mappings, session_transaction, isstates): +def _bulk_insert( + mapper, mappings, session_transaction, isstates, return_defaults): base_mapper = mapper.base_mapper cached_connections = _cached_connection_dict(base_mapper) @@ -34,7 +35,11 @@ def _bulk_insert(mapper, mappings, session_transaction, isstates): "not supported in bulk_insert()") if isstates: - mappings = [state.dict for state in mappings] + if return_defaults: + states = [(state, state.dict) for state in mappings] + mappings = [dict_ for (state, dict_) in states] + else: + mappings = [state.dict for state in mappings] else: mappings = list(mappings) @@ -44,22 +49,30 @@ def _bulk_insert(mapper, mappings, session_transaction, isstates): continue records = ( - (None, None, params, super_mapper, - connection, value_params, True, True) + (None, state_dict, params, super_mapper, + connection, value_params, has_all_pks, has_all_defaults) for state, state_dict, params, mp, conn, value_params, has_all_pks, has_all_defaults in _collect_insert_commands(table, ( (None, mapping, mapper, connection) for mapping in mappings), - bulk=True + bulk=True, return_defaults=return_defaults ) ) - _emit_insert_statements(base_mapper, None, cached_connections, super_mapper, table, records, - bookkeeping=False) + bookkeeping=return_defaults) + + if return_defaults and isstates: + identity_cls = mapper._identity_class + identity_props = [p.key for p in mapper._identity_key_props] + for state, dict_ in states: + state.key = ( + identity_cls, + tuple([dict_[key] for key in identity_props]) + ) def _bulk_update(mapper, mappings, session_transaction, isstates): @@ -341,7 +354,9 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction): state, dict_, mapper, connection, update_version_id) -def _collect_insert_commands(table, states_to_insert, bulk=False): +def _collect_insert_commands( + table, states_to_insert, + bulk=False, return_defaults=False): """Identify sets of values to use in INSERT statements for a list of states. 
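
With ``return_defaults`` enabled, the bulk insert gives up the batched executemany() form so that each row's newly generated primary key can be fetched back onto the given objects; the example suite added later in this series exercises it roughly as::

    session = Session(bind=engine)
    session.bulk_save_objects([
        Customer(
            name='customer name %d' % i,
            description='customer description %d' % i)
        for i in range(10000)
    ], return_defaults=True)
    session.commit()
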
@@ -370,6 +385,7 @@ def _collect_insert_commands(table, states_to_insert, bulk=False): difference(params).difference(value_params): params[colkey] = None + if not bulk or return_defaults: has_all_pks = mapper._pk_keys_by_table[table].issubset(params) if mapper.base_mapper.eager_defaults: @@ -884,9 +900,8 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, states): toload_now.extend(state._unloaded_non_object) elif mapper.version_id_col is not None and \ mapper.version_id_generator is False: - prop = mapper._columntoproperty[mapper.version_id_col] - if prop.key in state.unloaded: - toload_now.extend([prop.key]) + if mapper._version_id_prop.key in state.unloaded: + toload_now.extend([mapper._version_id_prop.key]) if toload_now: state.key = base_mapper._identity_key_from_state(state) diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index e075b9c71..1611688b0 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2036,20 +2036,22 @@ class Session(_SessionClassMethods): with util.safe_reraise(): transaction.rollback(_capture_exception=True) - def bulk_save_objects(self, objects): + def bulk_save_objects(self, objects, return_defaults=False): for (mapper, isupdate), states in itertools.groupby( (attributes.instance_state(obj) for obj in objects), lambda state: (state.mapper, state.key is not None) ): - self._bulk_save_mappings(mapper, states, isupdate, True) + self._bulk_save_mappings( + mapper, states, isupdate, True, return_defaults) - def bulk_insert_mappings(self, mapper, mappings): - self._bulk_save_mappings(mapper, mappings, False, False) + def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): + self._bulk_save_mappings(mapper, mappings, False, False, return_defaults) def bulk_update_mappings(self, mapper, mappings): - self._bulk_save_mappings(mapper, mappings, True, False) + self._bulk_save_mappings(mapper, mappings, True, False, False) - def _bulk_save_mappings(self, mapper, mappings, isupdate, isstates): + def _bulk_save_mappings( + self, mapper, mappings, isupdate, isstates, return_defaults): mapper = _class_to_mapper(mapper) self._flushing = True @@ -2061,7 +2063,7 @@ class Session(_SessionClassMethods): mapper, mappings, transaction, isstates) else: persistence._bulk_insert( - mapper, mappings, transaction, isstates) + mapper, mappings, transaction, isstates, return_defaults) transaction.commit() except: -- cgit v1.2.1 From 9494ca00d4451448fd4473c03dff8459051224a2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Sep 2014 19:46:55 -0400 Subject: - lets start exampling this stuff --- examples/performance/__init__.py | 0 examples/performance/inserts.py | 148 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 examples/performance/__init__.py create mode 100644 examples/performance/inserts.py diff --git a/examples/performance/__init__.py b/examples/performance/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/performance/inserts.py b/examples/performance/inserts.py new file mode 100644 index 000000000..469501d8d --- /dev/null +++ b/examples/performance/inserts.py @@ -0,0 +1,148 @@ +import time + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Integer, String, create_engine +from sqlalchemy.orm import Session + +Base = declarative_base() +engine = None + + +class Customer(Base): + __tablename__ = "customer" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + 
description = Column(String(255)) + + +def setup_database(): + global engine + engine = create_engine("sqlite:///insert_speed.db", echo=False) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + +_tests = [] + + +def _test(fn): + _tests.append(fn) + return fn + + +@_test +def test_flush_no_pk(n): + """Individual INSERT statements via the ORM, calling upon last row id""" + session = Session(bind=engine) + for chunk in range(0, n, 1000): + session.add_all([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i) + for i in range(chunk, chunk + 1000) + ]) + session.flush() + session.commit() + + +@_test +def test_bulk_save_return_pks(n): + """Individual INSERT statements in "bulk", but calling upon last row id""" + session = Session(bind=engine) + session.bulk_save_objects([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ], return_defaults=True) + session.commit() + + +@_test +def test_flush_pk_given(n): + """Batched INSERT statements via the ORM, PKs already defined""" + session = Session(bind=engine) + for chunk in range(0, n, 1000): + session.add_all([ + Customer( + id=i + 1, + name='customer name %d' % i, + description='customer description %d' % i) + for i in range(chunk, chunk + 1000) + ]) + session.flush() + session.commit() + + +@_test +def test_bulk_save(n): + """Batched INSERT statements via the ORM in "bulk", discarding PK values.""" + session = Session(bind=engine) + session.bulk_save_objects([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ]) + session.commit() + + +@_test +def test_bulk_insert_mappings(n): + """Batched INSERT statements via the ORM "bulk", using dictionaries instead of objects""" + session = Session(bind=engine) + session.bulk_insert_mappings(Customer, [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ]) + session.commit() + + +@_test +def test_core_insert(n): + """A single Core INSERT construct inserting mappings in bulk.""" + conn = engine.connect() + conn.execute( + Customer.__table__.insert(), + [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ]) + + +@_test +def test_sqlite_raw(n): + """pysqlite's pure C API inserting rows in bulk, no pure Python at all""" + conn = engine.raw_connection() + cursor = conn.cursor() + cursor.executemany( + "INSERT INTO customer (name, description) VALUES(:name, :description)", + [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ] + ) + conn.commit() + + +def run_tests(n): + for fn in _tests: + setup_database() + now = time.time() + fn(n) + total = time.time() - now + + print("Test: %s; Total time %s" % (fn.__doc__, total)) + +if __name__ == '__main__': + run_tests(100000) -- cgit v1.2.1 From 07d061a17b3fbad89df97e57350b4d0c132408c2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Sep 2014 14:49:26 -0400 Subject: - wip --- examples/performance/__init__.py | 183 +++++++++++++++++++++++++++++++++++ examples/performance/bulk_inserts.py | 132 +++++++++++++++++++++++++ examples/performance/inserts.py | 148 ---------------------------- 3 files changed, 315 insertions(+), 148 deletions(-) create mode 100644 examples/performance/bulk_inserts.py delete mode 100644 examples/performance/inserts.py diff --git a/examples/performance/__init__.py 
b/examples/performance/__init__.py index e69de29bb..ae914db96 100644 --- a/examples/performance/__init__.py +++ b/examples/performance/__init__.py @@ -0,0 +1,183 @@ +"""A performance profiling suite for a variety of SQLAlchemy use cases. + +The suites here each focus on some specific type of use case, one which +has a particular performance profile: + +* bulk inserts +* individual inserts, with or without transactions +* fetching large numbers of rows +* running lots of small queries + +All suites include a variety of use patterns with both the Core and +ORM, and are sorted in order of performance from worst to greatest, +inversely based on amount of functionality provided by SQLAlchemy, +greatest to least (these two things generally correspond perfectly). + +Each suite is run as a module, and provides a consistent command line +interface:: + + $ python -m examples.performance.bulk_inserts --profile --num 1000 + +Using ``--help`` will allow all options:: + + $ python -m examples.performance.bulk_inserts --help +usage: bulk_inserts.py [-h] [--test TEST] [--dburl DBURL] [--num NUM] + [--profile] [--dump] [--runsnake] [--echo] + +optional arguments: + -h, --help show this help message and exit + --test TEST run specific test name + --dburl DBURL database URL, default sqlite:///profile.db + --num NUM Number of iterations/items/etc for tests, default 100000 + --profile run profiling and dump call counts + --dump dump full call profile (implies --profile) + --runsnake invoke runsnakerun (implies --profile) + --echo Echo SQL output + + +""" +import argparse +import cProfile +import StringIO +import pstats +import os +import time + + + +class Profiler(object): + tests = [] + + def __init__(self, setup, options): + self.setup = setup + self.test = options.test + self.dburl = options.dburl + self.runsnake = options.runsnake + self.profile = options.profile + self.dump = options.dump + self.num = options.num + self.echo = options.echo + self.stats = [] + + @classmethod + def profile(cls, fn): + cls.tests.append(fn) + return fn + + def run(self): + if self.test: + tests = [fn for fn in self.tests if fn.__name__ == self.test] + if not tests: + raise ValueError("No such test: %s" % self.test) + else: + tests = self.tests + + print("Tests to run: %s" % ", ".join([t.__name__ for t in tests])) + for test in tests: + self._run_test(test) + self.stats[-1].report() + + def _run_with_profile(self, fn): + pr = cProfile.Profile() + pr.enable() + try: + result = fn(self.num) + finally: + pr.disable() + + output = StringIO.StringIO() + stats = pstats.Stats(pr, stream=output).sort_stats('cumulative') + + self.stats.append(TestResult(self, fn, stats=stats)) + return result + + def _run_with_time(self, fn): + now = time.time() + try: + return fn(self.num) + finally: + total = time.time() - now + self.stats.append(TestResult(self, fn, total_time=total)) + + def _run_test(self, fn): + self.setup(self.dburl, self.echo) + if self.profile or self.runsnake or self.dump: + self._run_with_profile(fn) + else: + self._run_with_time(fn) + + @classmethod + def main(cls, setup): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--test", type=str, + help="run specific test name" + ) + parser.add_argument( + '--dburl', type=str, default="sqlite:///profile.db", + help="database URL, default sqlite:///profile.db" + ) + parser.add_argument( + '--num', type=int, default=100000, + help="Number of iterations/items/etc for tests, default 100000" + ) + parser.add_argument( + '--profile', action='store_true', + help='run 
profiling and dump call counts') + parser.add_argument( + '--dump', action='store_true', + help='dump full call profile (implies --profile)') + parser.add_argument( + '--runsnake', action='store_true', + help='invoke runsnakerun (implies --profile)') + parser.add_argument( + '--echo', action='store_true', + help="Echo SQL output" + ) + args = parser.parse_args() + + args.profile = args.profile or args.dump or args.runsnake + + Profiler(setup, args).run() + + +class TestResult(object): + def __init__(self, profile, test, stats=None, total_time=None): + self.profile = profile + self.test = test + self.stats = stats + self.total_time = total_time + + def report(self): + print(self._summary()) + if self.profile.profile: + self.report_stats() + + def _summary(self): + summary = "%s : %s (%d iterations)" % ( + self.test.__name__, self.test.__doc__, self.profile.num) + if self.total_time: + summary += "; total time %f sec" % self.total_time + if self.stats: + summary += "; total fn calls %d" % self.stats.total_calls + return summary + + def report_stats(self): + if self.profile.runsnake: + self._runsnake() + elif self.profile.dump: + self._dump() + + def _dump(self): + self.stats.sort_stats('time', 'calls') + self.stats.print_stats() + + def _runsnake(self): + filename = "%s.profile" % self.test.__name__ + try: + self.stats.dump_stats(filename) + os.system("runsnake %s" % filename) + finally: + os.remove(filename) + diff --git a/examples/performance/bulk_inserts.py b/examples/performance/bulk_inserts.py new file mode 100644 index 000000000..42ab920a6 --- /dev/null +++ b/examples/performance/bulk_inserts.py @@ -0,0 +1,132 @@ +from . import Profiler + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Integer, String, create_engine +from sqlalchemy.orm import Session + +Base = declarative_base() +engine = None + + +class Customer(Base): + __tablename__ = "customer" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + +def setup_database(dburl, echo): + global engine + engine = create_engine(dburl, echo=echo) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + + +@Profiler.profile +def test_flush_no_pk(n): + """Individual INSERT statements via the ORM, calling upon last row id""" + session = Session(bind=engine) + for chunk in range(0, n, 1000): + session.add_all([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i) + for i in range(chunk, chunk + 1000) + ]) + session.flush() + session.commit() + + +@Profiler.profile +def test_bulk_save_return_pks(n): + """Individual INSERT statements in "bulk", but calling upon last row id""" + session = Session(bind=engine) + session.bulk_save_objects([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ], return_defaults=True) + session.commit() + + +@Profiler.profile +def test_flush_pk_given(n): + """Batched INSERT statements via the ORM, PKs already defined""" + session = Session(bind=engine) + for chunk in range(0, n, 1000): + session.add_all([ + Customer( + id=i + 1, + name='customer name %d' % i, + description='customer description %d' % i) + for i in range(chunk, chunk + 1000) + ]) + session.flush() + session.commit() + + +@Profiler.profile +def test_bulk_save(n): + """Batched INSERT statements via the ORM in "bulk", discarding PK values.""" + session = Session(bind=engine) + session.bulk_save_objects([ + Customer( + name='customer name %d' % i, + 
description='customer description %d' % i + ) + for i in range(n) + ]) + session.commit() + + +@Profiler.profile +def test_bulk_insert_mappings(n): + """Batched INSERT statements via the ORM "bulk", using dictionaries instead of objects""" + session = Session(bind=engine) + session.bulk_insert_mappings(Customer, [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ]) + session.commit() + + +@Profiler.profile +def test_core_insert(n): + """A single Core INSERT construct inserting mappings in bulk.""" + conn = engine.connect() + conn.execute( + Customer.__table__.insert(), + [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ]) + + +@Profiler.profile +def test_sqlite_raw(n): + """pysqlite's pure C API inserting rows in bulk, no pure Python at all""" + conn = engine.raw_connection() + cursor = conn.cursor() + cursor.executemany( + "INSERT INTO customer (name, description) VALUES(:name, :description)", + [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ] + ) + conn.commit() + + +if __name__ == '__main__': + Profiler.main(setup=setup_database) diff --git a/examples/performance/inserts.py b/examples/performance/inserts.py deleted file mode 100644 index 469501d8d..000000000 --- a/examples/performance/inserts.py +++ /dev/null @@ -1,148 +0,0 @@ -import time - -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, Integer, String, create_engine -from sqlalchemy.orm import Session - -Base = declarative_base() -engine = None - - -class Customer(Base): - __tablename__ = "customer" - id = Column(Integer, primary_key=True) - name = Column(String(255)) - description = Column(String(255)) - - -def setup_database(): - global engine - engine = create_engine("sqlite:///insert_speed.db", echo=False) - Base.metadata.drop_all(engine) - Base.metadata.create_all(engine) - -_tests = [] - - -def _test(fn): - _tests.append(fn) - return fn - - -@_test -def test_flush_no_pk(n): - """Individual INSERT statements via the ORM, calling upon last row id""" - session = Session(bind=engine) - for chunk in range(0, n, 1000): - session.add_all([ - Customer( - name='customer name %d' % i, - description='customer description %d' % i) - for i in range(chunk, chunk + 1000) - ]) - session.flush() - session.commit() - - -@_test -def test_bulk_save_return_pks(n): - """Individual INSERT statements in "bulk", but calling upon last row id""" - session = Session(bind=engine) - session.bulk_save_objects([ - Customer( - name='customer name %d' % i, - description='customer description %d' % i - ) - for i in range(n) - ], return_defaults=True) - session.commit() - - -@_test -def test_flush_pk_given(n): - """Batched INSERT statements via the ORM, PKs already defined""" - session = Session(bind=engine) - for chunk in range(0, n, 1000): - session.add_all([ - Customer( - id=i + 1, - name='customer name %d' % i, - description='customer description %d' % i) - for i in range(chunk, chunk + 1000) - ]) - session.flush() - session.commit() - - -@_test -def test_bulk_save(n): - """Batched INSERT statements via the ORM in "bulk", discarding PK values.""" - session = Session(bind=engine) - session.bulk_save_objects([ - Customer( - name='customer name %d' % i, - description='customer description %d' % i - ) - for i in range(n) - ]) - session.commit() - - -@_test -def test_bulk_insert_mappings(n): - """Batched INSERT statements via the ORM "bulk", 
using dictionaries instead of objects""" - session = Session(bind=engine) - session.bulk_insert_mappings(Customer, [ - dict( - name='customer name %d' % i, - description='customer description %d' % i - ) - for i in range(n) - ]) - session.commit() - - -@_test -def test_core_insert(n): - """A single Core INSERT construct inserting mappings in bulk.""" - conn = engine.connect() - conn.execute( - Customer.__table__.insert(), - [ - dict( - name='customer name %d' % i, - description='customer description %d' % i - ) - for i in range(n) - ]) - - -@_test -def test_sqlite_raw(n): - """pysqlite's pure C API inserting rows in bulk, no pure Python at all""" - conn = engine.raw_connection() - cursor = conn.cursor() - cursor.executemany( - "INSERT INTO customer (name, description) VALUES(:name, :description)", - [ - dict( - name='customer name %d' % i, - description='customer description %d' % i - ) - for i in range(n) - ] - ) - conn.commit() - - -def run_tests(n): - for fn in _tests: - setup_database() - now = time.time() - fn(n) - total = time.time() - now - - print("Test: %s; Total time %s" % (fn.__doc__, total)) - -if __name__ == '__main__': - run_tests(100000) -- cgit v1.2.1 From 2c081f9a4af8928505ce4ea6ca2747ccb2e649c7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Sep 2014 19:30:38 -0400 Subject: - large resultsets --- examples/performance/__init__.py | 22 +++-- examples/performance/bulk_inserts.py | 35 ++++--- examples/performance/large_resultsets.py | 155 +++++++++++++++++++++++++++++++ examples/performance/single_inserts.py | 145 +++++++++++++++++++++++++++++ 4 files changed, 337 insertions(+), 20 deletions(-) create mode 100644 examples/performance/large_resultsets.py create mode 100644 examples/performance/single_inserts.py diff --git a/examples/performance/__init__.py b/examples/performance/__init__.py index ae914db96..b57f25b94 100644 --- a/examples/performance/__init__.py +++ b/examples/performance/__init__.py @@ -44,12 +44,12 @@ import os import time - class Profiler(object): tests = [] - def __init__(self, setup, options): + def __init__(self, options, setup=None, setup_once=None): self.setup = setup + self.setup_once = setup_once self.test = options.test self.dburl = options.dburl self.runsnake = options.runsnake @@ -72,6 +72,9 @@ class Profiler(object): else: tests = self.tests + if self.setup_once: + print("Running setup once...") + self.setup_once(self.dburl, self.echo, self.num) print("Tests to run: %s" % ", ".join([t.__name__ for t in tests])) for test in tests: self._run_test(test) @@ -100,14 +103,15 @@ class Profiler(object): self.stats.append(TestResult(self, fn, total_time=total)) def _run_test(self, fn): - self.setup(self.dburl, self.echo) + if self.setup: + self.setup(self.dburl, self.echo, self.num) if self.profile or self.runsnake or self.dump: self._run_with_profile(fn) else: self._run_with_time(fn) @classmethod - def main(cls, setup): + def main(cls, num, setup=None, setup_once=None): parser = argparse.ArgumentParser() parser.add_argument( @@ -119,8 +123,9 @@ class Profiler(object): help="database URL, default sqlite:///profile.db" ) parser.add_argument( - '--num', type=int, default=100000, - help="Number of iterations/items/etc for tests, default 100000" + '--num', type=int, default=num, + help="Number of iterations/items/etc for tests; " + "default is %d module-specific" % num ) parser.add_argument( '--profile', action='store_true', @@ -133,13 +138,12 @@ class Profiler(object): help='invoke runsnakerun (implies --profile)') parser.add_argument( '--echo', 
action='store_true', - help="Echo SQL output" - ) + help="Echo SQL output") args = parser.parse_args() args.profile = args.profile or args.dump or args.runsnake - Profiler(setup, args).run() + Profiler(args, setup=setup, setup_once=setup_once).run() class TestResult(object): diff --git a/examples/performance/bulk_inserts.py b/examples/performance/bulk_inserts.py index 42ab920a6..648d5f2aa 100644 --- a/examples/performance/bulk_inserts.py +++ b/examples/performance/bulk_inserts.py @@ -1,7 +1,7 @@ from . import Profiler from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, Integer, String, create_engine +from sqlalchemy import Column, Integer, String, create_engine, bindparam from sqlalchemy.orm import Session Base = declarative_base() @@ -15,7 +15,7 @@ class Customer(Base): description = Column(String(255)) -def setup_database(dburl, echo): +def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) @@ -111,22 +111,35 @@ def test_core_insert(n): @Profiler.profile -def test_sqlite_raw(n): - """pysqlite's pure C API inserting rows in bulk, no pure Python at all""" - conn = engine.raw_connection() +def test_dbapi_raw(n): + """The DBAPI's pure C API inserting rows in bulk, no pure Python at all""" + + conn = engine.pool._creator() cursor = conn.cursor() - cursor.executemany( - "INSERT INTO customer (name, description) VALUES(:name, :description)", - [ + compiled = Customer.__table__.insert().values( + name=bindparam('name'), + description=bindparam('description')).\ + compile(dialect=engine.dialect) + + if compiled.positional: + args = ( + ('customer name %d' % i, 'customer description %d' % i) + for i in range(n)) + else: + args = ( dict( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) - ] + ) + + cursor.executemany( + str(compiled), + list(args) ) conn.commit() - + conn.close() if __name__ == '__main__': - Profiler.main(setup=setup_database) + Profiler.main(setup=setup_database, num=100000) diff --git a/examples/performance/large_resultsets.py b/examples/performance/large_resultsets.py new file mode 100644 index 000000000..268c6dc87 --- /dev/null +++ b/examples/performance/large_resultsets.py @@ -0,0 +1,155 @@ +"""In this series of tests, we are looking at time to load 1M very small +and simple rows. + +""" +from . 
import Profiler + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Integer, String, create_engine, literal_column +from sqlalchemy.orm import Session, Bundle + +Base = declarative_base() +engine = None + + +class Customer(Base): + __tablename__ = "customer" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + +def setup_database(dburl, echo, num): + global engine + engine = create_engine(dburl, echo=echo) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + + s = Session(engine) + for chunk in range(0, num, 10000): + s.bulk_insert_mappings(Customer, [ + { + 'name': 'customer name %d' % i, + 'description': 'customer description %d' % i + } for i in range(chunk, chunk + 10000) + ]) + s.commit() + + +@Profiler.profile +def test_orm_full_objects(n): + """Load fully tracked objects using the ORM.""" + + sess = Session(engine) + # avoid using all() so that we don't have the overhead of building + # a large list of full objects in memory + for obj in sess.query(Customer).yield_per(1000).limit(n): + pass + + +@Profiler.profile +def test_orm_bundles(n): + """Load lightweight "bundle" objects using the ORM.""" + + sess = Session(engine) + bundle = Bundle('customer', + Customer.id, Customer.name, Customer.description) + for row in sess.query(bundle).yield_per(10000).limit(n): + pass + + +@Profiler.profile +def test_orm_columns(n): + """Load individual columns into named tuples using the ORM.""" + + sess = Session(engine) + for row in sess.query( + Customer.id, Customer.name, + Customer.description).yield_per(10000).limit(n): + pass + + +@Profiler.profile +def test_core_fetchall(n): + """Load Core result rows using Core / fetchall.""" + + with engine.connect() as conn: + result = conn.execute(Customer.__table__.select().limit(n)).fetchall() + for row in result: + data = row['id'], row['name'], row['description'] + + +@Profiler.profile +def test_core_fetchchunks_w_streaming(n): + """Load Core result rows using Core with fetchmany and + streaming results.""" + + with engine.connect() as conn: + result = conn.execution_options(stream_results=True).\ + execute(Customer.__table__.select().limit(n)) + while True: + chunk = result.fetchmany(10000) + if not chunk: + break + for row in chunk: + data = row['id'], row['name'], row['description'] + + +@Profiler.profile +def test_core_fetchchunks(n): + """Load Core result rows using Core / fetchmany.""" + + with engine.connect() as conn: + result = conn.execute(Customer.__table__.select().limit(n)) + while True: + chunk = result.fetchmany(10000) + if not chunk: + break + for row in chunk: + data = row['id'], row['name'], row['description'] + + +@Profiler.profile +def test_dbapi_fetchall(n): + """Load DBAPI cursor rows using fetchall()""" + + _test_dbapi_raw(n, True) + + +@Profiler.profile +def test_dbapi_fetchchunks(n): + """Load DBAPI cursor rows using fetchmany() + (usually doesn't limit memory)""" + + _test_dbapi_raw(n, False) + + +def _test_dbapi_raw(n, fetchall): + compiled = Customer.__table__.select().limit(n).\ + compile( + dialect=engine.dialect, + compile_kwargs={"literal_binds": True}) + + sql = str(compiled) + + import pdb + pdb.set_trace() + conn = engine.raw_connection() + cursor = conn.cursor() + cursor.execute(sql) + + if fetchall: + for row in cursor.fetchall(): + # ensure that we fully fetch! 
+ data = row[0], row[1], row[2] + else: + while True: + chunk = cursor.fetchmany(10000) + if not chunk: + break + for row in chunk: + data = row[0], row[1], row[2] + conn.close() + +if __name__ == '__main__': + Profiler.main(setup_once=setup_database, num=1000000) diff --git a/examples/performance/single_inserts.py b/examples/performance/single_inserts.py new file mode 100644 index 000000000..671bbbe9c --- /dev/null +++ b/examples/performance/single_inserts.py @@ -0,0 +1,145 @@ +"""In this series of tests, we're looking at a method that inserts a row +within a distinct transaction, and afterwards returns to essentially a +"closed" state. This would be analogous to an API call that starts up +a database connection, inserts the row, commits and closes. + +""" +from . import Profiler + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Integer, String, create_engine, bindparam, pool +from sqlalchemy.orm import Session + +Base = declarative_base() +engine = None + + +class Customer(Base): + __tablename__ = "customer" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + +def setup_database(dburl, echo, num): + global engine + engine = create_engine(dburl, echo=echo) + if engine.dialect.name == 'sqlite': + engine.pool = pool.StaticPool(creator=engine.pool._creator) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + + +@Profiler.profile +def test_orm_commit(n): + """Individual INSERT/COMMIT pairs via the ORM""" + + for i in range(n): + session = Session(bind=engine) + session.add( + Customer( + name='customer name %d' % i, + description='customer description %d' % i) + ) + session.commit() + + +@Profiler.profile +def test_bulk_save(n): + """Individual INSERT/COMMIT pairs using the "bulk" API """ + + for i in range(n): + session = Session(bind=engine) + session.bulk_save_objects([ + Customer( + name='customer name %d' % i, + description='customer description %d' % i + )]) + session.commit() + + +@Profiler.profile +def test_bulk_insert_dictionaries(n): + """Individual INSERT/COMMIT pairs using the "bulk" API with dictionaries""" + + for i in range(n): + session = Session(bind=engine) + session.bulk_insert_mappings(Customer, [ + dict( + name='customer name %d' % i, + description='customer description %d' % i + )]) + session.commit() + + +@Profiler.profile +def test_core(n): + """Individual INSERT/COMMIT pairs using Core.""" + + for i in range(n): + with engine.begin() as conn: + conn.execute( + Customer.__table__.insert(), + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + ) + + +@Profiler.profile +def test_dbapi_raw_w_connect(n): + """Individual INSERT/COMMIT pairs using a pure DBAPI connection, + connect each time.""" + + _test_dbapi_raw(n, True) + + +@Profiler.profile +def test_dbapi_raw_w_pool(n): + """Individual INSERT/COMMIT pairs using a pure DBAPI connection, + using a connection pool.""" + + _test_dbapi_raw(n, False) + + +def _test_dbapi_raw(n, connect): + compiled = Customer.__table__.insert().values( + name=bindparam('name'), + description=bindparam('description')).\ + compile(dialect=engine.dialect) + + if compiled.positional: + args = ( + ('customer name %d' % i, 'customer description %d' % i) + for i in range(n)) + else: + args = ( + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + for i in range(n) + ) + sql = str(compiled) + + if connect: + for arg in args: + # there's no connection pool, so if 
these were distinct + # calls, we'd be connecting each time + conn = engine.pool._creator() + cursor = conn.cursor() + cursor.execute(sql, arg) + conn.commit() + conn.close() + else: + for arg in args: + conn = engine.raw_connection() + cursor = conn.cursor() + cursor.execute(sql, arg) + conn.commit() + conn.close() + + +if __name__ == '__main__': + Profiler.main(setup=setup_database, num=10000) -- cgit v1.2.1 From cbef6a7d58ee42e33167a14e6a31a124aa0bf08e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Sep 2014 20:07:08 -0400 Subject: refine --- examples/performance/large_resultsets.py | 66 +++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/examples/performance/large_resultsets.py b/examples/performance/large_resultsets.py index 7383db734..c9ce23d61 100644 --- a/examples/performance/large_resultsets.py +++ b/examples/performance/large_resultsets.py @@ -1,11 +1,22 @@ -"""In this series of tests, we are looking at time to load 1M very small -and simple rows. +"""In this series of tests, we are looking at time to load a large number +of very small and simple rows. + +A special test here illustrates the difference between fetching the +rows from the raw DBAPI and throwing them away, vs. assembling each +row into a completely basic Python object and appending to a list. The +time spent typically more than doubles. The point is that while +DBAPIs will give you raw rows very fast if they are written in C, the +moment you do anything with those rows, even something trivial, +overhead grows extremely fast in cPython. SQLAlchemy's Core and +lighter-weight ORM options add absolutely minimal overhead, and the +full blown ORM doesn't do terribly either even though mapped objects +provide a huge amount of functionality. """ from . 
import Profiler from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, Integer, String, create_engine, literal_column +from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import Session, Bundle Base = declarative_base() @@ -71,7 +82,7 @@ def test_orm_columns(n): @Profiler.profile def test_core_fetchall(n): - """Load Core result rows using Core / fetchall.""" + """Load Core result rows using fetchall.""" with engine.connect() as conn: result = conn.execute(Customer.__table__.select().limit(n)).fetchall() @@ -80,9 +91,8 @@ def test_core_fetchall(n): @Profiler.profile -def test_core_fetchchunks_w_streaming(n): - """Load Core result rows using Core with fetchmany and - streaming results.""" +def test_core_fetchmany_w_streaming(n): + """Load Core result rows using fetchmany/streaming.""" with engine.connect() as conn: result = conn.execution_options(stream_results=True).\ @@ -96,7 +106,7 @@ def test_core_fetchchunks_w_streaming(n): @Profiler.profile -def test_core_fetchchunks(n): +def test_core_fetchmany(n): """Load Core result rows using Core / fetchmany.""" with engine.connect() as conn: @@ -110,44 +120,54 @@ def test_core_fetchchunks(n): @Profiler.profile -def test_dbapi_fetchall(n): - """Load DBAPI cursor rows using fetchall()""" +def test_dbapi_fetchall_plus_append_objects(n): + """Load rows using DBAPI fetchall(), make a list of objects.""" _test_dbapi_raw(n, True) @Profiler.profile -def test_dbapi_fetchchunks(n): - """Load DBAPI cursor rows using fetchmany() - (usually doesn't limit memory)""" +def test_dbapi_fetchall_no_object(n): + """Load rows using DBAPI fetchall(), don't make any objects.""" _test_dbapi_raw(n, False) -def _test_dbapi_raw(n, fetchall): +def _test_dbapi_raw(n, make_objects): compiled = Customer.__table__.select().limit(n).\ compile( dialect=engine.dialect, compile_kwargs={"literal_binds": True}) + if make_objects: + # because if you're going to roll your own, you're probably + # going to do this, so see how this pushes you right back into + # ORM land anyway :) + class SimpleCustomer(object): + def __init__(self, id, name, description): + self.id = id + self.name = name + self.description = description + sql = str(compiled) conn = engine.raw_connection() cursor = conn.cursor() cursor.execute(sql) - if fetchall: + if make_objects: + result = [] for row in cursor.fetchall(): # ensure that we fully fetch! - data = row[0], row[1], row[2] + customer = SimpleCustomer( + id=row[0], name=row[1], description=row[2]) + result.append(customer) else: - while True: - chunk = cursor.fetchmany(10000) - if not chunk: - break - for row in chunk: - data = row[0], row[1], row[2] + for row in cursor.fetchall(): + # ensure that we fully fetch! 
+ data = row[0], row[1], row[2] + conn.close() if __name__ == '__main__': - Profiler.main(setup_once=setup_database, num=1000000) + Profiler.main(setup_once=setup_database, num=500000) -- cgit v1.2.1 From eb81531275c07a0ab8c74eadc7881cfcff27ba21 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Sep 2014 20:30:52 -0400 Subject: tweak --- examples/performance/bulk_inserts.py | 11 ++++++++--- examples/performance/large_resultsets.py | 4 +--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/examples/performance/bulk_inserts.py b/examples/performance/bulk_inserts.py index 648d5f2aa..531003aa6 100644 --- a/examples/performance/bulk_inserts.py +++ b/examples/performance/bulk_inserts.py @@ -1,3 +1,8 @@ +"""This series of tests illustrates different ways to INSERT a large number +of rows in bulk. + + +""" from . import Profiler from sqlalchemy.ext.declarative import declarative_base @@ -69,7 +74,7 @@ def test_flush_pk_given(n): @Profiler.profile def test_bulk_save(n): - """Batched INSERT statements via the ORM in "bulk", discarding PK values.""" + """Batched INSERT statements via the ORM in "bulk", discarding PKs.""" session = Session(bind=engine) session.bulk_save_objects([ Customer( @@ -83,7 +88,7 @@ def test_bulk_save(n): @Profiler.profile def test_bulk_insert_mappings(n): - """Batched INSERT statements via the ORM "bulk", using dictionaries instead of objects""" + """Batched INSERT statements via the ORM "bulk", using dictionaries.""" session = Session(bind=engine) session.bulk_insert_mappings(Customer, [ dict( @@ -112,7 +117,7 @@ def test_core_insert(n): @Profiler.profile def test_dbapi_raw(n): - """The DBAPI's pure C API inserting rows in bulk, no pure Python at all""" + """The DBAPI's API inserting rows in bulk.""" conn = engine.pool._creator() cursor = conn.cursor() diff --git a/examples/performance/large_resultsets.py b/examples/performance/large_resultsets.py index c9ce23d61..77c0246fc 100644 --- a/examples/performance/large_resultsets.py +++ b/examples/performance/large_resultsets.py @@ -121,7 +121,7 @@ def test_core_fetchmany(n): @Profiler.profile def test_dbapi_fetchall_plus_append_objects(n): - """Load rows using DBAPI fetchall(), make a list of objects.""" + """Load rows using DBAPI fetchall(), generate an object for each row.""" _test_dbapi_raw(n, True) @@ -156,12 +156,10 @@ def _test_dbapi_raw(n, make_objects): cursor.execute(sql) if make_objects: - result = [] for row in cursor.fetchall(): # ensure that we fully fetch! customer = SimpleCustomer( id=row[0], name=row[1], description=row[2]) - result.append(customer) else: for row in cursor.fetchall(): # ensure that we fully fetch! -- cgit v1.2.1 From d2c05c36a5c5f5b4838e924b4de2280f73916c99 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Sep 2014 20:55:38 -0400 Subject: - add a test that shows query caching. 
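As context for the test added below: the pattern being measured is re-use of a ``compiled_cache`` dictionary supplied via ``Connection.execution_options()``. A minimal, hypothetical sketch of that pattern follows; the ``Customer`` mapping and in-memory SQLite URL are stand-ins modeled on the performance examples and are not part of this patch:

    # illustrative sketch only; assumes a Customer mapping similar to the
    # one used in examples/performance/single_inserts.py
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()


    class Customer(Base):
        __tablename__ = "customer"
        id = Column(Integer, primary_key=True)
        name = Column(String(255))


    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    cache = {}  # compiled forms of statements accumulate in this dict
    ins = Customer.__table__.insert()
    for i in range(3):
        with engine.begin() as conn:
            # the INSERT construct is compiled on first use; subsequent
            # executions look up the cached compiled form instead of
            # recompiling the construct each time
            conn.execution_options(compiled_cache=cache).execute(
                ins, dict(name="customer name %d" % i))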
--- examples/performance/single_inserts.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/examples/performance/single_inserts.py b/examples/performance/single_inserts.py index 671bbbe9c..4178ccea8 100644 --- a/examples/performance/single_inserts.py +++ b/examples/performance/single_inserts.py @@ -87,6 +87,23 @@ def test_core(n): ) +@Profiler.profile +def test_core_query_caching(n): + """Individual INSERT/COMMIT pairs using Core with query caching""" + + cache = {} + ins = Customer.__table__.insert() + for i in range(n): + with engine.begin() as conn: + conn.execution_options(compiled_cache=cache).execute( + ins, + dict( + name='customer name %d' % i, + description='customer description %d' % i + ) + ) + + @Profiler.profile def test_dbapi_raw_w_connect(n): """Individual INSERT/COMMIT pairs using a pure DBAPI connection, @@ -130,6 +147,7 @@ def _test_dbapi_raw(n, connect): conn = engine.pool._creator() cursor = conn.cursor() cursor.execute(sql, arg) + lastrowid = cursor.lastrowid conn.commit() conn.close() else: @@ -137,6 +155,7 @@ def _test_dbapi_raw(n, connect): conn = engine.raw_connection() cursor = conn.cursor() cursor.execute(sql, arg) + lastrowid = cursor.lastrowid conn.commit() conn.close() -- cgit v1.2.1 From fa7c8f88113d2e769274dee4aa4247b9c9aadec8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 6 Sep 2014 13:01:21 -0400 Subject: - try to finish up the performance example for now --- doc/build/orm/examples.rst | 7 + examples/performance/__init__.py | 299 +++++++++++++++++++++++++++---- examples/performance/__main__.py | 5 + examples/performance/bulk_inserts.py | 6 +- examples/performance/large_resultsets.py | 6 +- examples/performance/single_inserts.py | 12 +- 6 files changed, 291 insertions(+), 44 deletions(-) create mode 100644 examples/performance/__main__.py diff --git a/doc/build/orm/examples.rst b/doc/build/orm/examples.rst index b820dba9f..93478381a 100644 --- a/doc/build/orm/examples.rst +++ b/doc/build/orm/examples.rst @@ -62,6 +62,13 @@ Nested Sets .. automodule:: examples.nested_sets +.. _examples_performance: + +Performance +----------- + +.. automodule:: examples.performance + .. _examples_relationships: Relationship Join Conditions diff --git a/examples/performance/__init__.py b/examples/performance/__init__.py index b57f25b94..6e2e1fc89 100644 --- a/examples/performance/__init__.py +++ b/examples/performance/__init__.py @@ -1,55 +1,232 @@ """A performance profiling suite for a variety of SQLAlchemy use cases. -The suites here each focus on some specific type of use case, one which -has a particular performance profile: +Each suite focuses on a specific use case with a particular performance +profile and associated implications: * bulk inserts * individual inserts, with or without transactions * fetching large numbers of rows -* running lots of small queries +* running lots of small queries (TODO) -All suites include a variety of use patterns with both the Core and -ORM, and are sorted in order of performance from worst to greatest, -inversely based on amount of functionality provided by SQLAlchemy, +All suites include a variety of use patterns illustrating both Core +and ORM use, and are generally sorted in order of performance from worst +to greatest, inversely based on amount of functionality provided by SQLAlchemy, greatest to least (these two things generally correspond perfectly). 
-Each suite is run as a module, and provides a consistent command line -interface:: +A command line tool is presented at the package level which allows +individual suites to be run:: - $ python -m examples.performance.bulk_inserts --profile --num 1000 + $ python -m examples.performance --help + usage: python -m examples.performance [-h] [--test TEST] [--dburl DBURL] + [--num NUM] [--profile] [--dump] + [--runsnake] [--echo] -Using ``--help`` will allow all options:: + {bulk_inserts,large_resultsets,single_inserts} - $ python -m examples.performance.bulk_inserts --help -usage: bulk_inserts.py [-h] [--test TEST] [--dburl DBURL] [--num NUM] - [--profile] [--dump] [--runsnake] [--echo] + positional arguments: + {bulk_inserts,large_resultsets,single_inserts} + suite to run -optional arguments: - -h, --help show this help message and exit - --test TEST run specific test name - --dburl DBURL database URL, default sqlite:///profile.db - --num NUM Number of iterations/items/etc for tests, default 100000 - --profile run profiling and dump call counts - --dump dump full call profile (implies --profile) - --runsnake invoke runsnakerun (implies --profile) - --echo Echo SQL output + optional arguments: + -h, --help show this help message and exit + --test TEST run specific test name + --dburl DBURL database URL, default sqlite:///profile.db + --num NUM Number of iterations/items/etc for tests; default is 0 + module-specific + --profile run profiling and dump call counts + --dump dump full call profile (implies --profile) + --runsnake invoke runsnakerun (implies --profile) + --echo Echo SQL output +An example run looks like:: + + $ python -m examples.performance bulk_inserts + +Or with options:: + + $ python -m examples.performance bulk_inserts \\ + --dburl mysql+mysqldb://scott:tiger@localhost/test \\ + --profile --num 1000 + + +Running all tests with time +--------------------------- + +This is the default form of run:: + + $ python -m examples.performance single_inserts + Tests to run: test_orm_commit, test_bulk_save, + test_bulk_insert_dictionaries, test_core, + test_core_query_caching, test_dbapi_raw_w_connect, + test_dbapi_raw_w_pool + + test_orm_commit : Individual INSERT/COMMIT pairs via the + ORM (10000 iterations); total time 13.690218 sec + test_bulk_save : Individual INSERT/COMMIT pairs using + the "bulk" API (10000 iterations); total time 11.290371 sec + test_bulk_insert_dictionaries : Individual INSERT/COMMIT pairs using + the "bulk" API with dictionaries (10000 iterations); + total time 10.814626 sec + test_core : Individual INSERT/COMMIT pairs using Core. + (10000 iterations); total time 9.665620 sec + test_core_query_caching : Individual INSERT/COMMIT pairs using Core + with query caching (10000 iterations); total time 9.209010 sec + test_dbapi_raw_w_connect : Individual INSERT/COMMIT pairs w/ DBAPI + + connection each time (10000 iterations); total time 9.551103 sec + test_dbapi_raw_w_pool : Individual INSERT/COMMIT pairs w/ DBAPI + + connection pool (10000 iterations); total time 8.001813 sec + +Dumping Profiles for Individual Tests +-------------------------------------- + +A Python profile output can be dumped for all tests, or more commonly +individual tests:: + + $ python -m examples.performance single_inserts --test test_core --num 1000 --dump + Tests to run: test_core + test_core : Individual INSERT/COMMIT pairs using Core. 
(1000 iterations); total fn calls 186109 + 186109 function calls (186102 primitive calls) in 1.089 seconds + + Ordered by: internal time, call count + + ncalls tottime percall cumtime percall filename:lineno(function) + 1000 0.634 0.001 0.634 0.001 {method 'commit' of 'sqlite3.Connection' objects} + 1000 0.154 0.000 0.154 0.000 {method 'execute' of 'sqlite3.Cursor' objects} + 1000 0.021 0.000 0.074 0.000 /Users/classic/dev/sqlalchemy/lib/sqlalchemy/sql/compiler.py:1950(_get_colparams) + 1000 0.015 0.000 0.034 0.000 /Users/classic/dev/sqlalchemy/lib/sqlalchemy/engine/default.py:503(_init_compiled) + 1 0.012 0.012 1.091 1.091 examples/performance/single_inserts.py:79(test_core) + + ... + +Using RunSnake +-------------- + +This option requires the `RunSnake `_ +command line tool be installed:: + + $ python -m examples.performance single_inserts --test test_core --num 1000 --runsnake + +A graphical RunSnake output will be displayed. + +.. _examples_profiling_writeyourown: + +Writing your Own Suites +----------------------- + +The profiler suite system is extensible, and can be applied to your own set +of tests. This is a valuable technique to use in deciding upon the proper +approach for some performance-critical set of routines. For example, +if we wanted to profile the difference between several kinds of loading, +we can create a file ``test_loads.py``, with the following content:: + + from examples.performance import Profiler + from sqlalchemy import Integer, Column, create_engine, ForeignKey + from sqlalchemy.orm import relationship, joinedload, subqueryload, Session + from sqlalchemy.ext.declarative import declarative_base + + Base = declarative_base() + engine = None + session = None + + + class Parent(Base): + __tablename__ = 'parent' + id = Column(Integer, primary_key=True) + children = relationship("Child") + + + class Child(Base): + __tablename__ = 'child' + id = Column(Integer, primary_key=True) + parent_id = Column(Integer, ForeignKey('parent.id')) + + + # Init with name of file, default number of items + Profiler.init("test_loads", 1000) + + + @Profiler.setup_once + def setup_once(dburl, echo, num): + "setup once. create an engine, insert fixture data" + global engine + engine = create_engine(dburl, echo=echo) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + sess = Session(engine) + sess.add_all([ + Parent(children=[Child() for j in range(100)]) + for i in range(num) + ]) + sess.commit() + + + @Profiler.setup + def setup(dburl, echo, num): + "setup per test. create a new Session." + global session + session = Session(engine) + # pre-connect so this part isn't profiled (if we choose) + session.connection() + + + @Profiler.profile + def test_lazyload(n): + "load everything, no eager loading." + + for parent in session.query(Parent): + parent.children + + + @Profiler.profile + def test_joinedload(n): + "load everything, joined eager loading." + + for parent in session.query(Parent).options(joinedload("children")): + parent.children + + + @Profiler.profile + def test_subqueryload(n): + "load everything, subquery eager loading." + + for parent in session.query(Parent).options(subqueryload("children")): + parent.children + + if __name__ == '__main__': + Profiler.main() + +We can run our new script directly:: + + $ python test_loads.py --dburl postgresql+psycopg2://scott:tiger@localhost/test + Running setup once... + Tests to run: test_lazyload, test_joinedload, test_subqueryload + test_lazyload : load everything, no eager loading. 
(1000 iterations); total time 11.971159 sec + test_joinedload : load everything, joined eager loading. (1000 iterations); total time 2.754592 sec + test_subqueryload : load everything, subquery eager loading. (1000 iterations); total time 2.977696 sec + +As well as see RunSnake output for an individual test:: + + $ python test_loads.py --num 100 --runsnake --test test_joinedload """ import argparse import cProfile -import StringIO import pstats import os import time +import re +import sys class Profiler(object): tests = [] - def __init__(self, options, setup=None, setup_once=None): - self.setup = setup - self.setup_once = setup_once + _setup = None + _setup_once = None + name = None + num = 0 + + def __init__(self, options): self.test = options.test self.dburl = options.dburl self.runsnake = options.runsnake @@ -59,11 +236,34 @@ class Profiler(object): self.echo = options.echo self.stats = [] + @classmethod + def init(cls, name, num): + cls.name = name + cls.num = num + @classmethod def profile(cls, fn): + if cls.name is None: + raise ValueError( + "Need to call Profile.init(, ) first.") cls.tests.append(fn) return fn + @classmethod + def setup(cls, fn): + if cls._setup is not None: + raise ValueError("setup function already set to %s" % cls._setup) + cls._setup = staticmethod(fn) + return fn + + @classmethod + def setup_once(cls, fn): + if cls._setup_once is not None: + raise ValueError( + "setup_once function already set to %s" % cls._setup_once) + cls._setup_once = staticmethod(fn) + return fn + def run(self): if self.test: tests = [fn for fn in self.tests if fn.__name__ == self.test] @@ -72,9 +272,9 @@ class Profiler(object): else: tests = self.tests - if self.setup_once: + if self._setup_once: print("Running setup once...") - self.setup_once(self.dburl, self.echo, self.num) + self._setup_once(self.dburl, self.echo, self.num) print("Tests to run: %s" % ", ".join([t.__name__ for t in tests])) for test in tests: self._run_test(test) @@ -88,8 +288,7 @@ class Profiler(object): finally: pr.disable() - output = StringIO.StringIO() - stats = pstats.Stats(pr, stream=output).sort_stats('cumulative') + stats = pstats.Stats(pr).sort_stats('cumulative') self.stats.append(TestResult(self, fn, stats=stats)) return result @@ -103,29 +302,42 @@ class Profiler(object): self.stats.append(TestResult(self, fn, total_time=total)) def _run_test(self, fn): - if self.setup: - self.setup(self.dburl, self.echo, self.num) + if self._setup: + self._setup(self.dburl, self.echo, self.num) if self.profile or self.runsnake or self.dump: self._run_with_profile(fn) else: self._run_with_time(fn) @classmethod - def main(cls, num, setup=None, setup_once=None): - parser = argparse.ArgumentParser() + def main(cls): + + parser = argparse.ArgumentParser("python -m examples.performance") + + if cls.name is None: + parser.add_argument( + "name", choices=cls._suite_names(), help="suite to run") + + if len(sys.argv) > 1: + potential_name = sys.argv[1] + try: + suite = __import__(__name__ + "." 
+ potential_name) + except ImportError: + pass parser.add_argument( "--test", type=str, help="run specific test name" ) + parser.add_argument( '--dburl', type=str, default="sqlite:///profile.db", help="database URL, default sqlite:///profile.db" ) parser.add_argument( - '--num', type=int, default=num, + '--num', type=int, default=cls.num, help="Number of iterations/items/etc for tests; " - "default is %d module-specific" % num + "default is %d module-specific" % cls.num ) parser.add_argument( '--profile', action='store_true', @@ -143,7 +355,19 @@ class Profiler(object): args.profile = args.profile or args.dump or args.runsnake - Profiler(args, setup=setup, setup_once=setup_once).run() + if cls.name is None: + suite = __import__(__name__ + "." + args.name) + + Profiler(args).run() + + @classmethod + def _suite_names(cls): + suites = [] + for file_ in os.listdir(os.path.dirname(__file__)): + match = re.match(r'^([a-z].*).py$', file_) + if match: + suites.append(match.group(1)) + return suites class TestResult(object): @@ -185,3 +409,4 @@ class TestResult(object): finally: os.remove(filename) + diff --git a/examples/performance/__main__.py b/examples/performance/__main__.py new file mode 100644 index 000000000..957d6c699 --- /dev/null +++ b/examples/performance/__main__.py @@ -0,0 +1,5 @@ +from . import Profiler + +if __name__ == '__main__': + Profiler.main() + diff --git a/examples/performance/bulk_inserts.py b/examples/performance/bulk_inserts.py index 531003aa6..9c3cff5b2 100644 --- a/examples/performance/bulk_inserts.py +++ b/examples/performance/bulk_inserts.py @@ -20,6 +20,10 @@ class Customer(Base): description = Column(String(255)) +Profiler.init("bulk_inserts", num=100000) + + +@Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) @@ -147,4 +151,4 @@ def test_dbapi_raw(n): conn.close() if __name__ == '__main__': - Profiler.main(setup=setup_database, num=100000) + Profiler.main() diff --git a/examples/performance/large_resultsets.py b/examples/performance/large_resultsets.py index 77c0246fc..0a5857b75 100644 --- a/examples/performance/large_resultsets.py +++ b/examples/performance/large_resultsets.py @@ -30,6 +30,10 @@ class Customer(Base): description = Column(String(255)) +Profiler.init("large_resultsets", num=500000) + + +@Profiler.setup_once def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) @@ -168,4 +172,4 @@ def _test_dbapi_raw(n, make_objects): conn.close() if __name__ == '__main__': - Profiler.main(setup_once=setup_database, num=500000) + Profiler.main() diff --git a/examples/performance/single_inserts.py b/examples/performance/single_inserts.py index 4178ccea8..cfce90300 100644 --- a/examples/performance/single_inserts.py +++ b/examples/performance/single_inserts.py @@ -21,6 +21,10 @@ class Customer(Base): description = Column(String(255)) +Profiler.init("single_inserts", num=10000) + + +@Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) @@ -106,16 +110,14 @@ def test_core_query_caching(n): @Profiler.profile def test_dbapi_raw_w_connect(n): - """Individual INSERT/COMMIT pairs using a pure DBAPI connection, - connect each time.""" + """Individual INSERT/COMMIT pairs w/ DBAPI + connection each time""" _test_dbapi_raw(n, True) @Profiler.profile def test_dbapi_raw_w_pool(n): - """Individual INSERT/COMMIT pairs using a pure DBAPI connection, - using a connection pool.""" + """Individual INSERT/COMMIT pairs w/ DBAPI + connection 
pool""" _test_dbapi_raw(n, False) @@ -161,4 +163,4 @@ def _test_dbapi_raw(n, connect): if __name__ == '__main__': - Profiler.main(setup=setup_database, num=10000) + Profiler.main() -- cgit v1.2.1 From b9d430af752b7cc955932a54a8f8db18f46d89a6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 16 Sep 2014 11:57:03 -0400 Subject: - add differentiating examples of list() vs. iteration --- examples/performance/large_resultsets.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/performance/large_resultsets.py b/examples/performance/large_resultsets.py index 0a5857b75..fbe77c759 100644 --- a/examples/performance/large_resultsets.py +++ b/examples/performance/large_resultsets.py @@ -52,12 +52,18 @@ def setup_database(dburl, echo, num): @Profiler.profile -def test_orm_full_objects(n): - """Load fully tracked objects using the ORM.""" +def test_orm_full_objects_list(n): + """Load fully tracked ORM objects into one big list().""" + + sess = Session(engine) + objects = list(sess.query(Customer).limit(n)) + + +@Profiler.profile +def test_orm_full_objects_chunks(n): + """Load fully tracked ORM objects a chunk at a time using yield_per().""" sess = Session(engine) - # avoid using all() so that we don't have the overhead of building - # a large list of full objects in memory for obj in sess.query(Customer).yield_per(1000).limit(n): pass -- cgit v1.2.1 From 16d9d366bd80b3f9b42c89ceb3e392de15631188 Mon Sep 17 00:00:00 2001 From: jonathan vanasco Date: Fri, 3 Oct 2014 13:15:52 -0400 Subject: * adding 'isouter=False' to sqlalchemy.orm.query.Query (https://bitbucket.org/zzzeek/sqlalchemy/issue/3217/make-join-more-standard-or-improve-error) $ python setup.py develop $ pip install nose $ pip install mock $ ./sqla_nose.py test.orm.test_joins ..................................................................................................... 
---------------------------------------------------------------------- Ran 101 tests in 1.222s OK $ ./sqla_nose.py test.orm ......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................S......................................................................................................................................................................................................................................................................................................................S.......................................................................................................................................................................................................................................................................................................................................................S.......S..S.SSS.SS...............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................S................................S..S........................S...........................................................................................SSS.S.........SSSSSSSS......SSSSSSSSS........SS...SS...............S.............................S..............................................................SS..SS..............................................................................................................S. 
---------------------------------------------------------------------- Ran 3103 tests in 82.607s OK (SKIP=46) --- lib/sqlalchemy/orm/query.py | 11 ++++++---- test/orm/test_joins.py | 51 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 7b2ea7977..eaa3a8dcd 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1740,6 +1740,8 @@ class Query(object): anonymously aliased. Subsequent calls to :meth:`~.Query.filter` and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. + :param isouter=False: If True, the join used will be a left outer join, + just as if the ``outerjoin()`` method were called. :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original @@ -1757,13 +1759,15 @@ class Query(object): SQLAlchemy versions was the primary ORM-level joining interface. """ - aliased, from_joinpoint = kwargs.pop('aliased', False),\ - kwargs.pop('from_joinpoint', False) + aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\ + kwargs.pop('from_joinpoint', False),\ + kwargs.pop('isouter', False) if kwargs: raise TypeError("unknown arguments: %s" % ','.join(kwargs.keys)) + isouter = isouter return self._join(props, - outerjoin=False, create_aliases=aliased, + outerjoin=isouter, create_aliases=aliased, from_joinpoint=from_joinpoint) def outerjoin(self, *props, **kwargs): @@ -3385,7 +3389,6 @@ class _BundleEntity(_QueryEntity): self.supports_single_entity = self.bundle.single_entity - @property def entity_zero(self): for ent in self._entities: diff --git a/test/orm/test_joins.py b/test/orm/test_joins.py index 40bc01b5d..188af2b38 100644 --- a/test/orm/test_joins.py +++ b/test/orm/test_joins.py @@ -721,6 +721,13 @@ class JoinTest(QueryTest, AssertsCompiledSQL): filter_by(id=3).outerjoin('orders','address').filter_by(id=1).all() assert [User(id=7, name='jack')] == result + def test_overlapping_paths_join_isouter(self): + User = self.classes.User + + result = create_session().query(User).join('orders', 'items', isouter=True).\ + filter_by(id=3).join('orders','address', isouter=True).filter_by(id=1).all() + assert [User(id=7, name='jack')] == result + def test_from_joinpoint(self): Item, User, Order = (self.classes.Item, self.classes.User, @@ -1088,7 +1095,6 @@ class JoinTest(QueryTest, AssertsCompiledSQL): [User(name='fred')] ) - def test_aliased_classes(self): User, Address = self.classes.User, self.classes.Address @@ -1237,7 +1243,6 @@ class JoinTest(QueryTest, AssertsCompiledSQL): def test_joins_from_adapted_entities(self): User = self.classes.User - # test for #1853 session = create_session() @@ -1274,6 +1279,45 @@ class JoinTest(QueryTest, AssertsCompiledSQL): 'anon_2 ON anon_2.id = anon_1.users_id', use_default_dialect=True) + def test_joins_from_adapted_entities_isouter(self): + User = self.classes.User + + # test for #1853 + + session = create_session() + first = session.query(User) + second = session.query(User) + unioned = first.union(second) + subquery = session.query(User.id).subquery() + join = subquery, subquery.c.id == User.id + joined = unioned.join(*join, isouter=True) + self.assert_compile(joined, + 'SELECT anon_1.users_id AS ' + 'anon_1_users_id, anon_1.users_name AS ' + 'anon_1_users_name FROM (SELECT users.id ' + 'AS users_id, users.name AS 
users_name ' + 'FROM users UNION SELECT users.id AS ' + 'users_id, users.name AS users_name FROM ' + 'users) AS anon_1 LEFT OUTER JOIN (SELECT ' + 'users.id AS id FROM users) AS anon_2 ON ' + 'anon_2.id = anon_1.users_id', + use_default_dialect=True) + + first = session.query(User.id) + second = session.query(User.id) + unioned = first.union(second) + subquery = session.query(User.id).subquery() + join = subquery, subquery.c.id == User.id + joined = unioned.join(*join, isouter=True) + self.assert_compile(joined, + 'SELECT anon_1.users_id AS anon_1_users_id ' + 'FROM (SELECT users.id AS users_id FROM ' + 'users UNION SELECT users.id AS users_id ' + 'FROM users) AS anon_1 LEFT OUTER JOIN ' + '(SELECT users.id AS id FROM users) AS ' + 'anon_2 ON anon_2.id = anon_1.users_id', + use_default_dialect=True) + def test_reset_joinpoint(self): User = self.classes.User @@ -1282,6 +1326,9 @@ class JoinTest(QueryTest, AssertsCompiledSQL): result = create_session().query(User).join('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased).filter_by(id=1).all() assert [User(id=7, name='jack')] == result + result = create_session().query(User).join('orders', 'items', aliased=aliased, isouter=True).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased, isouter=True).filter_by(id=1).all() + assert [User(id=7, name='jack')] == result + result = create_session().query(User).outerjoin('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().outerjoin('orders','address', aliased=aliased).filter_by(id=1).all() assert [User(id=7, name='jack')] == result -- cgit v1.2.1 From 0f5a400b77862d2ae8f5d1a326fe9571da8fc0cb Mon Sep 17 00:00:00 2001 From: jonathan vanasco Date: Fri, 17 Oct 2014 19:35:29 -0400 Subject: added docs to clarify that sql statement is already in a dialect --- lib/sqlalchemy/events.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index 1ff35b8b0..86bd3653b 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -420,6 +420,12 @@ class ConnectionEvents(event.Events): context, executemany): log.info("Received statement: %s" % statement) + When the methods are called with a `statement` parameter, such as in + :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and + :meth:`.dbapi_error`, the statement is the exact SQL string that was + prepared for transmission to the DBAPI ``cursor`` in the connection's + :class:`.Dialect`. + The :meth:`.before_execute` and :meth:`.before_cursor_execute` events can also be established with the ``retval=True`` flag, which allows modification of the statement and parameters to be sent @@ -549,9 +555,8 @@ class ConnectionEvents(event.Events): def before_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events before execution, - receiving the string - SQL statement and DBAPI-specific parameter list to be invoked - against a cursor. + receiving the string SQL statement and DBAPI-specific parameter list to + be invoked against a cursor. This event is a good choice for logging as well as late modifications to the SQL string. 
It's less ideal for parameter modifications except @@ -571,7 +576,7 @@ class ConnectionEvents(event.Events): :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object - :param statement: string SQL statement + :param statement: string SQL statement, as to be passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. @@ -596,7 +601,7 @@ class ConnectionEvents(event.Events): :param cursor: DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the :class:`.ResultProxy`. - :param statement: string SQL statement + :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. @@ -640,7 +645,7 @@ class ConnectionEvents(event.Events): :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object - :param statement: string SQL statement + :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. -- cgit v1.2.1 From 25434e9209af9ee2c05b651bc4fe197541c0bd60 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Wed, 22 Oct 2014 15:09:05 -0400 Subject: Support additional args/kwargs on cursor method fdbsql has an optional nested kwarg, which is supported in the actual code, but not in the testing proxy --- lib/sqlalchemy/testing/engines.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 67c13231e..75bcc58e1 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -284,10 +284,10 @@ class DBAPIProxyCursor(object): """ - def __init__(self, engine, conn): + def __init__(self, engine, conn, *args, **kwargs): self.engine = engine self.connection = conn - self.cursor = conn.cursor() + self.cursor = conn.cursor(*args, **kwargs) def execute(self, stmt, parameters=None, **kw): if parameters: @@ -315,8 +315,10 @@ class DBAPIProxyConnection(object): self.engine = engine self.cursor_cls = cursor_cls - def cursor(self): - return self.cursor_cls(self.engine, self.conn) + def cursor(self, *args, **kwargs): + print "DPA", args + print "DPK", kwargs + return self.cursor_cls(self.engine, self.conn, *args, **kwargs) def close(self): self.conn.close() -- cgit v1.2.1 From 9c0eb840788ed5971f0876958cfb9866c7af918d Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 23 Oct 2014 10:24:35 -0400 Subject: Print useful traceback on error _expect_failure was rethrowing the exception without keeping the traceback, so it was really hard to find out what was actually wrong --- lib/sqlalchemy/testing/exclusions.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index 283d89e36..5ce8bcd84 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -12,6 +12,7 @@ from ..util import decorator from . import config from .. 
import util import inspect +import sys import contextlib @@ -120,20 +121,21 @@ class compound(object): try: return_value = fn(*args, **kw) - except Exception as ex: - self._expect_failure(config, ex, name=fn.__name__) + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + self._expect_failure(config, exc_type, exc_value, exc_traceback, name=fn.__name__) else: self._expect_success(config, name=fn.__name__) return return_value - def _expect_failure(self, config, ex, name='block'): + def _expect_failure(self, config, exc_type, exc_value, exc_traceback, name='block'): for fail in self.fails: if fail(config): print(("%s failed as expected (%s): %s " % ( name, fail._as_string(config), str(ex)))) break else: - raise ex + raise exc_type, exc_value, exc_traceback def _expect_success(self, config, name='block'): if not self.fails: -- cgit v1.2.1 From 9687b272bdc76dd318566f02aaafe26a6089d077 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 23 Oct 2014 11:46:34 -0400 Subject: Added new requirement for check_constraints --- test/engine/test_reflection.py | 1 + test/requirements.py | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py index c18b8b944..087610333 100644 --- a/test/engine/test_reflection.py +++ b/test/engine/test_reflection.py @@ -799,6 +799,7 @@ class ReflectionTest(fixtures.TestBase, ComparesTables): @testing.crashes('oracle', 'FIXME: unknown, confirm not fails_on') + @testing.requires.check_constraints @testing.provide_metadata def test_reserved(self): diff --git a/test/requirements.py b/test/requirements.py index 7eeabef2b..432dc645c 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -38,6 +38,12 @@ class DefaultRequirements(SuiteRequirements): no_support('mssql', 'not supported by database'), ]) + @property + def check_constraints(self): + """Target database must support check constraints.""" + + return exclusions.open() + @property def named_constraints(self): """target database must support names for constraints.""" -- cgit v1.2.1 From 2ce9333a24a1f894de4bf028f51eb1de28c10a3d Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 23 Oct 2014 13:01:23 -0400 Subject: Forgot to update usage of ex to exc_value --- lib/sqlalchemy/testing/exclusions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index 5ce8bcd84..c9f81c8b9 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -132,7 +132,7 @@ class compound(object): for fail in self.fails: if fail(config): print(("%s failed as expected (%s): %s " % ( - name, fail._as_string(config), str(ex)))) + name, fail._as_string(config), str(exc_value)))) break else: raise exc_type, exc_value, exc_traceback -- cgit v1.2.1 From 470061bcdc0d1cdd2997354962b9e34cd9a43c33 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 23 Oct 2014 17:55:13 -0400 Subject: Added requirement to test for bind limit --- test/orm/test_query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 3f6813138..1c5fca144 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -1553,6 +1553,7 @@ class FilterTest(QueryTest, AssertsCompiledSQL): assert [] == sess.query(User).order_by(User.id)[3:3] assert [] == sess.query(User).order_by(User.id)[0:0] + @testing.requires.bound_limit_offset def test_select_with_bindparam_offset_limit(self): """Does a query allow bindparam for the 
limit?""" User = self.classes.User -- cgit v1.2.1 From fdbea87958628b641a855d4c3b35833330de49e0 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 23 Oct 2014 17:59:27 -0400 Subject: require check constraints for tests --- test/sql/test_constraints.py | 1 + test/sql/test_metadata.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/sql/test_constraints.py b/test/sql/test_constraints.py index 2f054dac1..c0b5806ac 100644 --- a/test/sql/test_constraints.py +++ b/test/sql/test_constraints.py @@ -130,6 +130,7 @@ class ConstraintGenTest(fixtures.TestBase, AssertsExecutionResults): *assertions ) + @testing.requires.check_constraints @testing.provide_metadata def test_check_constraint_create(self): metadata = self.metadata diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 4a484dbac..ff79c126a 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -486,6 +486,7 @@ class MetaDataTest(fixtures.TestBase, ComparesTables): class ToMetaDataTest(fixtures.TestBase, ComparesTables): + @testing.requires.check_constraints def test_copy(self): from sqlalchemy.testing.schema import Table meta = MetaData() -- cgit v1.2.1 From e01dab9b1fbaf8325022c20f76ea9b99fbfdfd73 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Wed, 29 Oct 2014 17:42:52 -0400 Subject: Set the length for MyType implementation Mysql drops the type in these tests, when it does visit_typeclause, since it's an unkown type it just says none, and doesn't do a cast. Firebird also doesn't support varchar with length, it throws an error on these types. --- test/sql/test_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/sql/test_types.py b/test/sql/test_types.py index efa0f90ae..26dc6c842 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -558,7 +558,7 @@ class TypeCoerceCastTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): class MyType(types.TypeDecorator): - impl = String + impl = String(50) def process_bind_param(self, value, dialect): return "BIND_IN" + str(value) -- cgit v1.2.1 From 5b3fc8743582db73642767cc6c101cf95f13464f Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 30 Oct 2014 11:06:45 -0400 Subject: Added requirement for temporary tables --- test/requirements.py | 5 +++++ test/sql/test_metadata.py | 1 + 2 files changed, 6 insertions(+) diff --git a/test/requirements.py b/test/requirements.py index 432dc645c..21dd2913e 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -126,6 +126,11 @@ class DefaultRequirements(SuiteRequirements): "not supported by database" ) + @property + def temporary_table(self): + """Target database must support CREATE TEMPORARY TABLE""" + return exclusions.open() + @property def reflectable_autoincrement(self): """Target database must support tables that can automatically generate diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index ff79c126a..a209cdd7a 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -1038,6 +1038,7 @@ class InfoTest(fixtures.TestBase): class TableTest(fixtures.TestBase, AssertsCompiledSQL): + @testing.requires.temporary_table @testing.skip_if('mssql', 'different col format') def test_prefixes(self): from sqlalchemy import Table -- cgit v1.2.1 From ebb9d57cb385f49becbf54c6f78647715ddd1c29 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Thu, 30 Oct 2014 16:40:36 -0400 Subject: Removed accidental print statements --- lib/sqlalchemy/testing/engines.py | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 75bcc58e1..3a3f5be10 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -316,8 +316,6 @@ class DBAPIProxyConnection(object): self.cursor_cls = cursor_cls def cursor(self, *args, **kwargs): - print "DPA", args - print "DPK", kwargs return self.cursor_cls(self.engine, self.conn, *args, **kwargs) def close(self): -- cgit v1.2.1 From 7bf5ac9c1e814c999d4930941935e1d5cfd236bf Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 31 Oct 2014 20:00:42 -0400 Subject: - ensure kwargs are passed for limit clause on a compound select as well, further fixes for #3034 --- lib/sqlalchemy/sql/compiler.py | 2 +- test/sql/test_compiler.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index a6c30b7dc..5fa78ad0f 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -813,7 +813,7 @@ class SQLCompiler(Compiled): text += self.order_by_clause(cs, **kwargs) text += (cs._limit_clause is not None or cs._offset_clause is not None) and \ - self.limit_clause(cs) or "" + self.limit_clause(cs, **kwargs) or "" if self.ctes and \ compound_index == 0 and toplevel: diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 3e6b87351..bfafed599 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -238,6 +238,22 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL): checkparams=params ) + def test_limit_offset_select_literal_binds(self): + stmt = select([1]).limit(5).offset(6) + self.assert_compile( + stmt, + "SELECT 1 LIMIT 5 OFFSET 6", + literal_binds=True + ) + + def test_limit_offset_compound_select_literal_binds(self): + stmt = select([1]).union(select([2])).limit(5).offset(6) + self.assert_compile( + stmt, + "SELECT 1 UNION SELECT 2 LIMIT 5 OFFSET 6", + literal_binds=True + ) + def test_select_precol_compile_ordering(self): s1 = select([column('x')]).select_from(text('a')).limit(5).as_scalar() s2 = select([s1]).limit(10) -- cgit v1.2.1 From 8d154f84f1a552c290a1ccd802f20940c8cab066 Mon Sep 17 00:00:00 2001 From: Scott Dugas Date: Mon, 3 Nov 2014 15:24:31 -0500 Subject: It now calls raise_from_cause master was updated to call util.raise_from_cause which is better than what I had --- lib/sqlalchemy/testing/exclusions.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index e3d91300d..f94724608 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -12,7 +12,6 @@ from ..util import decorator from . import config from .. 
import util import inspect -import sys import contextlib @@ -121,18 +120,17 @@ class compound(object): try: return_value = fn(*args, **kw) - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - self._expect_failure(config, exc_type, exc_value, exc_traceback, name=fn.__name__) + except Exception as ex: + self._expect_failure(config, ex, name=fn.__name__) else: self._expect_success(config, name=fn.__name__) return return_value - def _expect_failure(self, config, exc_type, exc_value, exc_traceback, name='block'): + def _expect_failure(self, config, ex, name='block'): for fail in self.fails: if fail(config): print(("%s failed as expected (%s): %s " % ( - name, fail._as_string(config), str(exc_value)))) + name, fail._as_string(config), str(ex)))) break else: util.raise_from_cause(ex) -- cgit v1.2.1 From edec583b459e955a30d40b5c5d8baaed0a2ec1c6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Nov 2014 04:22:30 -0500 Subject: - Fixed bug regarding expression mutations which could express itself as a "Could not locate column" error when using :class:`.Query` to select from multiple, anonymous column entities when querying against SQLite, as a side effect of the "join rewriting" feature used by the SQLite dialect. fixes #3241 --- doc/build/changelog/changelog_09.rst | 11 +++++++++++ lib/sqlalchemy/sql/elements.py | 7 +++++++ test/sql/test_generative.py | 13 +++++++++++++ test/sql/test_join_rewriting.py | 25 ++++++++++++++++++++++++- 4 files changed, 55 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 6909da357..8ed2ea776 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,17 @@ .. changelog:: :version: 0.9.9 + .. change:: + :tags: bug, orm, sqlite + :versions: 1.0.0 + :tickets: 3241 + + Fixed bug regarding expression mutations which could express + itself as a "Could not locate column" error when using + :class:`.Query` to select from multiple, anonymous column + entities when querying against SQLite, as a side effect of the + "join rewriting" feature used by the SQLite dialect. + .. change:: :tags: feature, sqlite :versions: 1.0.0 diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 4d5bb9476..fa9b66024 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -861,6 +861,9 @@ class ColumnElement(operators.ColumnOperators, ClauseElement): expressions and function calls. 
""" + while self._is_clone_of is not None: + self = self._is_clone_of + return _anonymous_label( '%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon')) ) @@ -2778,6 +2781,10 @@ class Grouping(ColumnElement): def self_group(self, against=None): return self + @property + def _key_label(self): + return self._label + @property def _label(self): return getattr(self.element, '_label', None) or self.anon_label diff --git a/test/sql/test_generative.py b/test/sql/test_generative.py index 6044cecb0..6b86614e6 100644 --- a/test/sql/test_generative.py +++ b/test/sql/test_generative.py @@ -132,6 +132,19 @@ class TraversalTest(fixtures.TestBase, AssertsExecutionResults): assert struct == s2 assert struct.is_other(s2) + def test_clone_anon_label(self): + from sqlalchemy.sql.elements import Grouping + c1 = Grouping(literal_column('q')) + s1 = select([c1]) + + class Vis(CloningVisitor): + def visit_grouping(self, elem): + pass + + vis = Vis() + s2 = vis.traverse(s1) + eq_(list(s2.inner_columns)[0].anon_label, c1.anon_label) + def test_change_in_place(self): struct = B(A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) diff --git a/test/sql/test_join_rewriting.py b/test/sql/test_join_rewriting.py index c8b24e2f2..ced65d7f1 100644 --- a/test/sql/test_join_rewriting.py +++ b/test/sql/test_join_rewriting.py @@ -251,6 +251,16 @@ class _JoinRewriteTestBase(AssertsCompiledSQL): self._f_b1a_where_in_b2a ) + def test_anon_scalar_subqueries(self): + s1 = select([1]).as_scalar() + s2 = select([2]).as_scalar() + + s = select([s1, s2]).apply_labels() + self._test( + s, + self._anon_scalar_subqueries + ) + class JoinRewriteTest(_JoinRewriteTestBase, fixtures.TestBase): @@ -389,6 +399,10 @@ class JoinRewriteTest(_JoinRewriteTestBase, fixtures.TestBase): "FROM a JOIN b2 ON a.id = b2.a_id)" ) + _anon_scalar_subqueries = ( + "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2" + ) + class JoinPlainTest(_JoinRewriteTestBase, fixtures.TestBase): @@ -497,6 +511,10 @@ class JoinPlainTest(_JoinRewriteTestBase, fixtures.TestBase): "FROM a JOIN b2 ON a.id = b2.a_id)" ) + _anon_scalar_subqueries = ( + "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2" + ) + class JoinNoUseLabelsTest(_JoinRewriteTestBase, fixtures.TestBase): @@ -605,6 +623,10 @@ class JoinNoUseLabelsTest(_JoinRewriteTestBase, fixtures.TestBase): "FROM a JOIN b2 ON a.id = b2.a_id)" ) + _anon_scalar_subqueries = ( + "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2" + ) + class JoinExecTest(_JoinRewriteTestBase, fixtures.TestBase): @@ -615,7 +637,8 @@ class JoinExecTest(_JoinRewriteTestBase, fixtures.TestBase): _a_bc = _a_bc_comma_a1_selbc = _a__b_dc = _a_bkeyassoc = \ _a_bkeyassoc_aliased = _a_atobalias_balias_c_w_exists = \ _a_atobalias_balias = _b_ab1_union_c_ab2 = \ - _b_a_id_double_overlap_annotated = _f_b1a_where_in_b2a = None + _b_a_id_double_overlap_annotated = _f_b1a_where_in_b2a = \ + _anon_scalar_subqueries = None @classmethod def setup_class(cls): -- cgit v1.2.1 From ea637cef2d9ec54b14fac3620b1cfd47da723f3f Mon Sep 17 00:00:00 2001 From: Paulo Bu Date: Wed, 5 Nov 2014 13:15:08 +0100 Subject: Small improvement on FlushError can't delete error message Output in the error message the table name and the column name. 
--- lib/sqlalchemy/orm/persistence.py | 4 ++-- test/orm/test_unitofwork.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 114b79ea5..28254cc10 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -441,9 +441,9 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, state, state_dict, col) if value is None: raise orm_exc.FlushError( - "Can't delete from table " + "Can't delete from table %s " "using NULL for primary " - "key value") + "key value on column %s" % (table, col)) if update_version_id is not None and \ table.c.contains_column(mapper.version_id_col): diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py index a54097b03..247c5e7a8 100644 --- a/test/orm/test_unitofwork.py +++ b/test/orm/test_unitofwork.py @@ -2505,7 +2505,8 @@ class PartialNullPKTest(fixtures.MappedTest): s.delete(t1) assert_raises_message( orm_exc.FlushError, - "Can't delete from table using NULL for primary key value", + "Can't delete from table t1 using NULL " + "for primary key value on column t1.col2", s.commit ) -- cgit v1.2.1 From 8200c2cd35b3e85a636baabe8324b9ecbbd8fedf Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Nov 2014 15:11:13 -0500 Subject: - edits to the subqueryload ordering merge --- doc/build/faq.rst | 108 ++++++++++++++++++++++++++------------------- doc/build/orm/loading.rst | 25 ++++++++--- doc/build/orm/tutorial.rst | 9 ++-- 3 files changed, 86 insertions(+), 56 deletions(-) diff --git a/doc/build/faq.rst b/doc/build/faq.rst index fa10ba44b..12d8e0acc 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -603,62 +603,78 @@ The same idea applies to all the other arguments, such as ``foreign_keys``:: foo = relationship(Dest, foreign_keys=[foo_id, bar_id]) -.. _faq_subqueryload_sort: - -Why must I always ``ORDER BY`` a unique column when using ``subqueryload``? ----------------------------------------------------------------------------- - -The SQL standard prescribes that RDBMSs are free to return rows in any order it -deems appropriate, if no ``ORDER BY`` clause is specified. This even extends to -the case where the ``ORDER BY`` clause is not unique across all rows, i.e. rows -with the same value in the ``ORDER BY`` column(s) will not necessarily be -returned in a deterministic order. - -SQLAlchemy implements :func:`.orm.subqueryload` by issuing a separate query -(where the table specified in the relationship is joined to the original query) -and then attempting to match up the results in Python. This works fine -normally: +.. _faq_subqueryload_limit_sort: + +Why is ``ORDER BY`` required with ``LIMIT`` (especially with ``subqueryload()``)? +--------------------------------------------------------------------------------- + +A relational database can return rows in any +arbitrary order, when an explicit ordering is not set. +While this ordering very often corresponds to the natural +order of rows within a table, this is not the case for all databases and +all queries. The consequence of this is that any query that limits rows +using ``LIMIT`` or ``OFFSET`` should **always** specify an ``ORDER BY``. +Otherwise, it is not deterministic which rows will actually be returned. + +When we use a SQLAlchemy method like :meth:`.Query.first`, we are in fact +applying a ``LIMIT`` of one to the query, so without an explicit ordering +it is not deterministic what row we actually get back. 
+While we may not notice this for simple queries on databases that usually +returns rows in their natural +order, it becomes much more of an issue if we also use :func:`.orm.subqueryload` +to load related collections, and we may not be loading the collections +as intended. + +SQLAlchemy implements :func:`.orm.subqueryload` by issuing a separate query, +the results of which are matched up to the results from the first query. +We see two queries emitted like this: .. sourcecode:: python+sql >>> session.query(User).options(subqueryload(User.addresses)).all() - {opensql}# the "main" query + {opensql}-- the "main" query SELECT users.id AS users_id FROM users {stop} - {opensql}# the "load" query issued by subqueryload - SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id - FROM (SELECT users.id AS users_id - FROM users) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id - -Notice how the main query is a subquery in the load query. When an -``OFFSET``/``LIMIT`` is involved, however, things get a bit tricky: + {opensql}-- the "load" query issued by subqueryload + SELECT addresses.id AS addresses_id, + addresses.user_id AS addresses_user_id, + anon_1.users_id AS anon_1_users_id + FROM (SELECT users.id AS users_id FROM users) AS anon_1 + JOIN addresses ON anon_1.users_id = addresses.user_id + ORDER BY anon_1.users_id + +The second query embeds the first query as a source of rows. +When the inner query uses ``OFFSET`` and/or ``LIMIT`` without ordering, +the two queries may not see the same results: .. sourcecode:: python+sql >>> user = session.query(User).options(subqueryload(User.addresses)).first() - {opensql}# the "main" query + {opensql}-- the "main" query SELECT users.id AS users_id FROM users LIMIT 1 {stop} - {opensql}# the "load" query issued by subqueryload - SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id - FROM (SELECT users.id AS users_id - FROM users - LIMIT 1) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id - -The main query is still a subquery in the load query, but *it may return a -different set of results in the second query from the first* because it does -not have a deterministic sort order! Depending on database internals, there is -a chance we may get the following resultset for the two queries:: - + {opensql}-- the "load" query issued by subqueryload + SELECT addresses.id AS addresses_id, + addresses.user_id AS addresses_user_id, + anon_1.users_id AS anon_1_users_id + FROM (SELECT users.id AS users_id FROM users LIMIT 1) AS anon_1 + JOIN addresses ON anon_1.users_id = addresses.user_id + ORDER BY anon_1.users_id + +Depending on database specifics, there is +a chance we may get the a result like the following for the two queries:: + + -- query #1 +--------+ |users_id| +--------+ | 1| +--------+ + -- query #2 +------------+-----------------+---------------+ |addresses_id|addresses_user_id|anon_1_users_id| +------------+-----------------+---------------+ @@ -667,26 +683,28 @@ a chance we may get the following resultset for the two queries:: | 4| 2| 2| +------------+-----------------+---------------+ -From SQLAlchemy's point of view, it didn't get any addresses back for user 1, -so ``user.addresses`` is empty. Oops. +Above, we receive two ``addresses`` rows for ``user.id`` of 2, and none for +1. We've wasted two rows and failed to actually load the collection. 
This +is an insidious error because without looking at the SQL and the results, the +ORM will not show that there's any issue; if we access the ``addresses`` +for the ``User`` we have, it will emit a lazy load for the collection and we +won't see that anything actually went wrong. The solution to this problem is to always specify a deterministic sort order, so that the main query always returns the same set of rows. This generally -means that you should :meth:`.Query.order_by` on a unique column on the table, -usually the primary key:: +means that you should :meth:`.Query.order_by` on a unique column on the table. +The primary key is a good choice for this:: session.query(User).options(subqueryload(User.addresses)).order_by(User.id).first() -You can get away with not doing a sort if the ``OFFSET``/``LIMIT`` does not -throw away any rows at all, but it's much simpler to remember to always ``ORDER -BY`` the primary key:: - - session.query(User).options(subqueryload(User.addresses)).filter(User.id == 1).first() - Note that :func:`.joinedload` does not suffer from the same problem because only one query is ever issued, so the load query cannot be different from the main query. +.. seealso:: + + :ref:`subqueryload_ordering` + Performance =========== diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 27846b9b2..b2d8124e2 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -120,21 +120,32 @@ query options: # set children to load eagerly with a second statement session.query(Parent).options(subqueryload('children')).all() -.. _subquery_loading_tips: +.. _subqueryload_ordering: -Subquery Loading Tips -^^^^^^^^^^^^^^^^^^^^^ +The Importance of Ordering +-------------------------- + +A query which makes use of :func:`.subqueryload` in conjunction with a +limiting modifier such as :meth:`.Query.first`, :meth:`.Query.limit`, +or :meth:`.Query.offset` should **always** include :meth:`.Query.order_by` +against unique column(s) such as the primary key, so that the additional queries +emitted by :func:`.subqueryload` include +the same ordering as used by the parent query. Without it, there is a chance +that the inner query could return the wrong rows:: -If you have ``LIMIT`` or ``OFFSET`` in your query, you **must** ``ORDER BY`` a -unique column, generally the primary key of your table, in order to ensure -correct results (see :ref:`faq_subqueryload_sort`):: + # incorrect, no ORDER BY + session.query(User).options(subqueryload(User.addresses)).first() - # incorrect + # incorrect if User.name is not unique session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first() # correct session.query(User).options(subqueryload(User.addresses)).order_by(User.name, User.id).first() +.. seealso:: + + :ref:`faq_subqueryload_limit_sort` - detailed example + Loading Along Paths ------------------- diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst index 19f3f6fea..8871ce765 100644 --- a/doc/build/orm/tutorial.rst +++ b/doc/build/orm/tutorial.rst @@ -1631,11 +1631,12 @@ very easy to use: >>> jack.addresses [, ] -.. warning:: +.. note:: - If you use :func:`.subqueryload`, you should generally - :meth:`.Query.order_by` on a unique column in order to ensure correct - results. See :ref:`subquery_loading_tips`. 
+ :func:`.subqueryload` when used in conjunction with limiting such as + :meth:`.Query.first`, :meth:`.Query.limit` or :meth:`.Query.offset` + should also include :meth:`.Query.order_by` on a unique column in order to + ensure correct results. See :ref:`subqueryload_ordering`. Joined Load ------------- -- cgit v1.2.1 From 4b09f1423b382336f29722490bab3a4c8c8607ea Mon Sep 17 00:00:00 2001 From: Paulo Bu Date: Thu, 6 Nov 2014 21:14:17 +0100 Subject: Small improvement on FlushError can't update error message Output in the error message the table name and the column name. --- lib/sqlalchemy/orm/persistence.py | 8 ++++---- test/orm/test_unitofwork.py | 6 ++++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 28254cc10..6b8d5af14 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -375,12 +375,12 @@ def _collect_update_commands(uowtransaction, table, states_to_update): params[col.key] = history.added[0] else: pk_params[col._label] = history.unchanged[0] + if pk_params[col._label] is None: + raise orm_exc.FlushError( + "Can't update table %s using NULL for primary " + "key value on column %s" % (table, col)) if params or value_params: - if None in pk_params.values(): - raise orm_exc.FlushError( - "Can't update table using NULL for primary " - "key value") params.update(pk_params) yield ( state, state_dict, params, mapper, diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py index 247c5e7a8..ae5a8ef60 100644 --- a/test/orm/test_unitofwork.py +++ b/test/orm/test_unitofwork.py @@ -2479,7 +2479,8 @@ class PartialNullPKTest(fixtures.MappedTest): t1.col2 = 5 assert_raises_message( orm_exc.FlushError, - "Can't update table using NULL for primary key value", + "Can't update table t1 using NULL for primary " + "key value on column t1.col2", s.commit ) @@ -2492,7 +2493,8 @@ class PartialNullPKTest(fixtures.MappedTest): t1.col3 = 'hi' assert_raises_message( orm_exc.FlushError, - "Can't update table using NULL for primary key value", + "Can't update table t1 using NULL for primary " + "key value on column t1.col2", s.commit ) -- cgit v1.2.1 From 0c19d765dce89970c0395f57f15eb5b0f09c2a29 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 6 Nov 2014 17:29:22 -0500 Subject: bulk_updates --- examples/performance/bulk_updates.py | 54 ++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 examples/performance/bulk_updates.py diff --git a/examples/performance/bulk_updates.py b/examples/performance/bulk_updates.py new file mode 100644 index 000000000..9522e4bf5 --- /dev/null +++ b/examples/performance/bulk_updates.py @@ -0,0 +1,54 @@ +"""This series of tests illustrates different ways to UPDATE a large number +of rows in bulk. + + +""" +from . 
import Profiler + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Integer, String, create_engine, bindparam +from sqlalchemy.orm import Session + +Base = declarative_base() +engine = None + + +class Customer(Base): + __tablename__ = "customer" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + +Profiler.init("bulk_updates", num=100000) + + +@Profiler.setup +def setup_database(dburl, echo, num): + global engine + engine = create_engine(dburl, echo=echo) + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) + + s = Session(engine) + for chunk in range(0, num, 10000): + s.bulk_insert_mappings(Customer, [ + { + 'name': 'customer name %d' % i, + 'description': 'customer description %d' % i + } for i in range(chunk, chunk + 10000) + ]) + s.commit() + + +@Profiler.profile +def test_orm_flush(n): + """UPDATE statements via the ORM flush process.""" + session = Session(bind=engine) + for chunk in range(0, n, 1000): + customers = session.query(Customer).\ + filter(Customer.id.between(chunk, chunk + 1000)).all() + for customer in customers: + customer.description += "updated" + session.flush() + session.commit() -- cgit v1.2.1 From a19b2f419cd876b561a3b3c21ebed5c223192883 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Nov 2014 17:37:26 -0500 Subject: - The :attr:`.Column.key` attribute is now used as the source of anonymous bound parameter names within expressions, to match the existing use of this value as the key when rendered in an INSERT or UPDATE statement. This allows :attr:`.Column.key` to be used as a "substitute" string to work around a difficult column name that doesn't translate well into a bound parameter name. Note that the paramstyle is configurable on :func:`.create_engine` in any case, and most DBAPIs today support a named and positional style. fixes #3245 --- doc/build/changelog/changelog_10.rst | 13 +++++++++++++ lib/sqlalchemy/sql/elements.py | 4 ++-- test/dialect/mssql/test_reflection.py | 4 ++-- test/sql/test_compiler.py | 13 +++++++++++++ 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index e63e023d9..4e5e1ba1d 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,19 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, sql + :tickets: 3245 + + The :attr:`.Column.key` attribute is now used as the source of + anonymous bound parameter names within expressions, to match the + existing use of this value as the key when rendered in an INSERT + or UPDATE statement. This allows :attr:`.Column.key` to be used + as a "substitute" string to work around a difficult column name + that doesn't translate well into a bound parameter name. Note that + the paramstyle is configurable on :func:`.create_engine` in any case, + and most DBAPIs today support a named and positional style. + .. 
change:: :tags: bug, sql :pullreq: github:146 diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index fa9b66024..734f78632 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1092,7 +1092,7 @@ class BindParameter(ColumnElement): """ if isinstance(key, ColumnClause): type_ = key.type - key = key.name + key = key.key if required is NO_ARG: required = (value is NO_ARG and callable_ is None) if value is NO_ARG: @@ -3335,7 +3335,7 @@ class ColumnClause(Immutable, ColumnElement): return name def _bind_param(self, operator, obj): - return BindParameter(self.name, obj, + return BindParameter(self.key, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index e93162a8e..0ef69f656 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -187,7 +187,7 @@ class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL): stmt = tables.c.table_name == 'somename' self.assert_compile( stmt, - "[TABLES_1].[TABLE_NAME] = :TABLE_NAME_1", + "[TABLES_1].[TABLE_NAME] = :table_name_1", dialect=dialect ) @@ -197,7 +197,7 @@ class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL): stmt = tables.c.table_name == 'somename' self.assert_compile( stmt, - "[TABLES_1].[TABLE_NAME] = CAST(:TABLE_NAME_1 AS NVARCHAR(max))", + "[TABLES_1].[TABLE_NAME] = CAST(:table_name_1 AS NVARCHAR(max))", dialect=dialect ) diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index bfafed599..5d1afe616 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -435,6 +435,19 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL): dialect=default.DefaultDialect(paramstyle='pyformat') ) + def test_anon_param_name_on_keys(self): + self.assert_compile( + keyed.insert(), + "INSERT INTO keyed (x, y, z) VALUES (%(colx)s, %(coly)s, %(z)s)", + dialect=default.DefaultDialect(paramstyle='pyformat') + ) + self.assert_compile( + keyed.c.coly == 5, + "keyed.y = %(coly_1)s", + checkparams={'coly_1': 5}, + dialect=default.DefaultDialect(paramstyle='pyformat') + ) + def test_dupe_columns(self): """test that deduping is performed against clause element identity, not rendered result.""" -- cgit v1.2.1 From 21022f9760e32cf54d59eaccc12cc9e2fea1d37a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Nov 2014 17:58:09 -0500 Subject: - in lieu of adding a new system of translating bound parameter names for psycopg2 and others, encourage users to take advantage of positional styles by documenting "paramstyle". A section is added to psycopg2 specifically as this is a pretty common spot for named parameters that may be unusually named. fixes #3246. --- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 49 ++++++++++++++++++++++++++ lib/sqlalchemy/engine/__init__.py | 11 ++++++ 2 files changed, 60 insertions(+) diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 1a2a1ffe4..f67b2e3b0 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -159,6 +159,55 @@ defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming obsolete as most DBAPIs now support unicode fully. +Bound Parameter Styles +---------------------- + +The default parameter style for the psycopg2 dialect is "pyformat", where +SQL is rendered using ``%(paramname)s`` style. 
This format has the limitation +that it does not accommodate the unusual case of parameter names that +actually contain percent or parenthesis symbols; as SQLAlchemy in many cases +generates bound parameter names based on the name of a column, the presence +of these characters in a column name can lead to problems. + +There are two solutions to the issue of a :class:`.schema.Column` that contains +one of these characters in its name. One is to specify the +:paramref:`.schema.Column.key` for columns that have such names:: + + measurement = Table('measurement', metadata, + Column('Size (meters)', Integer, key='size_meters') + ) + +Above, an INSERT statement such as ``measurement.insert()`` will use +``size_meters`` as the parameter name, and a SQL expression such as +``measurement.c.size_meters > 10`` will derive the bound parameter name +from the ``size_meters`` key as well. + +.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key` + as the source of naming when anonymous bound parameters are created + in SQL expressions; previously, this behavior only applied to + :meth:`.Table.insert` and :meth:`.Table.update` parameter names. + +The other solution is to use a positional format; psycopg2 allows use of the +"format" paramstyle, which can be passed to +:paramref:`.create_engine.paramstyle`:: + + engine = create_engine( + 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format') + +With the above engine, instead of a statement like:: + + INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s) + {'Size (meters)': 1} + +we instead see:: + + INSERT INTO measurement ("Size (meters)") VALUES (%s) + (1, ) + +Where above, the dictionary style is converted into a tuple with positional +style. + + Transactions ------------ diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 68145f5cd..cf75871bf 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -292,6 +292,17 @@ def create_engine(*args, **kwargs): be used instead. Can be used for testing of DBAPIs as well as to inject "mock" DBAPI implementations into the :class:`.Engine`. + :param paramstyle=None: The `paramstyle `_ + to use when rendering bound parameters. This style defaults to the + one recommended by the DBAPI itself, which is retrieved from the + ``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept + more than one paramstyle, and in particular it may be desirable + to change a "named" paramstyle into a "positional" one, or vice versa. + When this attribute is passed, it should be one of the values + ``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or + ``"pyformat"``, and should correspond to a parameter style known + to be supported by the DBAPI in use. + :param pool=None: an already-constructed instance of :class:`~sqlalchemy.pool.Pool`, such as a :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this -- cgit v1.2.1 From b013fb82f5a5d891c6fd776e0e6ed926cdf2ffe1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 11 Nov 2014 12:34:00 -0500 Subject: - Fixed issue where the columns from a SELECT embedded in an INSERT, either through the values clause or as a "from select", would pollute the column types used in the result set produced by the RETURNING clause when columns from both statements shared the same name, leading to potential errors or mis-adaptation when retrieving the returning rows. 
fixes #3248 --- doc/build/changelog/changelog_09.rst | 12 ++++++++++++ lib/sqlalchemy/sql/compiler.py | 8 ++++++++ test/sql/test_compiler.py | 29 +++++++++++++++++++++++++++++ test/sql/test_returning.py | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 82 insertions(+) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 8ed2ea776..66a7da8da 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,18 @@ .. changelog:: :version: 0.9.9 + .. change:: + :tags: bug, sql + :versions: 1.0.0 + :tickets: 3248 + + Fixed issue where the columns from a SELECT embedded in an + INSERT, either through the values clause or as a "from select", + would pollute the column types used in the result set produced by + the RETURNING clause when columns from both statements shared the + same name, leading to potential errors or mis-adaptation when + retrieving the returning rows. + .. change:: :tags: bug, orm, sqlite :versions: 1.0.0 diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 5fa78ad0f..8f3ede25f 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1729,6 +1729,12 @@ class SQLCompiler(Compiled): ) def visit_insert(self, insert_stmt, **kw): + self.stack.append( + {'correlate_froms': set(), + "iswrapper": False, + "asfrom_froms": set(), + "selectable": insert_stmt}) + self.isinsert = True crud_params = crud._get_crud_params(self, insert_stmt, **kw) @@ -1812,6 +1818,8 @@ class SQLCompiler(Compiled): if self.returning and not self.returning_precedes_values: text += " " + returning_clause + self.stack.pop(-1) + return text def update_limit_clause(self, update_stmt): diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 5d1afe616..9e99a947b 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -3437,3 +3437,32 @@ class ResultMapTest(fixtures.TestBase): is_( comp.result_map['t1_a'][1][2], t1.c.a ) + + def test_insert_with_select_values(self): + astring = Column('a', String) + aint = Column('a', Integer) + m = MetaData() + Table('t1', m, astring) + t2 = Table('t2', m, aint) + + stmt = t2.insert().values(a=select([astring])).returning(aint) + comp = stmt.compile(dialect=postgresql.dialect()) + eq_( + comp.result_map, + {'a': ('a', (aint, 'a', 'a'), aint.type)} + ) + + def test_insert_from_select(self): + astring = Column('a', String) + aint = Column('a', Integer) + m = MetaData() + Table('t1', m, astring) + t2 = Table('t2', m, aint) + + stmt = t2.insert().from_select(['a'], select([astring])).\ + returning(aint) + comp = stmt.compile(dialect=postgresql.dialect()) + eq_( + comp.result_map, + {'a': ('a', (aint, 'a', 'a'), aint.type)} + ) diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py index 79a0b38a5..cd9f632b9 100644 --- a/test/sql/test_returning.py +++ b/test/sql/test_returning.py @@ -160,6 +160,39 @@ class ReturningTest(fixtures.TestBase, AssertsExecutionResults): eq_(result2.fetchall(), [(2, False), ]) +class CompositeStatementTest(fixtures.TestBase): + __requires__ = 'returning', + __backend__ = True + + @testing.provide_metadata + def test_select_doesnt_pollute_result(self): + class MyType(TypeDecorator): + impl = Integer + + def process_result_value(self, value, dialect): + raise Exception("I have not been selected") + + t1 = Table( + 't1', self.metadata, + Column('x', MyType()) + ) + + t2 = Table( + 't2', self.metadata, + Column('x', Integer) + ) + + self.metadata.create_all(testing.db) + 
with testing.db.connect() as conn: + conn.execute(t1.insert().values(x=5)) + + stmt = t2.insert().values( + x=select([t1.c.x]).as_scalar()).returning(t2.c.x) + + result = conn.execute(stmt) + eq_(result.scalar(), 5) + + class SequenceReturningTest(fixtures.TestBase): __requires__ = 'returning', 'sequences' __backend__ = True -- cgit v1.2.1 From 30075f9015c91d945c620af0d84c9c162627aa3c Mon Sep 17 00:00:00 2001 From: Jon Nelson Date: Tue, 11 Nov 2014 21:34:57 -0600 Subject: - don't do inline string interpolation when logging --- doc/build/faq.rst | 4 ++-- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/orm/strategies.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/build/faq.rst b/doc/build/faq.rst index 12d8e0acc..586f66754 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -757,14 +757,14 @@ using a recipe like the following:: def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) - logger.debug("Start Query: %s" % statement) + logger.debug("Start Query: %s", statement) @event.listens_for(Engine, "after_cursor_execute") def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = time.time() - conn.info['query_start_time'].pop(-1) logger.debug("Query Complete!") - logger.debug("Total Time: %f" % total) + logger.debug("Total Time: %f", total) Above, we use the :meth:`.ConnectionEvents.before_cursor_execute` and :meth:`.ConnectionEvents.after_cursor_execute` events to establish an interception diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 2fb054d0c..58eb3afa0 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -2593,7 +2593,7 @@ class MySQLDialect(default.DefaultDialect): pass else: self.logger.info( - "Converting unknown KEY type %s to a plain KEY" % flavor) + "Converting unknown KEY type %s to a plain KEY", flavor) pass index_d = {} index_d['name'] = spec['name'] diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index cdb501c14..d95f17f64 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -373,7 +373,7 @@ class LazyLoader(AbstractRelationshipLoader): self._equated_columns[c] = self._equated_columns[col] self.logger.info("%s will use query.get() to " - "optimize instance loads" % self) + "optimize instance loads", self) def init_class_attribute(self, mapper): self.is_class_level = True -- cgit v1.2.1 From 026449c15ff35a9b89c2ca591d3e3cc791857272 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 13 Nov 2014 13:17:38 -0500 Subject: - Fixed a leak which would occur in the unsupported and highly non-recommended use case of replacing a relationship on a fixed mapped class many times, referring to an arbitrarily growing number of target mappers. A warning is emitted when the old relationship is replaced, however if the mapping were already used for querying, the old relationship would still be referenced within some registries. fixes #3251 --- doc/build/changelog/changelog_09.rst | 12 ++++++++++++ lib/sqlalchemy/orm/mapper.py | 2 ++ test/aaa_profiling/test_memusage.py | 26 ++++++++++++++++++++++++++ 3 files changed, 40 insertions(+) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 66a7da8da..abf564875 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,18 @@ .. 
changelog:: :version: 0.9.9 + .. change:: + :tags: bug, orm + :versions: 1.0.0 + :tickets: 3251 + + Fixed a leak which would occur in the unsupported and highly + non-recommended use case of replacing a relationship on a fixed + mapped class many times, referring to an arbitrarily growing number of + target mappers. A warning is emitted when the old relationship is + replaced, however if the mapping were already used for querying, the + old relationship would still be referenced within some registries. + .. change:: :tags: bug, sql :versions: 1.0.0 diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 7e88ba161..863dab5cb 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1581,6 +1581,8 @@ class Mapper(InspectionAttr): self, prop, )) + oldprop = self._props[key] + self._path_registry.pop(oldprop, None) self._props[key] = prop diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index f4bce6b01..63883daac 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -658,6 +658,32 @@ class MemUsageTest(EnsureZeroed): row[t.c.x] go() + def test_many_discarded_relationships(self): + """a use case that really isn't supported, nonetheless we can + guard against memleaks here so why not""" + + m1 = MetaData() + t1 = Table('t1', m1, Column('id', Integer, primary_key=True)) + t2 = Table( + 't2', m1, Column('id', Integer, primary_key=True), + Column('t1id', ForeignKey('t1.id'))) + + class T1(object): + pass + t1_mapper = mapper(T1, t1) + + @testing.emits_warning() + @profile_memory() + def go(): + class T2(object): + pass + t2_mapper = mapper(T2, t2) + t1_mapper.add_property("bar", relationship(t2_mapper)) + s1 = Session() + # this causes the path_registry to be invoked + s1.query(t1_mapper)._compile_context() + go() + # fails on newer versions of pysqlite due to unusual memory behvior # in pysqlite itself. background at: # http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290 -- cgit v1.2.1 From de9103aae22ba548323a3e469624f02d1d279103 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 14 Nov 2014 11:06:43 -0500 Subject: - correct this to rewrite a multiple profile line correctly --- lib/sqlalchemy/testing/profiling.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index fcb888f86..6fc51ef32 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -115,7 +115,11 @@ class ProfileStatsFile(object): per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] counts = per_platform['counts'] - counts[-1] = callcount + current_count = per_platform['current_count'] + if current_count < len(counts): + counts[current_count - 1] = callcount + else: + counts[-1] = callcount if self.write: self._write() -- cgit v1.2.1 From 69979c8b2e4dcd009be5d1a1f466a24985e15301 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 14 Nov 2014 11:07:02 -0500 Subject: - callcounts --- test/profiles.txt | 70 ++++++++----------------------------------------------- 1 file changed, 10 insertions(+), 60 deletions(-) diff --git a/test/profiles.txt b/test/profiles.txt index dc4d05264..7d6e016b8 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -1,15 +1,15 @@ # /Users/classic/dev/sqlalchemy/test/profiles.txt # This file is written out on a per-environment basis. 
-# For each test in aaa_profiling, the corresponding function and +# For each test in aaa_profiling, the corresponding function and # environment is located within this file. If it doesn't exist, # the test is skipped. -# If a callcount does exist, it is compared to what we received. +# If a callcount does exist, it is compared to what we received. # assertions are raised if the counts do not match. -# -# To add a new callcount test, apply the function_call_count -# decorator and re-run the tests using the --write-profiles +# +# To add a new callcount test, apply the function_call_count +# decorator and re-run the tests using the --write-profiles # option - this file will be rewritten including the new count. -# +# # TEST: test.aaa_profiling.test_compiler.CompileTest.test_insert @@ -132,8 +132,6 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycop test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_nocextensions 40149 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 19280 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 28297 - - test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_nocextensions 29138 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 32398 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_nocextensions 37327 @@ -148,8 +146,6 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_nocextensions 30054 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 27144 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 30149 - - test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_nocextensions 29068 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 32197 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_nocextensions 31179 @@ -164,8 +160,6 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_postgresql_psycopg2_nocextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_cextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_nocextensions 17988 - - test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_nocextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_nocextensions 18988 @@ -180,8 +174,6 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_nocextensions 122553 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 162315 
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 165111 - - test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_nocextensions 125352 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_cextensions 169566 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_nocextensions 171364 @@ -196,8 +188,6 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2. test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_nocextensions 19219 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 22288 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 22530 - - test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_nocextensions 19492 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_cextensions 23067 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_nocextensions 23271 @@ -212,8 +202,6 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_ce test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1348 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1601 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1626 - - test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1355 test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1656 test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocextensions 1671 @@ -228,8 +216,6 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_nocextensions 117,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 117,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 117,18 - - test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_nocextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_nocextensions 122,19 @@ -244,8 +230,6 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psy test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_nocextensions 91 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 91 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_nocextensions 91 - - test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_nocextensions 78 @@ -260,8 +244,6 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_postgresql_ps 
test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_postgresql_psycopg2_nocextensions 31 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_cextensions 31 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_nocextensions 31 - - test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_postgresql_psycopg2_nocextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_cextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_nocextensions 24 @@ -276,8 +258,6 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_po test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_postgresql_psycopg2_nocextensions 8 test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_cextensions 8 test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_nocextensions 8 - - test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_postgresql_psycopg2_nocextensions 9 test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_cextensions 9 test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_nocextensions 9 @@ -286,22 +266,16 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.4_po # TEST: test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqldb_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqldb_nocextensions 45 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_nocextensions 45 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_nocextensions 45 - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_nocextensions 43 - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_sqlite_pysqlite_cextensions 43 @@ -309,22 +283,16 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute # TEST: test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqldb_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqldb_nocextensions 80 
test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_nocextensions 80 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_nocextensions 80 - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_nocextensions 78 - - test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_sqlite_pysqlite_cextensions 78 @@ -332,22 +300,16 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_ # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile - - test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqldb_cextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqldb_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_cextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_nocextensions 15 - - test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_nocextensions 16 - - test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_sqlite_pysqlite_cextensions 16 @@ -355,22 +317,16 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_string - - test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 514 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_nocextensions 15534 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35521 
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 457 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15477 - - test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_nocextensions 14489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_nocextensions 14462 - - test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_nocextensions 14489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 462 @@ -378,22 +334,16 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_ # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode - - test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 514 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_nocextensions 45534 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35521 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 457 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15477 - - test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_nocextensions 14489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_nocextensions 14462 - - test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_nocextensions 14489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 462 @@ -403,10 +353,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5562,277,3697,11893,1106,1968,2433 test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5606,277,3929,13595,1223,2011,2692 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5238,259,3577,11529,1077,1886,2439 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5260,259,3673,12701,1171,1893,2631 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 5221,259,3577,11529,1077,1883,2439 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5243,259,3673,12701,1171,1890,2631 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5238,273,3577,11529,1077,1886,2439 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 
5260,273,3673,12701,1171,1893,2631 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 5221,273,3577,11529,1077,1883,2439 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5243,273,3697,12796,1187,1923,2653 # TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation -- cgit v1.2.1 From 5fd779df0c958dc1ec2766f55d80b3090d2427eb Mon Sep 17 00:00:00 2001 From: Jon Nelson Date: Fri, 14 Nov 2014 22:19:26 -0600 Subject: - fix minor spelling error --- test/requirements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/requirements.py b/test/requirements.py index 05ca8d717..514c2b329 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -30,7 +30,7 @@ def exclude(db, op, spec, description=None): class DefaultRequirements(SuiteRequirements): @property def deferrable_or_no_constraints(self): - """Target database must support derferable constraints.""" + """Target database must support deferrable constraints.""" return skip_if([ no_support('firebird', 'not supported by database'), -- cgit v1.2.1 From 2098001ad3e0a40aa909a347ec91c12fab04a657 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 24 Nov 2014 12:33:56 -0500 Subject: - Fixed a bug in the examples/generic_assocaitions/discriminator_on_association.py example, where the subclasses of AddressAssociation were not being mapped as "single table inheritance", leading to problems when trying to use the mappings further. --- doc/build/changelog/changelog_09.rst | 9 +++++++++ examples/generic_associations/discriminator_on_association.py | 1 + 2 files changed, 10 insertions(+) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index abf564875..ef0277935 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,15 @@ .. changelog:: :version: 0.9.9 + .. change:: + :tags: bug, examples + :versions: 1.0.0 + + Fixed a bug in the examples/generic_assocaitions/discriminator_on_association.py + example, where the subclasses of AddressAssociation were not being + mapped as "single table inheritance", leading to problems when trying + to use the mappings further. + .. change:: :tags: bug, orm :versions: 1.0.0 diff --git a/examples/generic_associations/discriminator_on_association.py b/examples/generic_associations/discriminator_on_association.py index e03cfec00..7bb04cf85 100644 --- a/examples/generic_associations/discriminator_on_association.py +++ b/examples/generic_associations/discriminator_on_association.py @@ -84,6 +84,7 @@ class HasAddresses(object): "%sAddressAssociation" % name, (AddressAssociation, ), dict( + __tablename__=None, __mapper_args__={ "polymorphic_identity": discriminator } -- cgit v1.2.1 From ba926a09b493b37c88e7b435aaccc6b399574057 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 24 Nov 2014 17:35:50 -0500 Subject: - add some logging to path_registry to help debug eager loading issues --- lib/sqlalchemy/orm/path_registry.py | 10 ++++++++++ lib/sqlalchemy/orm/strategy_options.py | 3 +++ 2 files changed, 13 insertions(+) diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index f10a125a8..d4dbf29a0 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -13,6 +13,9 @@ from .. import util from .. 
import exc from itertools import chain from .base import class_mapper +import logging + +log = logging.getLogger(__name__) def _unreduce_path(path): @@ -54,9 +57,11 @@ class PathRegistry(object): self.path == other.path def set(self, attributes, key, value): + log.debug("set '%s' on path '%s' to '%s'", key, self, value) attributes[(key, self.path)] = value def setdefault(self, attributes, key, value): + log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value) attributes.setdefault((key, self.path), value) def get(self, attributes, key, value=None): @@ -184,6 +189,11 @@ class PropRegistry(PathRegistry): self.parent = parent self.path = parent.path + (prop,) + def __str__(self): + return " -> ".join( + str(elem) for elem in self.path + ) + @util.memoized_property def has_entity(self): return hasattr(self.prop, "mapper") diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 4f986193e..a4107202e 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -176,6 +176,9 @@ class Load(Generative, MapperOption): path = path.entity_path return path + def __str__(self): + return "Load(strategy=%r)" % self.strategy + def _coerce_strat(self, strategy): if strategy is not None: strategy = tuple(sorted(strategy.items())) -- cgit v1.2.1 From de11f9498258182cbb6668b72067ec3f43a90415 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 24 Nov 2014 18:49:32 -0500 Subject: - The :meth:`.PropComparator.of_type` modifier has been improved in conjunction with loader directives such as :func:`.joinedload` and :func:`.contains_eager` such that if two :meth:`.PropComparator.of_type` modifiers of the same base type/path are encountered, they will be joined together into a single "polymorphic" entity, rather than replacing the entity of type A with the one of type B. E.g. a joinedload of ``A.b.of_type(BSub1)->BSub1.c`` combined with joinedload of ``A.b.of_type(BSub2)->BSub2.c`` will create a single joinedload of ``A.b.of_type((BSub1, BSub2)) -> BSub1.c, BSub2.c``, without the need for the ``with_polymorphic`` to be explicit in the query. fixes #3256 --- doc/build/changelog/changelog_10.rst | 22 ++++++++++++++++++++++ doc/build/orm/inheritance.rst | 21 ++++++++++++++++++++- lib/sqlalchemy/orm/strategy_options.py | 5 ++++- lib/sqlalchemy/orm/util.py | 22 +++++++++++++++++++--- lib/sqlalchemy/util/_collections.py | 9 ++++++--- test/base/test_utils.py | 30 ++++++++++++++++++++++++++++++ test/orm/test_of_type.py | 16 ++++++++++++++++ 7 files changed, 117 insertions(+), 8 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 4e5e1ba1d..c0197a691 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,28 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, orm + :tickets: 3256 + + The :meth:`.PropComparator.of_type` modifier has been + improved in conjunction with loader directives such as + :func:`.joinedload` and :func:`.contains_eager` such that if + two :meth:`.PropComparator.of_type` modifiers of the same + base type/path are encountered, they will be joined together + into a single "polymorphic" entity, rather than replacing + the entity of type A with the one of type B. E.g. 
+ a joinedload of ``A.b.of_type(BSub1)->BSub1.c`` combined with + joinedload of ``A.b.of_type(BSub2)->BSub2.c`` will create a + single joinedload of ``A.b.of_type((BSub1, BSub2)) -> BSub1.c, BSub2.c``, + without the need for the ``with_polymorphic`` to be explicit + in the query. + + .. seealso:: + + :ref:`eagerloading_polymorphic_subtypes` - contains an updated + example illustrating the new format. + .. change:: :tags: bug, sql :tickets: 3245 diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index 9f01a3e24..0713634bc 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -475,6 +475,8 @@ subselect back to the parent ``companies`` table. :func:`.orm.aliased` and :func:`.orm.with_polymorphic` constructs in conjunction with :meth:`.Query.join`, ``any()`` and ``has()``. +.. _eagerloading_polymorphic_subtypes: + Eager Loading of Specific or Polymorphic Subtypes ++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -491,7 +493,7 @@ objects, querying the ``employee`` and ``engineer`` tables simultaneously:: ) ) -As is the case with :meth:`.Query.join`, :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` +As is the case with :meth:`.Query.join`, :meth:`~PropComparator.of_type` also can be used with eager loading and :func:`.orm.with_polymorphic` at the same time, so that all sub-attributes of all referenced subtypes can be loaded:: @@ -513,6 +515,23 @@ can be loaded:: :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`, supporting single target types as well as :func:`.orm.with_polymorphic` targets. +Another option for the above query is to state the two subtypes separately; +the :func:`.joinedload` directive should detect this and create the +above ``with_polymorphic`` construct automatically:: + + session.query(Company).\ + options( + joinedload(Company.employees.of_type(Manager)), + joinedload(Company.employees.of_type(Engineer)), + ) + ) + +.. versionadded:: 1.0 + Eager loaders such as :func:`.joinedload` will create a polymorphic + entity when multiple overlapping :meth:`~PropComparator.of_type` + directives are encountered. 
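For comparison, a minimal sketch of the explicit form that the loader now builds automatically, assuming the ``Company`` / ``Employee`` / ``Manager`` / ``Engineer`` mapping used in the surrounding examples::

    from sqlalchemy.orm import joinedload, with_polymorphic

    # explicit equivalent of the two stacked of_type() directives above
    manager_and_engineer = with_polymorphic(
        Employee, [Manager, Engineer], aliased=True)

    session.query(Company).options(
        joinedload(Company.employees.of_type(manager_and_engineer))
    )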
+ + Single Table Inheritance ------------------------ diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index a4107202e..276da2ae0 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -161,11 +161,14 @@ class Load(Generative, MapperOption): ext_info = inspect(ac) path_element = ext_info.mapper + existing = path.entity_path[prop].get( + self.context, "path_with_polymorphic") if not ext_info.is_aliased_class: ac = orm_util.with_polymorphic( ext_info.mapper.base_mapper, ext_info.mapper, aliased=True, - _use_mapper_path=True) + _use_mapper_path=True, + _existing_alias=existing) path.entity_path[prop].set( self.context, "path_with_polymorphic", inspect(ac)) path = path[prop][path_element] diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index ad610a4ac..4be8d19ff 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -543,8 +543,13 @@ class AliasedInsp(InspectionAttr): mapper, self) def __repr__(self): - return '' % ( - id(self), self.class_.__name__) + if self.with_polymorphic_mappers: + with_poly = "(%s)" % ", ".join( + mp.class_.__name__ for mp in self.with_polymorphic_mappers) + else: + with_poly = "" + return '' % ( + id(self), self.class_.__name__, with_poly) inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) @@ -648,7 +653,8 @@ def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): def with_polymorphic(base, classes, selectable=False, flat=False, polymorphic_on=None, aliased=False, - innerjoin=False, _use_mapper_path=False): + innerjoin=False, _use_mapper_path=False, + _existing_alias=None): """Produce an :class:`.AliasedClass` construct which specifies columns for descendant mappers of the given base. @@ -713,6 +719,16 @@ def with_polymorphic(base, classes, selectable=False, only be specified if querying for one specific subtype only """ primary_mapper = _class_to_mapper(base) + if _existing_alias: + assert _existing_alias.mapper is primary_mapper + classes = util.to_set(classes) + new_classes = set([ + mp.class_ for mp in + _existing_alias.with_polymorphic_mappers]) + if classes == new_classes: + return _existing_alias + else: + classes = classes.union(new_classes) mappers, selectable = primary_mapper.\ _with_polymorphic_args(classes, selectable, innerjoin=innerjoin) diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index a1fbc0fa0..d36852698 100644 --- a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -10,9 +10,10 @@ from __future__ import absolute_import import weakref import operator -from .compat import threading, itertools_filterfalse +from .compat import threading, itertools_filterfalse, string_types from . 
import py2k import types +import collections EMPTY_SET = frozenset() @@ -779,10 +780,12 @@ def coerce_generator_arg(arg): def to_list(x, default=None): if x is None: return default - if not isinstance(x, (list, tuple)): + if not isinstance(x, collections.Iterable) or isinstance(x, string_types): return [x] - else: + elif isinstance(x, list): return x + else: + return list(x) def to_set(x): diff --git a/test/base/test_utils.py b/test/base/test_utils.py index f75c5cbe9..df61d7874 100644 --- a/test/base/test_utils.py +++ b/test/base/test_utils.py @@ -8,6 +8,7 @@ from sqlalchemy.util import classproperty, WeakSequence, get_callable_argspec from sqlalchemy.sql import column from sqlalchemy.util import langhelpers + class _KeyedTupleTest(object): def _fixture(self, values, labels): @@ -283,6 +284,35 @@ class MemoizedAttrTest(fixtures.TestBase): eq_(val[0], 21) +class ToListTest(fixtures.TestBase): + def test_from_string(self): + eq_( + util.to_list("xyz"), + ["xyz"] + ) + + def test_from_set(self): + spec = util.to_list(set([1, 2, 3])) + assert isinstance(spec, list) + eq_( + sorted(spec), + [1, 2, 3] + ) + + def test_from_dict(self): + spec = util.to_list({1: "a", 2: "b", 3: "c"}) + assert isinstance(spec, list) + eq_( + sorted(spec), + [1, 2, 3] + ) + + def test_from_tuple(self): + eq_( + util.to_list((1, 2, 3)), + [1, 2, 3] + ) + class ColumnCollectionTest(fixtures.TestBase): def test_in(self): diff --git a/test/orm/test_of_type.py b/test/orm/test_of_type.py index 836d85cc7..b9ebc2daf 100644 --- a/test/orm/test_of_type.py +++ b/test/orm/test_of_type.py @@ -14,6 +14,7 @@ from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, Boss _PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\ _PolymorphicAliasedJoins + class _PolymorphicTestBase(object): __dialect__ = 'default' @@ -191,6 +192,21 @@ class _PolymorphicTestBase(object): ) self.assert_sql_count(testing.db, go, 3) + def test_joinedload_stacked_of_type(self): + sess = Session() + + def go(): + eq_( + sess.query(Company). + filter_by(company_id=1). + options( + joinedload(Company.employees.of_type(Manager)), + joinedload(Company.employees.of_type(Engineer)) + ).all(), + [self._company_with_emps_fixture()[0]] + ) + self.assert_sql_count(testing.db, go, 2) + class PolymorphicPolymorphicTest(_PolymorphicTestBase, _PolymorphicPolymorphic): def _polymorphic_join_target(self, cls): -- cgit v1.2.1 From 212d93366d1c5c3a8e44f8b428eeece6258ae28f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Nov 2014 18:01:31 -0500 Subject: - The behavioral contract of the :attr:`.ForeignKeyConstraint.columns` collection has been made consistent; this attribute is now a :class:`.ColumnCollection` like that of all other constraints and is initialized at the point when the constraint is associated with a :class:`.Table`. fixes #3243 --- doc/build/changelog/changelog_10.rst | 14 +++++ doc/build/changelog/migration_10.rst | 16 +++++- lib/sqlalchemy/dialects/sqlite/base.py | 4 +- lib/sqlalchemy/sql/compiler.py | 6 +- lib/sqlalchemy/sql/schema.py | 100 ++++++++++++++++++++------------- test/sql/test_metadata.py | 44 +++++++++++++++ 6 files changed, 138 insertions(+), 46 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index c0197a691..d0d025011 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,20 @@ series as well. 
For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, sql + :tickets: 3243 + + The behavioral contract of the :attr:`.ForeignKeyConstraint.columns` + collection has been made consistent; this attribute is now a + :class:`.ColumnCollection` like that of all other constraints and + is initialized at the point when the constraint is associated with + a :class:`.Table`. + + .. seealso:: + + :ref:`change_3243` + .. change:: :tags: bug, orm :tickets: 3256 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index bc7fa139f..c4157266b 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1427,7 +1427,21 @@ A :class:`.Table` can be set up for reflection by passing :ticket:`3027` - +.. _change_3243: + +ForeignKeyConstraint.columns is now a ColumnCollection +------------------------------------------------------ + +:attr:`.ForeignKeyConstraint.columns` was previously a plain list +containing either strings or :class:`.Column` objects, depending on +how the :class:`.ForeignKeyConstraint` was constructed and whether it was +associated with a table. The collection is now a :class:`.ColumnCollection`, +and is only initialized after the :class:`.ForeignKeyConstraint` is +associated with a :class:`.Table`. A new accessor +:attr:`.ForeignKeyConstraint.column_keys` +is added to unconditionally return string keys for the local set of +columns regardless of how the object was constructed or its current +state. Dialect Changes =============== diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 335b35c94..33003297c 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -646,8 +646,8 @@ class SQLiteDDLCompiler(compiler.DDLCompiler): def visit_foreign_key_constraint(self, constraint): - local_table = list(constraint._elements.values())[0].parent.table - remote_table = list(constraint._elements.values())[0].column.table + local_table = constraint.elements[0].parent.table + remote_table = constraint.elements[0].column.table if local_table.schema != remote_table.schema: return None diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 8f3ede25f..b102f0240 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2286,14 +2286,14 @@ class DDLCompiler(Compiled): formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name - remote_table = list(constraint._elements.values())[0].column.table + remote_table = list(constraint.elements)[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name) - for f in constraint._elements.values()), + for f in constraint.elements), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name) - for f in constraint._elements.values()) + for f in constraint.elements) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 96cabbf4f..8b2eb12f0 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1804,7 +1804,7 @@ class ForeignKey(DialectKWArgs, SchemaItem): match=self.match, **self._unvalidated_dialect_kw ) - self.constraint._elements[self.parent] = self + 
self.constraint._append_element(column, self) self.constraint._set_parent_with_dispatch(table) table.foreign_keys.add(self) @@ -2489,7 +2489,7 @@ class CheckConstraint(Constraint): return self._schema_item_copy(c) -class ForeignKeyConstraint(Constraint): +class ForeignKeyConstraint(ColumnCollectionConstraint): """A table-level FOREIGN KEY constraint. Defines a single column or composite FOREIGN KEY ... REFERENCES @@ -2564,9 +2564,10 @@ class ForeignKeyConstraint(Constraint): .. versionadded:: 0.9.2 """ - super(ForeignKeyConstraint, self).\ - __init__(name, deferrable, initially, info=info, **dialect_kw) + Constraint.__init__( + self, name=name, deferrable=deferrable, initially=initially, + info=info, **dialect_kw) self.onupdate = onupdate self.ondelete = ondelete self.link_to_name = link_to_name @@ -2575,14 +2576,12 @@ class ForeignKeyConstraint(Constraint): self.use_alter = use_alter self.match = match - self._elements = util.OrderedDict() - # standalone ForeignKeyConstraint - create # associated ForeignKey objects which will be applied to hosted # Column objects (in col.foreign_keys), either now or when attached # to the Table for string-specified names - for col, refcol in zip(columns, refcolumns): - self._elements[col] = ForeignKey( + self.elements = [ + ForeignKey( refcol, _constraint=self, name=self.name, @@ -2594,25 +2593,36 @@ class ForeignKeyConstraint(Constraint): deferrable=self.deferrable, initially=self.initially, **self.dialect_kwargs - ) + ) for refcol in refcolumns + ] + ColumnCollectionMixin.__init__(self, *columns) if table is not None: + if hasattr(self, "parent"): + assert table is self.parent self._set_parent_with_dispatch(table) - elif columns and \ - isinstance(columns[0], Column) and \ - columns[0].table is not None: - self._set_parent_with_dispatch(columns[0].table) + + def _append_element(self, column, fk): + self.columns.add(column) + self.elements.append(fk) + + @property + def _elements(self): + # legacy - provide a dictionary view of (column_key, fk) + return util.OrderedDict( + zip(self.column_keys, self.elements) + ) @property def _referred_schema(self): - for elem in self._elements.values(): + for elem in self.elements: return elem._referred_schema else: return None def _validate_dest_table(self, table): table_keys = set([elem._table_key() - for elem in self._elements.values()]) + for elem in self.elements]) if None not in table_keys and len(table_keys) > 1: elem0, elem1 = sorted(table_keys)[0:2] raise exc.ArgumentError( @@ -2625,38 +2635,48 @@ class ForeignKeyConstraint(Constraint): )) @property - def _col_description(self): - return ", ".join(self._elements) + def column_keys(self): + """Return a list of string keys representing the local + columns in this :class:`.ForeignKeyConstraint`. - @property - def columns(self): - return list(self._elements) + This list is either the original string arguments sent + to the constructor of the :class:`.ForeignKeyConstraint`, + or if the constraint has been initialized with :class:`.Column` + objects, is the string .key of each element. + + .. 
versionadded:: 1.0.0 + + """ + if hasattr(self, 'table'): + return self.columns.keys() + else: + return [ + col.key if isinstance(col, ColumnElement) + else str(col) for col in self._pending_colargs + ] @property - def elements(self): - return list(self._elements.values()) + def _col_description(self): + return ", ".join(self.column_keys) def _set_parent(self, table): - super(ForeignKeyConstraint, self)._set_parent(table) - - self._validate_dest_table(table) + Constraint._set_parent(self, table) - for col, fk in self._elements.items(): - # string-specified column names now get - # resolved to Column objects - if isinstance(col, util.string_types): - try: - col = table.c[col] - except KeyError: - raise exc.ArgumentError( - "Can't create ForeignKeyConstraint " - "on table '%s': no column " - "named '%s' is present." % (table.description, col)) + try: + ColumnCollectionConstraint._set_parent(self, table) + except KeyError as ke: + raise exc.ArgumentError( + "Can't create ForeignKeyConstraint " + "on table '%s': no column " + "named '%s' is present." % (table.description, ke.args[0])) + for col, fk in zip(self.columns, self.elements): if not hasattr(fk, 'parent') or \ fk.parent is not col: fk._set_parent_with_dispatch(col) + self._validate_dest_table(table) + if self.use_alter: def supports_alter(ddl, event, schema_item, bind, **kw): return table in set(kw['tables']) and \ @@ -2669,14 +2689,14 @@ class ForeignKeyConstraint(Constraint): def copy(self, schema=None, target_table=None, **kw): fkc = ForeignKeyConstraint( - [x.parent.key for x in self._elements.values()], + [x.parent.key for x in self.elements], [x._get_colspec( schema=schema, table_name=target_table.name if target_table is not None and x._table_key() == x.parent.table.key else None) - for x in self._elements.values()], + for x in self.elements], name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, @@ -2687,8 +2707,8 @@ class ForeignKeyConstraint(Constraint): match=self.match ) for self_fk, other_fk in zip( - self._elements.values(), - fkc._elements.values()): + self.elements, + fkc.elements): self_fk._schema_item_copy(other_fk) return self._schema_item_copy(fkc) diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 3c55242fd..3f24fd07d 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -227,6 +227,50 @@ class MetaDataTest(fixtures.TestBase, ComparesTables): fk1 = ForeignKeyConstraint(('foo', ), ('bar', ), table=t1) assert fk1 in t1.constraints + def test_fk_constraint_col_collection_w_table(self): + c1 = Column('foo', Integer) + c2 = Column('bar', Integer) + m = MetaData() + t1 = Table('t', m, c1, c2) + fk1 = ForeignKeyConstraint(('foo', ), ('bar', ), table=t1) + eq_(dict(fk1.columns), {"foo": c1}) + + def test_fk_constraint_col_collection_no_table(self): + fk1 = ForeignKeyConstraint(('foo', 'bat'), ('bar', 'hoho')) + eq_(dict(fk1.columns), {}) + eq_(fk1.column_keys, ['foo', 'bat']) + eq_(fk1._col_description, 'foo, bat') + eq_(fk1._elements, {"foo": fk1.elements[0], "bat": fk1.elements[1]}) + + def test_fk_constraint_col_collection_no_table_real_cols(self): + c1 = Column('foo', Integer) + c2 = Column('bar', Integer) + fk1 = ForeignKeyConstraint((c1, ), (c2, )) + eq_(dict(fk1.columns), {}) + eq_(fk1.column_keys, ['foo']) + eq_(fk1._col_description, 'foo') + eq_(fk1._elements, {"foo": fk1.elements[0]}) + + def test_fk_constraint_col_collection_added_to_table(self): + c1 = Column('foo', Integer) + m = MetaData() + fk1 = ForeignKeyConstraint(('foo', ), ('bar', )) + Table('t', m, c1, 
fk1) + eq_(dict(fk1.columns), {"foo": c1}) + eq_(fk1._elements, {"foo": fk1.elements[0]}) + + def test_fk_constraint_col_collection_via_fk(self): + fk = ForeignKey('bar') + c1 = Column('foo', Integer, fk) + m = MetaData() + t1 = Table('t', m, c1) + fk1 = fk.constraint + eq_(fk1.column_keys, ['foo']) + assert fk1 in t1.constraints + eq_(fk1.column_keys, ['foo']) + eq_(dict(fk1.columns), {"foo": c1}) + eq_(fk1._elements, {"foo": fk}) + def test_fk_no_such_parent_col_error(self): meta = MetaData() a = Table('a', meta, Column('a', Integer)) -- cgit v1.2.1 From d69f44b78090c6795b0b73b3befef39af44b6918 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Nov 2014 23:28:54 -0500 Subject: - add a new option --force-write-profiles to rewrite profiles even if they are passing --- lib/sqlalchemy/testing/plugin/plugin_base.py | 5 ++++- lib/sqlalchemy/testing/profiling.py | 8 ++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index 6696427dc..614a12133 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -93,7 +93,10 @@ def setup_options(make_option): help="Exclude tests with tag ") make_option("--write-profiles", action="store_true", dest="write_profiles", default=False, - help="Write/update profiling data.") + help="Write/update failing profiling data.") + make_option("--force-write-profiles", action="store_true", + dest="force_write_profiles", default=False, + help="Unconditionally write/update profiling data.") def configure_follower(follower_ident): diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index 6fc51ef32..671bbe32d 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -42,7 +42,11 @@ class ProfileStatsFile(object): """ def __init__(self, filename): - self.write = ( + self.force_write = ( + config.options is not None and + config.options.force_write_profiles + ) + self.write = self.force_write or ( config.options is not None and config.options.write_profiles ) @@ -239,7 +243,7 @@ def count_functions(variance=0.05): deviance = int(callcount * variance) failed = abs(callcount - expected_count) > deviance - if failed: + if failed or _profile_stats.force_write: if _profile_stats.write: _profile_stats.replace(callcount) else: -- cgit v1.2.1 From ee38bcdec26d217dfeedc4f75200be44a388af78 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Nov 2014 23:29:15 -0500 Subject: - refresh all zoomark profiles --- test/profiles.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/profiles.txt b/test/profiles.txt index 7d6e016b8..97ef13873 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -360,9 +360,9 @@ test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psyco # TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation -test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 6098,399,6666,18183,1118,2606 -test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 6169,404,6898,19614,1226,2671 -test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 6008,386,6716,18339,1091,2630 -test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 6093,391,6820,19366,1177,2659 
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 6007,386,6716,18339,1091,2630 -test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 6087,391,6820,19366,1177,2659 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 6389,407,6826,18499,1134,2661 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 6480,412,7058,19930,1242,2726 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 6268,394,6860,18613,1107,2679 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 6361,399,6964,19640,1193,2708 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 6275,394,6860,18613,1107,2679 +test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 6360,399,6964,19640,1193,2708 -- cgit v1.2.1 From 79c0aa6b7320f94399634d02997faacbb6ced1d7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Nov 2014 23:33:47 -0500 Subject: - use self.parent, not table here as there's an attributeerror trap for self.table that behaves differently in py3k --- lib/sqlalchemy/sql/schema.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 8b2eb12f0..4093d7115 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -2647,7 +2647,7 @@ class ForeignKeyConstraint(ColumnCollectionConstraint): .. versionadded:: 1.0.0 """ - if hasattr(self, 'table'): + if hasattr(self, "parent"): return self.columns.keys() else: return [ -- cgit v1.2.1 From fb06fa9d6e6f6da8eee5b7dfc196c3e12761da61 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 26 Nov 2014 10:34:46 -0500 Subject: - add an order_by here --- test/orm/test_mapper.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index 0a9cbfc71..63ba1a207 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -222,7 +222,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL): mapper(Address, addresses) s = create_session() a = s.query(Address).from_statement( - sa.select([addresses.c.id, addresses.c.user_id])).first() + sa.select([addresses.c.id, addresses.c.user_id]). + order_by(addresses.c.id)).first() eq_(a.user_id, 7) eq_(a.id, 1) # email address auto-defers -- cgit v1.2.1 From 99e51151244c7028fcc319d60e2e8ad1ba9e22bb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 26 Nov 2014 13:50:43 -0500 Subject: - changelog, improve docstring/test for #3217. fixes #3217 --- doc/build/changelog/changelog_10.rst | 9 +++++++++ lib/sqlalchemy/orm/query.py | 8 +++++++- test/orm/test_joins.py | 17 ++++++++++------- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index d0d025011..4a350370f 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,15 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. 
change:: + :tags: feature, orm + :tickets: 3217 + + Added a parameter :paramref:`.Query.join.isouter` which is synonymous + with calling :meth:`.Query.outerjoin`; this flag is to provide a more + consistent interface compared to Core :meth:`.FromClause.join`. + Pull request courtesy Jonathan Vanasco. + .. change:: :tags: bug, sql :tickets: 3243 diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 884e04bbc..790686288 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1741,7 +1741,13 @@ class Query(object): and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. :param isouter=False: If True, the join used will be a left outer join, - just as if the ``outerjoin()`` method were called. + just as if the :meth:`.Query.outerjoin` method were called. This + flag is here to maintain consistency with the same flag as accepted + by :meth:`.FromClause.join` and other Core constructs. + + + .. versionadded:: 1.0.0 + :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original diff --git a/test/orm/test_joins.py b/test/orm/test_joins.py index 98888862f..979ab0518 100644 --- a/test/orm/test_joins.py +++ b/test/orm/test_joins.py @@ -430,6 +430,16 @@ class JoinTest(QueryTest, AssertsCompiledSQL): sess.query(literal_column('x'), User).join, Address ) + def test_isouter_flag(self): + User = self.classes.User + + self.assert_compile( + create_session().query(User).join('orders', isouter=True), + "SELECT users.id AS users_id, users.name AS users_name " + "FROM users LEFT OUTER JOIN orders ON users.id = orders.user_id" + ) + + def test_multi_tuple_form(self): """test the 'tuple' form of join, now superseded by the two-element join() form. 
@@ -724,13 +734,6 @@ class JoinTest(QueryTest, AssertsCompiledSQL): filter_by(id=3).outerjoin('orders','address').filter_by(id=1).all() assert [User(id=7, name='jack')] == result - def test_overlapping_paths_join_isouter(self): - User = self.classes.User - - result = create_session().query(User).join('orders', 'items', isouter=True).\ - filter_by(id=3).join('orders','address', isouter=True).filter_by(id=1).all() - assert [User(id=7, name='jack')] == result - def test_from_joinpoint(self): Item, User, Order = (self.classes.Item, self.classes.User, -- cgit v1.2.1 From 028f7b8a8688fcc6e9b926b99ed80fe8e1d511aa Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 26 Nov 2014 13:51:39 -0500 Subject: - set default dialect here --- test/orm/test_joins.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/orm/test_joins.py b/test/orm/test_joins.py index 979ab0518..c519032b3 100644 --- a/test/orm/test_joins.py +++ b/test/orm/test_joins.py @@ -362,6 +362,8 @@ class InheritedJoinTest(fixtures.MappedTest, AssertsCompiledSQL): class JoinOnSynonymTest(_fixtures.FixtureTest, AssertsCompiledSQL): + __dialect__ = 'default' + @classmethod def setup_mappers(cls): User = cls.classes.User -- cgit v1.2.1 From a88be57fb7ef96e914d8a7fb90de375ec7f0befd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 26 Nov 2014 14:58:32 -0500 Subject: - formatting --- examples/versioned_history/history_meta.py | 59 ++++++++++----- examples/versioned_history/test_versioning.py | 100 ++++++++++++++++---------- 2 files changed, 106 insertions(+), 53 deletions(-) diff --git a/examples/versioned_history/history_meta.py b/examples/versioned_history/history_meta.py index f9e979a6a..f10444e52 100644 --- a/examples/versioned_history/history_meta.py +++ b/examples/versioned_history/history_meta.py @@ -8,15 +8,18 @@ from sqlalchemy import event import datetime from sqlalchemy.orm.properties import RelationshipProperty + def col_references_table(col, table): for fk in col.foreign_keys: if fk.references(table): return True return False + def _is_versioning_col(col): return "version_meta" in col.info + def _history_mapper(local_mapper): cls = local_mapper.class_ @@ -38,7 +41,8 @@ def _history_mapper(local_mapper): col.default = col.server_default = None return col - if not super_mapper or local_mapper.local_table is not super_mapper.local_table: + if not super_mapper or \ + local_mapper.local_table is not super_mapper.local_table: cols = [] for column in local_mapper.local_table.c: if _is_versioning_col(column): @@ -46,8 +50,14 @@ def _history_mapper(local_mapper): col = _col_copy(column) - if super_mapper and col_references_table(column, super_mapper.local_table): - super_fks.append((col.key, list(super_history_mapper.local_table.primary_key)[0])) + if super_mapper and \ + col_references_table(column, super_mapper.local_table): + super_fks.append( + ( + col.key, + list(super_history_mapper.local_table.primary_key)[0] + ) + ) cols.append(col) @@ -55,15 +65,21 @@ def _history_mapper(local_mapper): polymorphic_on = col if super_mapper: - super_fks.append(('version', super_history_mapper.local_table.c.version)) + super_fks.append( + ( + 'version', super_history_mapper.local_table.c.version + ) + ) version_meta = {"version_meta": True} # add column.info to identify # columns specific to versioning # "version" stores the integer version id. This column is # required. 
- cols.append(Column('version', Integer, primary_key=True, - autoincrement=False, info=version_meta)) + cols.append( + Column( + 'version', Integer, primary_key=True, + autoincrement=False, info=version_meta)) # "changed" column stores the UTC timestamp of when the # history row was created. @@ -75,10 +91,11 @@ def _history_mapper(local_mapper): if super_fks: cols.append(ForeignKeyConstraint(*zip(*super_fks))) - table = Table(local_mapper.local_table.name + '_history', - local_mapper.local_table.metadata, - *cols, - schema=local_mapper.local_table.schema + table = Table( + local_mapper.local_table.name + '_history', + local_mapper.local_table.metadata, + *cols, + schema=local_mapper.local_table.schema ) else: # single table inheritance. take any additional columns that may have @@ -108,7 +125,8 @@ def _history_mapper(local_mapper): local_mapper.local_table.append_column( Column('version', Integer, default=1, nullable=False) ) - local_mapper.add_property("version", local_mapper.local_table.c.version) + local_mapper.add_property( + "version", local_mapper.local_table.c.version) class Versioned(object): @@ -126,6 +144,7 @@ def versioned_objects(iter): if hasattr(obj, '__history_mapper__'): yield obj + def create_version(obj, session, deleted=False): obj_mapper = object_mapper(obj) history_mapper = obj.__history_mapper__ @@ -137,7 +156,10 @@ def create_version(obj, session, deleted=False): obj_changed = False - for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()): + for om, hm in zip( + obj_mapper.iterate_to_root(), + history_mapper.iterate_to_root() + ): if hm.single: continue @@ -157,11 +179,12 @@ def create_version(obj, session, deleted=False): # in the case of single table inheritance, there may be # columns on the mapped table intended for the subclass only. # the "unmapped" status of the subclass column on the - # base class is a feature of the declarative module as of sqla 0.5.2. + # base class is a feature of the declarative module. continue - # expired object attributes and also deferred cols might not be in the - # dict. force it to load no matter what by using getattr(). + # expired object attributes and also deferred cols might not + # be in the dict. force it to load no matter what by + # using getattr(). 
if prop.key not in obj_state.dict: getattr(obj, prop.key) @@ -182,8 +205,9 @@ def create_version(obj, session, deleted=False): # check those too for prop in obj_mapper.iterate_properties: if isinstance(prop, RelationshipProperty) and \ - attributes.get_history(obj, prop.key, - passive=attributes.PASSIVE_NO_INITIALIZE).has_changes(): + attributes.get_history( + obj, prop.key, + passive=attributes.PASSIVE_NO_INITIALIZE).has_changes(): for p in prop.local_columns: if p.foreign_keys: obj_changed = True @@ -201,6 +225,7 @@ def create_version(obj, session, deleted=False): session.add(hist) obj.version += 1 + def versioned_session(session): @event.listens_for(session, 'before_flush') def before_flush(session, flush_context, instances): diff --git a/examples/versioned_history/test_versioning.py b/examples/versioned_history/test_versioning.py index 874223d62..ed6935eb7 100644 --- a/examples/versioned_history/test_versioning.py +++ b/examples/versioned_history/test_versioning.py @@ -1,4 +1,5 @@ -"""Unit tests illustrating usage of the ``history_meta.py`` module functions.""" +"""Unit tests illustrating usage of the ``history_meta.py`` +module functions.""" from unittest import TestCase from sqlalchemy.ext.declarative import declarative_base @@ -11,10 +12,12 @@ from sqlalchemy.orm import exc as orm_exc engine = None + def setup_module(): global engine engine = create_engine('sqlite://', echo=True) + class TestVersioning(TestCase, AssertsCompiledSQL): __dialect__ = 'default' @@ -52,14 +55,16 @@ class TestVersioning(TestCase, AssertsCompiledSQL): SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( - sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), + sess.query(SomeClassHistory).filter( + SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1')] ) sc.name = 'sc1modified2' eq_( - sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), + sess.query(SomeClassHistory).order_by( + SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') @@ -76,7 +81,8 @@ class TestVersioning(TestCase, AssertsCompiledSQL): sess.commit() eq_( - sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), + sess.query(SomeClassHistory).order_by( + SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') @@ -87,7 +93,8 @@ class TestVersioning(TestCase, AssertsCompiledSQL): sess.commit() eq_( - sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), + sess.query(SomeClassHistory).order_by( + SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified'), @@ -164,13 +171,13 @@ class TestVersioning(TestCase, AssertsCompiledSQL): SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( - sess.query(SomeClassHistory.boole).order_by(SomeClassHistory.id).all(), + sess.query(SomeClassHistory.boole).order_by( + SomeClassHistory.id).all(), [(True, ), (None, )] ) eq_(sc.version, 3) - def test_deferred(self): """test versioning of unloaded, deferred columns.""" @@ -199,11 +206,11 @@ class TestVersioning(TestCase, AssertsCompiledSQL): SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( - sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), + sess.query(SomeClassHistory).filter( + SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', data='somedata')] ) - def test_joined_inheritance(self): class 
BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' @@ -212,7 +219,9 @@ class TestVersioning(TestCase, AssertsCompiledSQL): name = Column(String(50)) type = Column(String(20)) - __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'base'} + __mapper_args__ = { + 'polymorphic_on': type, + 'polymorphic_identity': 'base'} class SubClassSeparatePk(BaseClass): __tablename__ = 'subtable1' @@ -246,38 +255,50 @@ class TestVersioning(TestCase, AssertsCompiledSQL): sess.commit() BaseClassHistory = BaseClass.__history_mapper__.class_ - SubClassSeparatePkHistory = SubClassSeparatePk.__history_mapper__.class_ + SubClassSeparatePkHistory = \ + SubClassSeparatePk.__history_mapper__.class_ SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_ eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(), [ - SubClassSeparatePkHistory(id=1, name='sep1', type='sep', version=1), + SubClassSeparatePkHistory( + id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), - SubClassSamePkHistory(id=3, name='same1', type='same', version=1) + SubClassSamePkHistory( + id=3, name='same1', type='same', version=1) ] ) same1.subdata2 = 'same1subdatamod2' eq_( - sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), + sess.query(BaseClassHistory).order_by( + BaseClassHistory.id, BaseClassHistory.version).all(), [ - SubClassSeparatePkHistory(id=1, name='sep1', type='sep', version=1), + SubClassSeparatePkHistory( + id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), - SubClassSamePkHistory(id=3, name='same1', type='same', version=1), - SubClassSamePkHistory(id=3, name='same1', type='same', version=2) + SubClassSamePkHistory( + id=3, name='same1', type='same', version=1), + SubClassSamePkHistory( + id=3, name='same1', type='same', version=2) ] ) base1.name = 'base1mod2' eq_( - sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), + sess.query(BaseClassHistory).order_by( + BaseClassHistory.id, BaseClassHistory.version).all(), [ - SubClassSeparatePkHistory(id=1, name='sep1', type='sep', version=1), + SubClassSeparatePkHistory( + id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), - BaseClassHistory(id=2, name='base1mod', type='base', version=2), - SubClassSamePkHistory(id=3, name='same1', type='same', version=1), - SubClassSamePkHistory(id=3, name='same1', type='same', version=2) + BaseClassHistory( + id=2, name='base1mod', type='base', version=2), + SubClassSamePkHistory( + id=3, name='same1', type='same', version=1), + SubClassSamePkHistory( + id=3, name='same1', type='same', version=2) ] ) @@ -289,8 +310,9 @@ class TestVersioning(TestCase, AssertsCompiledSQL): name = Column(String(50)) type = Column(String(20)) - __mapper_args__ = {'polymorphic_on': type, - 'polymorphic_identity': 'base'} + __mapper_args__ = { + 'polymorphic_on': type, + 'polymorphic_identity': 'base'} class SubClass(BaseClass): __tablename__ = 'subtable' @@ -342,7 +364,8 @@ class TestVersioning(TestCase, AssertsCompiledSQL): "ON basetable_history.id = subtable_history.base_id " "AND basetable_history.version = subtable_history.version " "JOIN subsubtable_history ON subtable_history.id = " - "subsubtable_history.id AND subtable_history.version = subsubtable_history.version" + "subsubtable_history.id AND subtable_history.version = " + "subsubtable_history.version" ) ssc = 
SubSubClass(name='ss1', subdata1='sd1', subdata2='sd2') @@ -360,8 +383,9 @@ class TestVersioning(TestCase, AssertsCompiledSQL): [SubSubHistory(name='ss1', subdata1='sd1', subdata2='sd2', type='subsub', version=1)] ) - eq_(ssc, SubSubClass(name='ss1', subdata1='sd11', - subdata2='sd22', version=2)) + eq_(ssc, SubSubClass( + name='ss1', subdata1='sd11', + subdata2='sd22', version=2)) @@ -372,8 +396,9 @@ class TestVersioning(TestCase, AssertsCompiledSQL): id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - __mapper_args__ = {'polymorphic_on': type, - 'polymorphic_identity': 'base'} + __mapper_args__ = { + 'polymorphic_on': type, + 'polymorphic_identity': 'base'} class SubClass(BaseClass): @@ -396,8 +421,8 @@ class TestVersioning(TestCase, AssertsCompiledSQL): SubClassHistory = SubClass.__history_mapper__.class_ eq_( - sess.query(BaseClassHistory).order_by(BaseClassHistory.id, - BaseClassHistory.version).all(), + sess.query(BaseClassHistory).order_by( + BaseClassHistory.id, BaseClassHistory.version).all(), [BaseClassHistory(id=1, name='b1', type='base', version=1)] ) @@ -405,11 +430,12 @@ class TestVersioning(TestCase, AssertsCompiledSQL): b1.name = 'b1modified2' eq_( - sess.query(BaseClassHistory).order_by(BaseClassHistory.id, - BaseClassHistory.version).all(), + sess.query(BaseClassHistory).order_by( + BaseClassHistory.id, BaseClassHistory.version).all(), [ BaseClassHistory(id=1, name='b1', type='base', version=1), - BaseClassHistory(id=1, name='b1modified', type='base', version=2), + BaseClassHistory( + id=1, name='b1modified', type='base', version=2), SubClassHistory(id=2, name='s1', type='sub', version=1) ] ) @@ -475,14 +501,16 @@ class TestVersioning(TestCase, AssertsCompiledSQL): assert sc.version == 2 eq_( - sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), + sess.query(SomeClassHistory).filter( + SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', related_id=None)] ) sc.related = None eq_( - sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), + sess.query(SomeClassHistory).order_by( + SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1', related_id=None), SomeClassHistory(version=2, name='sc1', related_id=sr1.id) -- cgit v1.2.1 From de62497b03274c860ea2554dfbacb3064ce02c19 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 26 Nov 2014 14:58:44 -0500 Subject: - Updated the :ref:`examples_versioned_history` example such that mapped columns are re-mapped to match column names as well as grouping of columns; in particular, this allows columns that are explicitly grouped in a same-column-named joined inheritance scenario to be mapped in the same way in the history mappings, avoiding warnings added in the 0.9 series regarding this pattern and allowing the same view of attribute keys. --- doc/build/changelog/changelog_09.rst | 13 +++++ doc/build/orm/examples.rst | 2 + examples/versioned_history/history_meta.py | 45 ++++++++++++----- examples/versioned_history/test_versioning.py | 73 ++++++++++++++++++++++++--- 4 files changed, 112 insertions(+), 21 deletions(-) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index ef0277935..f10d48273 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,19 @@ .. changelog:: :version: 0.9.9 + .. 
change:: + :tags: bug, examples + :versions: 1.0.0 + + Updated the :ref:`examples_versioned_history` example such that + mapped columns are re-mapped to + match column names as well as grouping of columns; in particular, + this allows columns that are explicitly grouped in a same-column-named + joined inheritance scenario to be mapped in the same way in the + history mappings, avoiding warnings added in the 0.9 series + regarding this pattern and allowing the same view of attribute + keys. + .. change:: :tags: bug, examples :versions: 1.0.0 diff --git a/doc/build/orm/examples.rst b/doc/build/orm/examples.rst index b820dba9f..8803e1c34 100644 --- a/doc/build/orm/examples.rst +++ b/doc/build/orm/examples.rst @@ -79,6 +79,8 @@ XML Persistence Versioning Objects ------------------------ +.. _examples_versioned_history: + Versioning with a History Table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/examples/versioned_history/history_meta.py b/examples/versioned_history/history_meta.py index f10444e52..6d7b137eb 100644 --- a/examples/versioned_history/history_meta.py +++ b/examples/versioned_history/history_meta.py @@ -4,7 +4,7 @@ from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.orm import mapper, attributes, object_mapper from sqlalchemy.orm.exc import UnmappedColumnError from sqlalchemy import Table, Column, ForeignKeyConstraint, Integer, DateTime -from sqlalchemy import event +from sqlalchemy import event, util import datetime from sqlalchemy.orm.properties import RelationshipProperty @@ -36,14 +36,20 @@ def _history_mapper(local_mapper): super_fks = [] def _col_copy(col): + orig = col col = col.copy() + orig.info['history_copy'] = col col.unique = False col.default = col.server_default = None return col + properties = util.OrderedDict() if not super_mapper or \ local_mapper.local_table is not super_mapper.local_table: cols = [] + version_meta = {"version_meta": True} # add column.info to identify + # columns specific to versioning + for column in local_mapper.local_table.c: if _is_versioning_col(column): continue @@ -64,6 +70,13 @@ def _history_mapper(local_mapper): if column is local_mapper.polymorphic_on: polymorphic_on = col + orig_prop = local_mapper.get_property_by_column(column) + # carry over column re-mappings + if len(orig_prop.columns) > 1 or \ + orig_prop.columns[0].key != orig_prop.key: + properties[orig_prop.key] = tuple( + col.info['history_copy'] for col in orig_prop.columns) + if super_mapper: super_fks.append( ( @@ -71,9 +84,6 @@ def _history_mapper(local_mapper): ) ) - version_meta = {"version_meta": True} # add column.info to identify - # columns specific to versioning - # "version" stores the integer version id. This column is # required. cols.append( @@ -84,9 +94,10 @@ def _history_mapper(local_mapper): # "changed" column stores the UTC timestamp of when the # history row was created. # This column is optional and can be omitted. 
- cols.append(Column('changed', DateTime, - default=datetime.datetime.utcnow, - info=version_meta)) + cols.append(Column( + 'changed', DateTime, + default=datetime.datetime.utcnow, + info=version_meta)) if super_fks: cols.append(ForeignKeyConstraint(*zip(*super_fks))) @@ -108,17 +119,25 @@ def _history_mapper(local_mapper): if super_history_mapper: bases = (super_history_mapper.class_,) + + if table is not None: + properties['changed'] = ( + (table.c.changed, ) + + tuple(super_history_mapper.attrs.changed.columns) + ) + else: bases = local_mapper.base_mapper.class_.__bases__ versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {}) m = mapper( - versioned_cls, - table, - inherits=super_history_mapper, - polymorphic_on=polymorphic_on, - polymorphic_identity=local_mapper.polymorphic_identity - ) + versioned_cls, + table, + inherits=super_history_mapper, + polymorphic_on=polymorphic_on, + polymorphic_identity=local_mapper.polymorphic_identity, + properties=properties + ) cls.__history_mapper__ = m if not super_history_mapper: diff --git a/examples/versioned_history/test_versioning.py b/examples/versioned_history/test_versioning.py index ed6935eb7..dde73a5ae 100644 --- a/examples/versioned_history/test_versioning.py +++ b/examples/versioned_history/test_versioning.py @@ -4,11 +4,16 @@ module functions.""" from unittest import TestCase from sqlalchemy.ext.declarative import declarative_base from .history_meta import Versioned, versioned_session -from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Boolean -from sqlalchemy.orm import clear_mappers, Session, deferred, relationship +from sqlalchemy import create_engine, Column, Integer, String, \ + ForeignKey, Boolean, select +from sqlalchemy.orm import clear_mappers, Session, deferred, relationship, \ + column_property from sqlalchemy.testing import AssertsCompiledSQL, eq_, assert_raises from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.orm import exc as orm_exc +import warnings + +warnings.simplefilter("error") engine = None @@ -226,7 +231,10 @@ class TestVersioning(TestCase, AssertsCompiledSQL): class SubClassSeparatePk(BaseClass): __tablename__ = 'subtable1' - id = Column(Integer, primary_key=True) + id = column_property( + Column(Integer, primary_key=True), + BaseClass.id + ) base_id = Column(Integer, ForeignKey('basetable.id')) subdata1 = Column(String(50)) @@ -235,7 +243,8 @@ class TestVersioning(TestCase, AssertsCompiledSQL): class SubClassSamePk(BaseClass): __tablename__ = 'subtable2' - id = Column(Integer, ForeignKey('basetable.id'), primary_key=True) + id = Column( + Integer, ForeignKey('basetable.id'), primary_key=True) subdata2 = Column(String(50)) __mapper_args__ = {'polymorphic_identity': 'same'} @@ -317,7 +326,10 @@ class TestVersioning(TestCase, AssertsCompiledSQL): class SubClass(BaseClass): __tablename__ = 'subtable' - id = Column(Integer, primary_key=True) + id = column_property( + Column(Integer, primary_key=True), + BaseClass.id + ) base_id = Column(Integer, ForeignKey('basetable.id')) subdata1 = Column(String(50)) @@ -338,12 +350,18 @@ class TestVersioning(TestCase, AssertsCompiledSQL): q = sess.query(SubSubHistory) self.assert_compile( q, + + "SELECT " "subsubtable_history.id AS subsubtable_history_id, " "subtable_history.id AS subtable_history_id, " "basetable_history.id AS basetable_history_id, " + "subsubtable_history.changed AS subsubtable_history_changed, " + "subtable_history.changed AS subtable_history_changed, " + "basetable_history.changed AS 
basetable_history_changed, " + "basetable_history.name AS basetable_history_name, " "basetable_history.type AS basetable_history_type, " @@ -352,9 +370,6 @@ class TestVersioning(TestCase, AssertsCompiledSQL): "subtable_history.version AS subtable_history_version, " "basetable_history.version AS basetable_history_version, " - "subsubtable_history.changed AS subsubtable_history_changed, " - "subtable_history.changed AS subtable_history_changed, " - "basetable_history.changed AS basetable_history_changed, " "subtable_history.base_id AS subtable_history_base_id, " "subtable_history.subdata1 AS subtable_history_subdata1, " @@ -387,7 +402,49 @@ class TestVersioning(TestCase, AssertsCompiledSQL): name='ss1', subdata1='sd11', subdata2='sd22', version=2)) + def test_joined_inheritance_changed(self): + class BaseClass(Versioned, self.Base, ComparableEntity): + __tablename__ = 'basetable' + + id = Column(Integer, primary_key=True) + name = Column(String(50)) + type = Column(String(20)) + + __mapper_args__ = { + 'polymorphic_on': type, + 'polymorphic_identity': 'base' + } + + class SubClass(BaseClass): + __tablename__ = 'subtable' + + id = Column(Integer, ForeignKey('basetable.id'), primary_key=True) + + __mapper_args__ = {'polymorphic_identity': 'sep'} + + self.create_tables() + + BaseClassHistory = BaseClass.__history_mapper__.class_ + SubClassHistory = SubClass.__history_mapper__.class_ + sess = self.session + s1 = SubClass(name='s1') + sess.add(s1) + sess.commit() + + s1.name = 's2' + sess.commit() + actual_changed_base = sess.scalar( + select([BaseClass.__history_mapper__.local_table.c.changed])) + actual_changed_sub = sess.scalar( + select([SubClass.__history_mapper__.local_table.c.changed])) + h1 = sess.query(BaseClassHistory).first() + eq_(h1.changed, actual_changed_base) + eq_(h1.changed, actual_changed_sub) + + h1 = sess.query(SubClassHistory).first() + eq_(h1.changed, actual_changed_base) + eq_(h1.changed, actual_changed_sub) def test_single_inheritance(self): class BaseClass(Versioned, self.Base, ComparableEntity): -- cgit v1.2.1 From 98c2a679707432e6707ba70f1aebd10b28b861a3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 29 Nov 2014 14:44:26 -0500 Subject: - Fixed bug in :meth:`.Table.tometadata` method where the :class:`.CheckConstraint` associated with a :class:`.Boolean` or :class:`.Enum` type object would be doubled in the target table. The copy process now tracks the production of this constraint object as local to a type object. fixes #3260 --- doc/build/changelog/changelog_10.rst | 10 +++++++++ lib/sqlalchemy/sql/schema.py | 16 +++++++++------ lib/sqlalchemy/sql/sqltypes.py | 10 ++++----- test/sql/test_metadata.py | 40 ++++++++++++++++++++++++++++++++++++ 4 files changed, 65 insertions(+), 11 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 4a350370f..f2bd43a76 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,16 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, sql + :tickets: 3260 + + Fixed bug in :meth:`.Table.tometadata` method where the + :class:`.CheckConstraint` associated with a :class:`.Boolean` + or :class:`.Enum` type object would be doubled in the target table. + The copy process now tracks the production of this constraint object + as local to a type object. + .. 
change:: :tags: feature, orm :tickets: 3217 diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 4093d7115..b90f7fc53 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -824,7 +824,7 @@ class Table(DialectKWArgs, SchemaItem, TableClause): table.append_constraint( c.copy(schema=fk_constraint_schema, target_table=table)) - else: + elif not c._type_bound: table.append_constraint( c.copy(schema=schema, target_table=table)) for index in self.indexes: @@ -1295,7 +1295,7 @@ class Column(SchemaItem, ColumnClause): # Constraint objects plus non-constraint-bound ForeignKey objects args = \ - [c.copy(**kw) for c in self.constraints] + \ + [c.copy(**kw) for c in self.constraints if not c._type_bound] + \ [c.copy(**kw) for c in self.foreign_keys if not c.constraint] type_ = self.type @@ -2254,7 +2254,7 @@ class Constraint(DialectKWArgs, SchemaItem): __visit_name__ = 'constraint' def __init__(self, name=None, deferrable=None, initially=None, - _create_rule=None, info=None, + _create_rule=None, info=None, _type_bound=False, **dialect_kw): """Create a SQL constraint. @@ -2304,6 +2304,7 @@ class Constraint(DialectKWArgs, SchemaItem): if info: self.info = info self._create_rule = _create_rule + self._type_bound = _type_bound util.set_creation_order(self) self._validate_dialect_kwargs(dialect_kw) @@ -2420,7 +2421,7 @@ class CheckConstraint(Constraint): def __init__(self, sqltext, name=None, deferrable=None, initially=None, table=None, info=None, _create_rule=None, - _autoattach=True): + _autoattach=True, _type_bound=False): """Construct a CHECK constraint. :param sqltext: @@ -2450,7 +2451,9 @@ class CheckConstraint(Constraint): """ super(CheckConstraint, self).\ - __init__(name, deferrable, initially, _create_rule, info=info) + __init__( + name, deferrable, initially, _create_rule, info=info, + _type_bound=_type_bound) self.sqltext = _literal_as_text(sqltext, warn=False) if table is not None: self._set_parent_with_dispatch(table) @@ -2485,7 +2488,8 @@ class CheckConstraint(Constraint): deferrable=self.deferrable, _create_rule=self._create_rule, table=target_table, - _autoattach=False) + _autoattach=False, + _type_bound=self._type_bound) return self._schema_item_copy(c) diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 2729bc83e..7bf2f337c 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -998,13 +998,11 @@ class SchemaType(SchemaEventTarget): def adapt(self, impltype, **kw): schema = kw.pop('schema', self.schema) - # don't associate with MetaData as the hosting type + # don't associate with self.metadata as the hosting type # is already associated with it, avoid creating event # listeners - metadata = kw.pop('metadata', None) return impltype(name=self.name, schema=schema, - metadata=metadata, inherit_schema=self.inherit_schema, **kw) @@ -1165,7 +1163,8 @@ class Enum(String, SchemaType): type_coerce(column, self).in_(self.enums), name=_defer_name(self.name), _create_rule=util.portable_instancemethod( - self._should_create_constraint) + self._should_create_constraint), + _type_bound=True ) assert e.table is table @@ -1303,7 +1302,8 @@ class Boolean(TypeEngine, SchemaType): type_coerce(column, self).in_([0, 1]), name=_defer_name(self.name), _create_rule=util.portable_instancemethod( - self._should_create_constraint) + self._should_create_constraint), + _type_bound=True ) assert e.table is table diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 3f24fd07d..74044e3bb 
100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -1473,6 +1473,46 @@ class SchemaTypeTest(fixtures.TestBase): m1.create_all(testing.db) + def test_boolean_constraint_type_doesnt_double(self): + m1 = MetaData() + + t1 = Table('x', m1, Column("flag", Boolean())) + eq_( + len([ + c for c in t1.constraints + if isinstance(c, CheckConstraint)]), + 1 + ) + m2 = MetaData() + t2 = t1.tometadata(m2) + + eq_( + len([ + c for c in t2.constraints + if isinstance(c, CheckConstraint)]), + 1 + ) + + def test_enum_constraint_type_doesnt_double(self): + m1 = MetaData() + + t1 = Table('x', m1, Column("flag", Enum('a', 'b', 'c'))) + eq_( + len([ + c for c in t1.constraints + if isinstance(c, CheckConstraint)]), + 1 + ) + m2 = MetaData() + t2 = t1.tometadata(m2) + + eq_( + len([ + c for c in t2.constraints + if isinstance(c, CheckConstraint)]), + 1 + ) + class SchemaTest(fixtures.TestBase, AssertsCompiledSQL): -- cgit v1.2.1 From 87bfcf91e9659893f17adf307090bc0a4a8a8f23 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 12:01:19 -0500 Subject: - The :meth:`.PGDialect.has_table` method will now query against ``pg_catalog.pg_table_is_visible(c.oid)``, rather than testing for an exact schema match, when the schema name is None; this so that the method will also illustrate that temporary tables are present. Note that this is a behavioral change, as Postgresql allows a non-temporary table to silently overwrite an existing temporary table of the same name, so this changes the behavior of ``checkfirst`` in that unusual scenario. fixes #3264 --- doc/build/changelog/changelog_10.rst | 17 ++++++++ doc/build/changelog/migration_10.rst | 58 +++++++++++++++++++++++++ lib/sqlalchemy/dialects/postgresql/base.py | 3 +- lib/sqlalchemy/testing/suite/test_reflection.py | 4 ++ test/dialect/postgresql/test_reflection.py | 12 +++++ 5 files changed, 93 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index f2bd43a76..ad9eefa09 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -21,6 +21,23 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, postgresql + :tickets: 3264 + + The :meth:`.PGDialect.has_table` method will now query against + ``pg_catalog.pg_table_is_visible(c.oid)``, rather than testing + for an exact schema match, when the schema name is None; this + so that the method will also illustrate that temporary tables + are present. Note that this is a behavioral change, as Postgresql + allows a non-temporary table to silently overwrite an existing + temporary table of the same name, so this changes the behavior + of ``checkfirst`` in that unusual scenario. + + .. seealso:: + + :ref:`change_3264` + .. change:: :tags: bug, sql :tickets: 3260 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index c4157266b..e148e7d70 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -276,6 +276,64 @@ running 0.9 in production. :ticket:`2891` +.. 
_change_3264: + +Postgresql ``has_table()`` now works for temporary tables +--------------------------------------------------------- + +This is a simple fix such that "has table" for temporary tables now works, +so that code like the following may proceed:: + + from sqlalchemy import * + + metadata = MetaData() + user_tmp = Table( + "user_tmp", metadata, + Column("id", INT, primary_key=True), + Column('name', VARCHAR(50)), + prefixes=['TEMPORARY'] + ) + + e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + with e.begin() as conn: + user_tmp.create(conn, checkfirst=True) + + # checkfirst will succeed + user_tmp.create(conn, checkfirst=True) + +The very unlikely case that this behavior will cause a non-failing application +to behave differently, is because Postgresql allows a non-temporary table +to silently overwrite a temporary table. So code like the following will +now act completely differently, no longer creating the real table following +the temporary table:: + + from sqlalchemy import * + + metadata = MetaData() + user_tmp = Table( + "user_tmp", metadata, + Column("id", INT, primary_key=True), + Column('name', VARCHAR(50)), + prefixes=['TEMPORARY'] + ) + + e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + with e.begin() as conn: + user_tmp.create(conn, checkfirst=True) + + m2 = MetaData() + user = Table( + "user_tmp", m2, + Column("id", INT, primary_key=True), + Column('name', VARCHAR(50)), + ) + + # in 0.9, *will create* the new table, overwriting the old one. + # in 1.0, *will not create* the new table + user.create(conn, checkfirst=True) + +:ticket:`3264` + .. _feature_gh134: Postgresql FILTER keyword diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index baa640eaa..034ee9076 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1942,7 +1942,8 @@ class PGDialect(default.DefaultDialect): cursor = connection.execute( sql.text( "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=current_schema() " + "n.oid=c.relnamespace where " + "pg_catalog.pg_table_is_visible(c.oid) " "and relname=:name", bindparams=[ sql.bindparam('name', util.text_type(table_name), diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 08b858b47..e58b6f068 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -128,6 +128,10 @@ class ComponentReflectionTest(fixtures.TablesTest): DDL("create temporary view user_tmp_v as " "select * from user_tmp") ) + event.listen( + user_tmp, "before_drop", + DDL("drop view user_tmp_v") + ) @classmethod def define_index(cls, metadata, users): diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index 8de71216e..0dda1fa45 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -322,6 +322,18 @@ class ReflectionTest(fixtures.TestBase): t2 = Table('t', m2, autoload=True) eq_([c.name for c in t2.primary_key], ['t_id']) + @testing.provide_metadata + def test_has_temporary_table(self): + assert not testing.db.has_table("some_temp_table") + user_tmp = Table( + "some_temp_table", self.metadata, + Column("id", Integer, primary_key=True), + Column('name', String(50)), + prefixes=['TEMPORARY'] + ) + user_tmp.create(testing.db) + assert 
testing.db.has_table("some_temp_table") + @testing.provide_metadata def test_cross_schema_reflection_one(self): -- cgit v1.2.1 From f5ff86983f9cc7914a89b96da1fd2638677d345b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 18:29:56 -0500 Subject: - The :meth:`.Operators.match` operator is now handled such that the return type is not strictly assumed to be boolean; it now returns a :class:`.Boolean` subclass called :class:`.MatchType`. The type will still produce boolean behavior when used in Python expressions, however the dialect can override its behavior at result time. In the case of MySQL, while the MATCH operator is typically used in a boolean context within an expression, if one actually queries for the value of a match expression, a floating point value is returned; this value is not compatible with SQLAlchemy's C-based boolean processor, so MySQL's result-set behavior now follows that of the :class:`.Float` type. A new operator object ``notmatch_op`` is also added to better allow dialects to define the negation of a match operation. fixes #3263 --- doc/build/changelog/changelog_10.rst | 23 +++++++++++++++++++ doc/build/changelog/migration_10.rst | 31 +++++++++++++++++++++++++ doc/build/core/types.rst | 3 +++ lib/sqlalchemy/dialects/mysql/base.py | 9 ++++++++ lib/sqlalchemy/sql/compiler.py | 9 ++++++-- lib/sqlalchemy/sql/default_comparator.py | 21 +++++++++++++---- lib/sqlalchemy/sql/elements.py | 2 +- lib/sqlalchemy/sql/operators.py | 5 ++++ lib/sqlalchemy/sql/sqltypes.py | 17 ++++++++++++++ lib/sqlalchemy/sql/type_api.py | 2 +- lib/sqlalchemy/types.py | 1 + test/dialect/mysql/test_query.py | 39 +++++++++++++++++++++++++++----- test/dialect/postgresql/test_query.py | 6 +++++ test/sql/test_operators.py | 28 ++++++++++++++++++++++- 14 files changed, 180 insertions(+), 16 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index ad9eefa09..f90ae40f8 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -1,3 +1,4 @@ + ============== 1.0 Changelog ============== @@ -21,6 +22,28 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, mysql + :tickets: 3263 + + The :meth:`.Operators.match` operator is now handled such that the + return type is not strictly assumed to be boolean; it now + returns a :class:`.Boolean` subclass called :class:`.MatchType`. + The type will still produce boolean behavior when used in Python + expressions, however the dialect can override its behavior at + result time. In the case of MySQL, while the MATCH operator + is typically used in a boolean context within an expression, + if one actually queries for the value of a match expression, a + floating point value is returned; this value is not compatible + with SQLAlchemy's C-based boolean processor, so MySQL's result-set + behavior now follows that of the :class:`.Float` type. + A new operator object ``notmatch_op`` is also added to better allow + dialects to define the negation of a match operation. + + .. seealso:: + + :ref:`change_3263` + .. change:: :tags: bug, postgresql :tickets: 3264 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index e148e7d70..929a5fe3d 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1547,6 +1547,37 @@ again works on MySQL. :ticket:`3186` +.. 
_change_3263: + +The match() operator now returns an agnostic MatchType compatible with MySQL's floating point return value +---------------------------------------------------------------------------------------------------------- + +The return type of a :meth:`.Operators.match` expression is now a new type +called :class:`.MatchType`. This is a subclass of :class:`.Boolean`, +that can be intercepted by the dialect in order to produce a different +result type at SQL execution time. + +Code like the following will now function correctly and return floating points +on MySQL:: + + >>> connection.execute( + ... select([ + ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), + ... matchtable.c.title.match('Dive Python').label('python'), + ... matchtable.c.title + ... ]).order_by(matchtable.c.id) + ... ) + [ + (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), + (0.0, 2.0, 'Dive Into Python'), + (2.0, 0.0, "Programming Matz's Ruby"), + (0.0, 0.0, 'The Definitive Guide to Django'), + (0.0, 1.0, 'Python in a Nutshell') + ] + + +:ticket:`3263` + .. _change_3182: PyODBC driver name is required with hostname-based SQL Server connections diff --git a/doc/build/core/types.rst b/doc/build/core/types.rst index 14e30e46d..22b36a648 100644 --- a/doc/build/core/types.rst +++ b/doc/build/core/types.rst @@ -67,6 +67,9 @@ Standard Types`_ and the other sections of this chapter. .. autoclass:: LargeBinary :members: +.. autoclass:: MatchType + :members: + .. autoclass:: Numeric :members: diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 58eb3afa0..c868f58b2 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -602,6 +602,14 @@ class _StringType(sqltypes.String): to_inspect=[_StringType, sqltypes.String]) +class _MatchType(sqltypes.Float, sqltypes.MatchType): + def __init__(self, **kw): + # TODO: float arguments? + sqltypes.Float.__init__(self) + sqltypes.MatchType.__init__(self) + + + class NUMERIC(_NumericType, sqltypes.NUMERIC): """MySQL NUMERIC type.""" @@ -1544,6 +1552,7 @@ colspecs = { sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, + sqltypes.MatchType: _MatchType } # Everything 3.23 through 5.1 excepting OpenGIS types. diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index b102f0240..29a7401a1 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -82,6 +82,7 @@ OPERATORS = { operators.eq: ' = ', operators.concat_op: ' || ', operators.match_op: ' MATCH ', + operators.notmatch_op: ' NOT MATCH ', operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', operators.comma_op: ', ', @@ -862,14 +863,18 @@ class SQLCompiler(Compiled): else: return "%s = 0" % self.process(element.element, **kw) - def visit_binary(self, binary, **kw): + def visit_notmatch_op_binary(self, binary, operator, **kw): + return "NOT %s" % self.visit_binary( + binary, override_operator=operators.match_op) + + def visit_binary(self, binary, override_operator=None, **kw): # don't allow "? = ?" 
to render if self.ansi_bind_rules and \ isinstance(binary.left, elements.BindParameter) and \ isinstance(binary.right, elements.BindParameter): kw['literal_binds'] = True - operator_ = binary.operator + operator_ = override_operator or binary.operator disp = getattr(self, "visit_%s_binary" % operator_.__name__, None) if disp: return disp(binary, operator_, **kw) diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 4f53e2979..d26fdc455 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -68,8 +68,12 @@ class _DefaultColumnComparator(operators.ColumnOperators): def _boolean_compare(self, expr, op, obj, negate=None, reverse=False, _python_is_types=(util.NoneType, bool), + result_type = None, **kwargs): + if result_type is None: + result_type = type_api.BOOLEANTYPE + if isinstance(obj, _python_is_types + (Null, True_, False_)): # allow x ==/!= True/False to be treated as a literal. @@ -80,7 +84,7 @@ class _DefaultColumnComparator(operators.ColumnOperators): return BinaryExpression(expr, _literal_as_text(obj), op, - type_=type_api.BOOLEANTYPE, + type_=result_type, negate=negate, modifiers=kwargs) else: # all other None/True/False uses IS, IS NOT @@ -103,13 +107,13 @@ class _DefaultColumnComparator(operators.ColumnOperators): return BinaryExpression(obj, expr, op, - type_=type_api.BOOLEANTYPE, + type_=result_type, negate=negate, modifiers=kwargs) else: return BinaryExpression(expr, obj, op, - type_=type_api.BOOLEANTYPE, + type_=result_type, negate=negate, modifiers=kwargs) def _binary_operate(self, expr, op, obj, reverse=False, result_type=None, @@ -125,7 +129,8 @@ class _DefaultColumnComparator(operators.ColumnOperators): op, result_type = left.comparator._adapt_expression( op, right.comparator) - return BinaryExpression(left, right, op, type_=result_type) + return BinaryExpression( + left, right, op, type_=result_type, modifiers=kw) def _conjunction_operate(self, expr, op, other, **kw): if op is operators.and_: @@ -216,11 +221,16 @@ class _DefaultColumnComparator(operators.ColumnOperators): def _match_impl(self, expr, op, other, **kw): """See :meth:`.ColumnOperators.match`.""" + return self._boolean_compare( expr, operators.match_op, self._check_literal( expr, operators.match_op, other), - **kw) + result_type=type_api.MATCHTYPE, + negate=operators.notmatch_op + if op is operators.match_op else operators.match_op, + **kw + ) def _distinct_impl(self, expr, op, **kw): """See :meth:`.ColumnOperators.distinct`.""" @@ -282,6 +292,7 @@ class _DefaultColumnComparator(operators.ColumnOperators): "isnot": (_boolean_compare, operators.isnot), "collate": (_collate_impl,), "match_op": (_match_impl,), + "notmatch_op": (_match_impl,), "distinct_op": (_distinct_impl,), "between_op": (_between_impl, ), "notbetween_op": (_between_impl, ), diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 734f78632..30965c801 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -2763,7 +2763,7 @@ class BinaryExpression(ColumnElement): self.right, self.negate, negate=self.operator, - type_=type_api.BOOLEANTYPE, + type_=self.type, modifiers=self.modifiers) else: return super(BinaryExpression, self)._negate() diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 945356328..b08e44ab8 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -767,6 +767,10 @@ def match_op(a, b, **kw): return a.match(b, **kw) +def 
notmatch_op(a, b, **kw): + return a.notmatch(b, **kw) + + def comma_op(a, b): raise NotImplementedError() @@ -834,6 +838,7 @@ _PRECEDENCE = { concat_op: 6, match_op: 6, + notmatch_op: 6, ilike_op: 6, notilike_op: 6, diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 7bf2f337c..94db1d837 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1654,10 +1654,26 @@ class NullType(TypeEngine): comparator_factory = Comparator +class MatchType(Boolean): + """Refers to the return type of the MATCH operator. + + As the :meth:`.Operators.match` is probably the most open-ended + operator in generic SQLAlchemy Core, we can't assume the return type + at SQL evaluation time, as MySQL returns a floating point, not a boolean, + and other backends might do something different. So this type + acts as a placeholder, currently subclassing :class:`.Boolean`. + The type allows dialects to inject result-processing functionality + if needed, and on MySQL will return floating-point values. + + .. versionadded:: 1.0.0 + + """ + NULLTYPE = NullType() BOOLEANTYPE = Boolean() STRINGTYPE = String() INTEGERTYPE = Integer() +MATCHTYPE = MatchType() _type_map = { int: Integer(), @@ -1685,6 +1701,7 @@ type_api.BOOLEANTYPE = BOOLEANTYPE type_api.STRINGTYPE = STRINGTYPE type_api.INTEGERTYPE = INTEGERTYPE type_api.NULLTYPE = NULLTYPE +type_api.MATCHTYPE = MATCHTYPE type_api._type_map = _type_map # this one, there's all kinds of ways to play it, but at the EOD diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 77c6e1b1e..d3e0a008e 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -19,7 +19,7 @@ BOOLEANTYPE = None INTEGERTYPE = None NULLTYPE = None STRINGTYPE = None - +MATCHTYPE = None class TypeEngine(Visitable): """The ultimate base class for all SQL datatypes. diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index b49e389ac..1215bd790 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -51,6 +51,7 @@ from .sql.sqltypes import ( Integer, Interval, LargeBinary, + MatchType, NCHAR, NVARCHAR, NullType, diff --git a/test/dialect/mysql/test_query.py b/test/dialect/mysql/test_query.py index e085d86c1..ccb501651 100644 --- a/test/dialect/mysql/test_query.py +++ b/test/dialect/mysql/test_query.py @@ -55,7 +55,7 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): ]) matchtable.insert().execute([ {'id': 1, - 'title': 'Agile Web Development with Rails', + 'title': 'Agile Web Development with Ruby On Rails', 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', @@ -76,7 +76,7 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): metadata.drop_all() @testing.fails_on('mysql+mysqlconnector', 'uses pyformat') - def test_expression(self): + def test_expression_format(self): format = testing.db.dialect.paramstyle == 'format' and '%s' or '?' self.assert_compile( matchtable.c.title.match('somstr'), @@ -88,7 +88,7 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): @testing.fails_on('mysql+oursql', 'uses format') @testing.fails_on('mysql+pyodbc', 'uses format') @testing.fails_on('mysql+zxjdbc', 'uses format') - def test_expression(self): + def test_expression_pyformat(self): format = '%(title_1)s' self.assert_compile( matchtable.c.title.match('somstr'), @@ -102,6 +102,14 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): fetchall()) eq_([2, 5], [r.id for r in results]) + def test_not_match(self): + results = (matchtable.select(). 
+ where(~matchtable.c.title.match('python')). + order_by(matchtable.c.id). + execute(). + fetchall()) + eq_([1, 3, 4], [r.id for r in results]) + def test_simple_match_with_apostrophe(self): results = (matchtable.select(). where(matchtable.c.title.match("Matz's")). @@ -109,6 +117,26 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): fetchall()) eq_([3], [r.id for r in results]) + def test_return_value(self): + # test [ticket:3263] + result = testing.db.execute( + select([ + matchtable.c.title.match('Agile Ruby Programming').label('ruby'), + matchtable.c.title.match('Dive Python').label('python'), + matchtable.c.title + ]).order_by(matchtable.c.id) + ).fetchall() + eq_( + result, + [ + (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), + (0.0, 2.0, 'Dive Into Python'), + (2.0, 0.0, "Programming Matz's Ruby"), + (0.0, 0.0, 'The Definitive Guide to Django'), + (0.0, 1.0, 'Python in a Nutshell') + ] + ) + def test_or_match(self): results1 = (matchtable.select(). where(or_(matchtable.c.title.match('nutshell'), @@ -116,14 +144,13 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): order_by(matchtable.c.id). execute(). fetchall()) - eq_([3, 5], [r.id for r in results1]) + eq_([1, 3, 5], [r.id for r in results1]) results2 = (matchtable.select(). where(matchtable.c.title.match('nutshell ruby')). order_by(matchtable.c.id). execute(). fetchall()) - eq_([3, 5], [r.id for r in results2]) - + eq_([1, 3, 5], [r.id for r in results2]) def test_and_match(self): results1 = (matchtable.select(). diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py index a512b56fa..6841f397a 100644 --- a/test/dialect/postgresql/test_query.py +++ b/test/dialect/postgresql/test_query.py @@ -703,6 +703,12 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) + def test_not_match(self): + results = matchtable.select().where( + ~matchtable.c.title.match('python')).order_by( + matchtable.c.id).execute().fetchall() + eq_([1, 3, 4], [r.id for r in results]) + def test_simple_match_with_apostrophe(self): results = matchtable.select().where( matchtable.c.title.match("Matz's")).execute().fetchall() diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index e8ad88511..f8ac1528f 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -12,7 +12,8 @@ from sqlalchemy import exc from sqlalchemy.engine import default from sqlalchemy.sql.elements import _literal_as_text from sqlalchemy.schema import Column, Table, MetaData -from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, Boolean +from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, \ + Boolean, NullType, MatchType from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \ sqlite, mssql from sqlalchemy import util @@ -1619,6 +1620,31 @@ class MatchTest(fixtures.TestBase, testing.AssertsCompiledSQL): "CONTAINS (mytable.myid, :myid_1)", dialect=oracle.dialect()) + def test_match_is_now_matchtype(self): + expr = self.table1.c.myid.match('somstr') + assert expr.type._type_affinity is MatchType()._type_affinity + assert isinstance(expr.type, MatchType) + + def test_boolean_inversion_postgresql(self): + self.assert_compile( + ~self.table1.c.myid.match('somstr'), + "NOT mytable.myid @@ to_tsquery(%(myid_1)s)", + dialect=postgresql.dialect()) + + def test_boolean_inversion_mysql(self): + # because mysql doesnt have native boolean + self.assert_compile( + 
~self.table1.c.myid.match('somstr'), + "NOT MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)", + dialect=mysql.dialect()) + + def test_boolean_inversion_mssql(self): + # because mssql doesnt have native boolean + self.assert_compile( + ~self.table1.c.myid.match('somstr'), + "NOT CONTAINS (mytable.myid, :myid_1)", + dialect=mssql.dialect()) + class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' -- cgit v1.2.1 From fda589487b2cb60e8d69f520e0120eeb7c875915 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 19:12:52 -0500 Subject: - Updated the "supports_unicode_statements" flag to True for MySQLdb and Pymysql under Python 2. This refers to the SQL statements themselves, not the parameters, and affects issues such as table and column names using non-ASCII characters. These drivers both appear to support Python 2 Unicode objects without issue in modern versions. fixes #3121 --- doc/build/changelog/changelog_10.rst | 11 +++++++++++ lib/sqlalchemy/dialects/mysql/mysqldb.py | 2 +- lib/sqlalchemy/dialects/mysql/pymysql.py | 3 +-- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index f90ae40f8..7126d0930 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,17 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: feature, mysql + :tickets: 3121 + + Updated the "supports_unicode_statements" flag to True for MySQLdb + and Pymysql under Python 2. This refers to the SQL statements + themselves, not the parameters, and affects issues such as table + and column names using non-ASCII characters. These drivers both + appear to support Python 2 Unicode objects without issue in modern + versions. + .. change:: :tags: bug, mysql :tickets: 3263 diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 73210d67a..893c6a9e2 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -77,7 +77,7 @@ class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer): class MySQLDialect_mysqldb(MySQLDialect): driver = 'mysqldb' - supports_unicode_statements = False + supports_unicode_statements = True supports_sane_rowcount = True supports_sane_multi_rowcount = True diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 31226cea0..8df2ba03f 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -31,8 +31,7 @@ class MySQLDialect_pymysql(MySQLDialect_mysqldb): driver = 'pymysql' description_encoding = None - if py3k: - supports_unicode_statements = True + supports_unicode_statements = True @classmethod def dbapi(cls): -- cgit v1.2.1 From e46c71b4198ee9811ea851dbe037f19a74af0b08 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 19:35:00 -0500 Subject: - Added support for CTEs under Oracle. This includes some tweaks to the aliasing syntax, as well as a new CTE feature :meth:`.CTE.suffix_with`, which is useful for adding in special Oracle-specific directives to the CTE. 
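Oracle does not accept ``AS`` between a table name and its alias, which is one of
the aliasing details this change reworks.  A small illustrative sketch (not part
of the patch itself; the table and column names are invented)::

    from sqlalchemy import table, column, select
    from sqlalchemy.dialects import oracle

    t = table("mytable", column("x"))
    a = t.alias("a")

    # Oracle renders "FROM mytable a" rather than "FROM mytable AS a"
    print(select([a.c.x]).compile(dialect=oracle.dialect()))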
fixes #3220 --- doc/build/changelog/changelog_10.rst | 13 ++++ doc/build/changelog/migration_10.rst | 18 +++++ doc/build/core/selectable.rst | 3 + lib/sqlalchemy/dialects/oracle/base.py | 21 ++---- lib/sqlalchemy/orm/query.py | 30 +++++++- lib/sqlalchemy/sql/compiler.py | 17 ++++- lib/sqlalchemy/sql/selectable.py | 131 ++++++++++++++++++++++----------- test/dialect/test_oracle.py | 45 +++++++++++ test/sql/test_cte.py | 30 ++++++++ 9 files changed, 247 insertions(+), 61 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 7126d0930..32fe4daab 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,19 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: feature, oracle + :tickets: 3220 + + Added support for CTEs under Oracle. This includes some tweaks + to the aliasing syntax, as well as a new CTE feature + :meth:`.CTE.suffix_with`, which is useful for adding in special + Oracle-specific directives to the CTE. + + .. seealso:: + + :ref:`change_3220` + .. change:: :tags: feature, mysql :tickets: 3121 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 929a5fe3d..9fbbb889d 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1616,6 +1616,24 @@ reflection from temp tables as well, which is :ticket:`3203`. :ticket:`3204` +.. _change_3220: + +Improved support for CTEs in Oracle +----------------------------------- + +CTE support has been fixed up for Oracle, and there is also a new feature +:meth:`.CTE.with_suffixes` that can assist with Oracle's special directives:: + + included_parts = select([ + part.c.sub_part, part.c.part, part.c.quantity + ]).where(part.c.part == "p1").\ + cte(name="included_parts", recursive=True).\ + suffix_with( + "search depth first by part set ord1", + "cycle part set y_cycle to 1 default 0", dialect='oracle') + +:ticket:`3220` + .. _change_2984: Drizzle Dialect is now an External Dialect diff --git a/doc/build/core/selectable.rst b/doc/build/core/selectable.rst index 52acb28e5..03ebeb4ab 100644 --- a/doc/build/core/selectable.rst +++ b/doc/build/core/selectable.rst @@ -60,6 +60,9 @@ elements are themselves :class:`.ColumnElement` subclasses). .. autoclass:: HasPrefixes :members: +.. autoclass:: HasSuffixes + :members: + .. autoclass:: Join :members: :inherited-members: diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 6df38e57e..524ba8115 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -549,6 +549,9 @@ class OracleCompiler(compiler.SQLCompiler): def visit_false(self, expr, **kw): return '0' + def get_cte_preamble(self, recursive): + return "WITH" + def get_select_hint_text(self, byfroms): return " ".join( "/*+ %s */" % text for table, text in byfroms.items() @@ -619,22 +622,10 @@ class OracleCompiler(compiler.SQLCompiler): return (self.dialect.identifier_preparer.format_sequence(seq) + ".nextval") - def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): - """Oracle doesn't like ``FROM table AS alias``. Is the AS standard - SQL?? 
- """ - - if asfrom or ashint: - alias_name = isinstance(alias.name, expression._truncated_label) and \ - self._truncated_identifier("alias", alias.name) or alias.name + def get_render_as_alias_suffix(self, alias_name_text): + """Oracle doesn't like ``FROM table AS alias``""" - if ashint: - return alias_name - elif asfrom: - return self.process(alias.original, asfrom=asfrom, **kwargs) + \ - " " + self.preparer.format_alias(alias, alias_name) - else: - return self.process(alias.original, **kwargs) + return " " + alias_name_text def returning_clause(self, stmt, returning_cols): columns = [] diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 790686288..9b7747e15 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -75,6 +75,7 @@ class Query(object): _having = None _distinct = False _prefixes = None + _suffixes = None _offset = None _limit = None _for_update_arg = None @@ -1003,7 +1004,7 @@ class Query(object): '_limit', '_offset', '_joinpath', '_joinpoint', '_distinct', '_having', - '_prefixes', + '_prefixes', '_suffixes' ): self.__dict__.pop(attr, None) self._set_select_from([fromclause], True) @@ -2359,12 +2360,38 @@ class Query(object): .. versionadded:: 0.7.7 + .. seealso:: + + :meth:`.HasPrefixes.prefix_with` + """ if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes + @_generative() + def suffix_with(self, *suffixes): + """Apply the suffix to the query and return the newly resulting + ``Query``. + + :param \*suffixes: optional suffixes, typically strings, + not using any commas. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Query.prefix_with` + + :meth:`.HasSuffixes.suffix_with` + + """ + if self._suffixes: + self._suffixes += suffixes + else: + self._suffixes = suffixes + def all(self): """Return the results represented by this ``Query`` as a list. 
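As a rough usage sketch of the new :meth:`.Query.suffix_with` method added in
this hunk (illustrative only; ``User`` is assumed to be a mapped class,
``session`` a configured :class:`.Session`, and the hint text is an arbitrary
example), the given text is simply appended to the end of the rendered SELECT::

    # minimal sketch -- "User" and "session" are assumed to be configured
    # elsewhere; the suffix string itself is a made-up example
    q = session.query(User).suffix_with("OPTION (MAXRECURSION 10)")

    # the rendered statement ends with the literal suffix text
    print(str(q))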
@@ -2601,6 +2628,7 @@ class Query(object): 'offset': self._offset, 'distinct': self._distinct, 'prefixes': self._prefixes, + 'suffixes': self._suffixes, 'group_by': self._group_by or None, 'having': self._having } diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 29a7401a1..9304bba9f 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1193,12 +1193,16 @@ class SQLCompiler(Compiled): self, asfrom=True, **kwargs ) + if cte._suffixes: + text += " " + self._generate_prefixes( + cte, cte._suffixes, **kwargs) + self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) - text += " AS " + cte_name + text += self.get_render_as_alias_suffix(cte_name) else: return self.preparer.format_alias(cte, cte_name) return text @@ -1217,8 +1221,8 @@ class SQLCompiler(Compiled): elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ - " AS " + \ - self.preparer.format_alias(alias, alias_name) + self.get_render_as_alias_suffix( + self.preparer.format_alias(alias, alias_name)) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, @@ -1228,6 +1232,9 @@ class SQLCompiler(Compiled): else: return alias.original._compiler_dispatch(self, **kwargs) + def get_render_as_alias_suffix(self, alias_name_text): + return " AS " + alias_name_text + def _add_to_result_map(self, keyname, name, objects, type_): if not self.dialect.case_sensitive: keyname = keyname.lower() @@ -1554,6 +1561,10 @@ class SQLCompiler(Compiled): compound_index == 0 and toplevel: text = self._render_cte_clause() + text + if select._suffixes: + text += " " + self._generate_prefixes( + select, select._suffixes, **kwargs) + self.stack.pop(-1) if asfrom and parens: diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 8198a6733..87029ec2b 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -171,6 +171,79 @@ class Selectable(ClauseElement): return self +class HasPrefixes(object): + _prefixes = () + + @_generative + def prefix_with(self, *expr, **kw): + """Add one or more expressions following the statement keyword, i.e. + SELECT, INSERT, UPDATE, or DELETE. Generative. + + This is used to support backend-specific prefix keywords such as those + provided by MySQL. + + E.g.:: + + stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") + + Multiple prefixes can be specified by multiple calls + to :meth:`.prefix_with`. + + :param \*expr: textual or :class:`.ClauseElement` construct which + will be rendered following the INSERT, UPDATE, or DELETE + keyword. + :param \**kw: A single keyword 'dialect' is accepted. This is an + optional string dialect name which will + limit rendering of this prefix to only that dialect. + + """ + dialect = kw.pop('dialect', None) + if kw: + raise exc.ArgumentError("Unsupported argument(s): %s" % + ",".join(kw)) + self._setup_prefixes(expr, dialect) + + def _setup_prefixes(self, prefixes, dialect=None): + self._prefixes = self._prefixes + tuple( + [(_literal_as_text(p, warn=False), dialect) for p in prefixes]) + + +class HasSuffixes(object): + _suffixes = () + + @_generative + def suffix_with(self, *expr, **kw): + """Add one or more expressions following the statement as a whole. + + This is used to support backend-specific suffix keywords on + certain constructs. 
+ + E.g.:: + + stmt = select([col1, col2]).cte().suffix_with( + "cycle empno set y_cycle to 1 default 0", dialect="oracle") + + Multiple prefixes can be specified by multiple calls + to :meth:`.suffix_with`. + + :param \*expr: textual or :class:`.ClauseElement` construct which + will be rendered following the target clause. + :param \**kw: A single keyword 'dialect' is accepted. This is an + optional string dialect name which will + limit rendering of this suffix to only that dialect. + + """ + dialect = kw.pop('dialect', None) + if kw: + raise exc.ArgumentError("Unsupported argument(s): %s" % + ",".join(kw)) + self._setup_suffixes(expr, dialect) + + def _setup_suffixes(self, suffixes, dialect=None): + self._suffixes = self._suffixes + tuple( + [(_literal_as_text(p, warn=False), dialect) for p in suffixes]) + + class FromClause(Selectable): """Represent an element that can be used within the ``FROM`` clause of a ``SELECT`` statement. @@ -1088,7 +1161,7 @@ class Alias(FromClause): return self.element.bind -class CTE(Alias): +class CTE(Generative, HasSuffixes, Alias): """Represent a Common Table Expression. The :class:`.CTE` object is obtained using the @@ -1104,10 +1177,13 @@ class CTE(Alias): name=None, recursive=False, _cte_alias=None, - _restates=frozenset()): + _restates=frozenset(), + _suffixes=None): self.recursive = recursive self._cte_alias = _cte_alias self._restates = _restates + if _suffixes: + self._suffixes = _suffixes super(CTE, self).__init__(selectable, name=name) def alias(self, name=None, flat=False): @@ -1116,6 +1192,7 @@ class CTE(Alias): name=name, recursive=self.recursive, _cte_alias=self, + _suffixes=self._suffixes ) def union(self, other): @@ -1123,7 +1200,8 @@ class CTE(Alias): self.original.union(other), name=self.name, recursive=self.recursive, - _restates=self._restates.union([self]) + _restates=self._restates.union([self]), + _suffixes=self._suffixes ) def union_all(self, other): @@ -1131,7 +1209,8 @@ class CTE(Alias): self.original.union_all(other), name=self.name, recursive=self.recursive, - _restates=self._restates.union([self]) + _restates=self._restates.union([self]), + _suffixes=self._suffixes ) @@ -2118,44 +2197,7 @@ class CompoundSelect(GenerativeSelect): bind = property(bind, _set_bind) -class HasPrefixes(object): - _prefixes = () - - @_generative - def prefix_with(self, *expr, **kw): - """Add one or more expressions following the statement keyword, i.e. - SELECT, INSERT, UPDATE, or DELETE. Generative. - - This is used to support backend-specific prefix keywords such as those - provided by MySQL. - - E.g.:: - - stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") - - Multiple prefixes can be specified by multiple calls - to :meth:`.prefix_with`. - - :param \*expr: textual or :class:`.ClauseElement` construct which - will be rendered following the INSERT, UPDATE, or DELETE - keyword. - :param \**kw: A single keyword 'dialect' is accepted. This is an - optional string dialect name which will - limit rendering of this prefix to only that dialect. - - """ - dialect = kw.pop('dialect', None) - if kw: - raise exc.ArgumentError("Unsupported argument(s): %s" % - ",".join(kw)) - self._setup_prefixes(expr, dialect) - - def _setup_prefixes(self, prefixes, dialect=None): - self._prefixes = self._prefixes + tuple( - [(_literal_as_text(p, warn=False), dialect) for p in prefixes]) - - -class Select(HasPrefixes, GenerativeSelect): +class Select(HasPrefixes, HasSuffixes, GenerativeSelect): """Represents a ``SELECT`` statement. 
""" @@ -2163,6 +2205,7 @@ class Select(HasPrefixes, GenerativeSelect): __visit_name__ = 'select' _prefixes = () + _suffixes = () _hints = util.immutabledict() _statement_hints = () _distinct = False @@ -2180,6 +2223,7 @@ class Select(HasPrefixes, GenerativeSelect): having=None, correlate=True, prefixes=None, + suffixes=None, **kwargs): """Construct a new :class:`.Select`. @@ -2425,6 +2469,9 @@ class Select(HasPrefixes, GenerativeSelect): if prefixes: self._setup_prefixes(prefixes) + if suffixes: + self._setup_suffixes(suffixes) + GenerativeSelect.__init__(self, **kwargs) @property diff --git a/test/dialect/test_oracle.py b/test/dialect/test_oracle.py index a771c5d80..b2a490e71 100644 --- a/test/dialect/test_oracle.py +++ b/test/dialect/test_oracle.py @@ -180,6 +180,51 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL): t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"' ) + def test_cte(self): + part = table( + 'part', + column('part'), + column('sub_part'), + column('quantity') + ) + + included_parts = select([ + part.c.sub_part, part.c.part, part.c.quantity + ]).where(part.c.part == "p1").\ + cte(name="included_parts", recursive=True).\ + suffix_with( + "search depth first by part set ord1", + "cycle part set y_cycle to 1 default 0", dialect='oracle') + + incl_alias = included_parts.alias("pr1") + parts_alias = part.alias("p") + included_parts = included_parts.union_all( + select([ + parts_alias.c.sub_part, + parts_alias.c.part, parts_alias.c.quantity + ]).where(parts_alias.c.part == incl_alias.c.sub_part) + ) + + q = select([ + included_parts.c.sub_part, + func.sum(included_parts.c.quantity).label('total_quantity')]).\ + group_by(included_parts.c.sub_part) + + self.assert_compile( + q, + "WITH included_parts(sub_part, part, quantity) AS " + "(SELECT part.sub_part AS sub_part, part.part AS part, " + "part.quantity AS quantity FROM part WHERE part.part = :part_1 " + "UNION ALL SELECT p.sub_part AS sub_part, p.part AS part, " + "p.quantity AS quantity FROM part p, included_parts pr1 " + "WHERE p.part = pr1.sub_part) " + "search depth first by part set ord1 cycle part set " + "y_cycle to 1 default 0 " + "SELECT included_parts.sub_part, sum(included_parts.quantity) " + "AS total_quantity FROM included_parts " + "GROUP BY included_parts.sub_part" + ) + def test_limit(self): t = table('sometable', column('col1'), column('col2')) s = select([t]) diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index b907fe649..c7906dcb7 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -462,3 +462,33 @@ class CTETest(fixtures.TestBase, AssertsCompiledSQL): 'FROM "order" JOIN regional_sales AS anon_1 ' 'ON anon_1."order" = "order"."order"' ) + + def test_suffixes(self): + orders = table('order', column('order')) + s = select([orders.c.order]).cte("regional_sales") + s = s.suffix_with("pg suffix", dialect='postgresql') + s = s.suffix_with('oracle suffix', dialect='oracle') + stmt = select([orders]).where(orders.c.order > s.c.order) + + self.assert_compile( + stmt, + 'WITH regional_sales AS (SELECT "order"."order" AS "order" ' + 'FROM "order") SELECT "order"."order" FROM "order", ' + 'regional_sales WHERE "order"."order" > regional_sales."order"' + ) + + self.assert_compile( + stmt, + 'WITH regional_sales AS (SELECT "order"."order" AS "order" ' + 'FROM "order") oracle suffix SELECT "order"."order" FROM "order", ' + 'regional_sales WHERE "order"."order" > regional_sales."order"', + dialect='oracle' + ) + + self.assert_compile( + stmt, + 'WITH regional_sales AS (SELECT 
"order"."order" AS "order" ' + 'FROM "order") pg suffix SELECT "order"."order" FROM "order", ' + 'regional_sales WHERE "order"."order" > regional_sales."order"', + dialect='postgresql' + ) \ No newline at end of file -- cgit v1.2.1 From 60174146410d4ce2a17faa76cd981f558490db92 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 19:45:14 -0500 Subject: - the refactor of the visit_alias() method in Oracle revealed that quoting should be applied in %(name)s under with_hint. --- doc/build/changelog/changelog_10.rst | 7 +++++++ test/sql/test_compiler.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 32fe4daab..0256958b2 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,13 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, oracle + + An alias name will be properly quoted when referred to using the + ``%(name)s`` token inside the :meth:`.Select.with_hint` method. + Previously, the Oracle backend hadn't implemented this quoting. + .. change:: :tags: feature, oracle :tickets: 3220 diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 9e99a947b..428fc8986 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -2440,7 +2440,7 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL): """SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """ """FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1"""), (s7, oracle_d, - """SELECT /*+ SomeName idx1 */ "SomeName".col1 FROM """ + """SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """ """"QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1"""), ]: self.assert_compile( -- cgit v1.2.1 From edef95379777a9c84ee7dbcbc9a3b58849aa8930 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Dec 2014 20:08:07 -0500 Subject: - New Oracle DDL features for tables, indexes: COMPRESS, BITMAP. Patch courtesy Gabor Gombas. fixes #3127 --- doc/build/changelog/changelog_10.rst | 6 ++ doc/build/changelog/migration_10.rst | 9 ++ lib/sqlalchemy/dialects/oracle/base.py | 165 +++++++++++++++++++++++++++++++-- lib/sqlalchemy/engine/reflection.py | 10 +- test/dialect/test_oracle.py | 93 ++++++++++++++++++- 5 files changed, 272 insertions(+), 11 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 0256958b2..b71ecc15d 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,12 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: feature, oracle + + New Oracle DDL features for tables, indexes: COMPRESS, BITMAP. + Patch courtesy Gabor Gombas. + .. change:: :tags: bug, oracle diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 9fbbb889d..27a4fae4c 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1634,6 +1634,15 @@ CTE support has been fixed up for Oracle, and there is also a new feature :ticket:`3220` +New Oracle Keywords for DDL +----------------------------- + +Keywords such as COMPRESS, ON COMMIT, BITMAP: + +:ref:`oracle_table_options` + +:ref:`oracle_index_options` + .. 
_change_2984: Drizzle Dialect is now an External Dialect diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 524ba8115..9f375da94 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -213,6 +213,8 @@ is reflected and the type is reported as ``DATE``, the time-supporting examining the type of column for use in special Python translations or for migrating schemas to other database backends. +.. _oracle_table_options: + Oracle Table Options ------------------------- @@ -228,15 +230,63 @@ in conjunction with the :class:`.Table` construct: .. versionadded:: 1.0.0 +* ``COMPRESS``:: + + Table('mytable', metadata, Column('data', String(32)), + oracle_compress=True) + + Table('mytable', metadata, Column('data', String(32)), + oracle_compress=6) + + The ``oracle_compress`` parameter accepts either an integer compression + level, or ``True`` to use the default compression level. + +.. versionadded:: 1.0.0 + +.. _oracle_index_options: + +Oracle Specific Index Options +----------------------------- + +Bitmap Indexes +~~~~~~~~~~~~~~ + +You can specify the ``oracle_bitmap`` parameter to create a bitmap index +instead of a B-tree index:: + + Index('my_index', my_table.c.data, oracle_bitmap=True) + +Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not +check for such limitations, only the database will. + +.. versionadded:: 1.0.0 + +Index compression +~~~~~~~~~~~~~~~~~ + +Oracle has a more efficient storage mode for indexes containing lots of +repeated values. Use the ``oracle_compress`` parameter to turn on key c +ompression:: + + Index('my_index', my_table.c.data, oracle_compress=True) + + Index('my_index', my_table.c.data1, my_table.c.data2, unique=True, + oracle_compress=1) + +The ``oracle_compress`` parameter accepts either an integer specifying the +number of prefix columns to compress, or ``True`` to use the default (all +columns for non-unique indexes, all but the last column for unique indexes). + +.. 
versionadded:: 1.0.0 + """ import re from sqlalchemy import util, sql -from sqlalchemy.engine import default, base, reflection +from sqlalchemy.engine import default, reflection from sqlalchemy.sql import compiler, visitors, expression -from sqlalchemy.sql import (operators as sql_operators, - functions as sql_functions) +from sqlalchemy.sql import operators as sql_operators from sqlalchemy import types as sqltypes, schema as sa_schema from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \ BLOB, CLOB, TIMESTAMP, FLOAT @@ -786,9 +836,32 @@ class OracleDDLCompiler(compiler.DDLCompiler): return text - def visit_create_index(self, create, **kw): - return super(OracleDDLCompiler, self).\ - visit_create_index(create, include_schema=True) + def visit_create_index(self, create): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + if index.dialect_options['oracle']['bitmap']: + text += "BITMAP " + text += "INDEX %s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=True), + preparer.format_table(index.table, use_schema=True), + ', '.join( + self.sql_compiler.process( + expr, + include_table=False, literal_binds=True) + for expr in index.expressions) + ) + if index.dialect_options['oracle']['compress'] is not False: + if index.dialect_options['oracle']['compress'] is True: + text += " COMPRESS" + else: + text += " COMPRESS %d" % ( + index.dialect_options['oracle']['compress'] + ) + return text def post_create_table(self, table): table_opts = [] @@ -798,6 +871,14 @@ class OracleDDLCompiler(compiler.DDLCompiler): on_commit_options = opts['on_commit'].replace("_", " ").upper() table_opts.append('\n ON COMMIT %s' % on_commit_options) + if opts['compress']: + if opts['compress'] is True: + table_opts.append("\n COMPRESS") + else: + table_opts.append("\n COMPRESS FOR %s" % ( + opts['compress'] + )) + return ''.join(table_opts) @@ -861,7 +942,12 @@ class OracleDialect(default.DefaultDialect): construct_arguments = [ (sa_schema.Table, { "resolve_synonyms": False, - "on_commit": None + "on_commit": None, + "compress": False + }), + (sa_schema.Index, { + "bitmap": False, + "compress": False }) ] @@ -892,6 +978,16 @@ class OracleDialect(default.DefaultDialect): return self.server_version_info and \ self.server_version_info < (9, ) + @property + def _supports_table_compression(self): + return self.server_version_info and \ + self.server_version_info >= (9, 2, ) + + @property + def _supports_table_compress_for(self): + return self.server_version_info and \ + self.server_version_info >= (11, ) + @property def _supports_char_length(self): return not self._is_oracle_8 @@ -1074,6 +1170,50 @@ class OracleDialect(default.DefaultDialect): cursor = connection.execute(s, owner=self.denormalize_name(schema)) return [self.normalize_name(row[0]) for row in cursor] + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + options = {} + + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + + params = {"table_name": table_name} + + columns = ["table_name"] + if self._supports_table_compression: + columns.append("compression") + if self._supports_table_compress_for: + columns.append("compress_for") + + text = "SELECT %(columns)s "\ + "FROM 
ALL_TABLES%(dblink)s "\ + "WHERE table_name = :table_name" + + if schema is not None: + params['owner'] = schema + text += " AND owner = :owner " + text = text % {'dblink': dblink, 'columns': ", ".join(columns)} + + result = connection.execute(sql.text(text), **params) + + enabled = dict(DISABLED=False, ENABLED=True) + + row = result.first() + if row: + if "compression" in row and enabled.get(row.compression, False): + if "compress_for" in row: + options['oracle_compress'] = row.compress_for + else: + options['oracle_compress'] = True + + return options + @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): """ @@ -1159,7 +1299,8 @@ class OracleDialect(default.DefaultDialect): params = {'table_name': table_name} text = \ - "SELECT a.index_name, a.column_name, b.uniqueness "\ + "SELECT a.index_name, a.column_name, "\ + "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\ "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\ "\nALL_INDEXES%(dblink)s b "\ "\nWHERE "\ @@ -1185,6 +1326,7 @@ class OracleDialect(default.DefaultDialect): dblink=dblink, info_cache=kw.get('info_cache')) pkeys = pk_constraint['constrained_columns'] uniqueness = dict(NONUNIQUE=False, UNIQUE=True) + enabled = dict(DISABLED=False, ENABLED=True) oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) @@ -1204,10 +1346,15 @@ class OracleDialect(default.DefaultDialect): if rset.index_name != last_index_name: remove_if_primary_key(index) index = dict(name=self.normalize_name(rset.index_name), - column_names=[]) + column_names=[], dialect_options={}) indexes.append(index) index['unique'] = uniqueness.get(rset.uniqueness, False) + if rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'): + index['dialect_options']['oracle_bitmap'] = True + if enabled.get(rset.compression, False): + index['dialect_options']['oracle_compress'] = rset.prefix_length + # filter out Oracle SYS_NC names. could also do an outer join # to the all_tab_columns table and check for real col names there. if not oracle_sys_col.match(rset.column_name): diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index 2a1def86a..ebc96f5dd 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -394,6 +394,9 @@ class Inspector(object): unique boolean + dialect_options + dict of dialect-specific index options + :param table_name: string name of the table. For special quoting, use :class:`.quoted_name`. 
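A rough sketch of reading the new per-index ``dialect_options`` entry through
the :class:`.Inspector` (assuming an Oracle database is reachable at the
hypothetical URL below and already contains the table being inspected)::

    from sqlalchemy import create_engine, inspect

    # hypothetical connection URL; any reachable Oracle database will do
    engine = create_engine("oracle://scott:tiger@localhost/xe")
    insp = inspect(engine)

    for ix in insp.get_indexes("mytable"):
        # "dialect_options" is the new key documented above; on Oracle it
        # may contain "oracle_bitmap" and/or "oracle_compress"
        print("%s -> %r" % (ix["name"], ix.get("dialect_options", {})))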
@@ -642,6 +645,8 @@ class Inspector(object): columns = index_d['column_names'] unique = index_d['unique'] flavor = index_d.get('type', 'index') + dialect_options = index_d.get('dialect_options', {}) + duplicates = index_d.get('duplicates_constraint') if include_columns and \ not set(columns).issubset(include_columns): @@ -667,7 +672,10 @@ class Inspector(object): else: idx_cols.append(idx_col) - sa_schema.Index(name, *idx_cols, **dict(unique=unique)) + sa_schema.Index( + name, *idx_cols, + **dict(list(dialect_options.items()) + [('unique', unique)]) + ) def _reflect_unique_constraints( self, table_name, schema, table, cols_by_orig_name, diff --git a/test/dialect/test_oracle.py b/test/dialect/test_oracle.py index b2a490e71..1e50b9070 100644 --- a/test/dialect/test_oracle.py +++ b/test/dialect/test_oracle.py @@ -732,6 +732,34 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL): ) + def test_create_table_compress(self): + m = MetaData() + tbl1 = Table('testtbl1', m, Column('data', Integer), + oracle_compress=True) + tbl2 = Table('testtbl2', m, Column('data', Integer), + oracle_compress="OLTP") + + self.assert_compile(schema.CreateTable(tbl1), + "CREATE TABLE testtbl1 (data INTEGER) COMPRESS") + self.assert_compile(schema.CreateTable(tbl2), + "CREATE TABLE testtbl2 (data INTEGER) " + "COMPRESS FOR OLTP") + + def test_create_index_bitmap_compress(self): + m = MetaData() + tbl = Table('testtbl', m, Column('data', Integer)) + idx1 = Index('idx1', tbl.c.data, oracle_compress=True) + idx2 = Index('idx2', tbl.c.data, oracle_compress=1) + idx3 = Index('idx3', tbl.c.data, oracle_bitmap=True) + + self.assert_compile(schema.CreateIndex(idx1), + "CREATE INDEX idx1 ON testtbl (data) COMPRESS") + self.assert_compile(schema.CreateIndex(idx2), + "CREATE INDEX idx2 ON testtbl (data) COMPRESS 1") + self.assert_compile(schema.CreateIndex(idx3), + "CREATE BITMAP INDEX idx3 ON testtbl (data)") + + class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL): def _dialect(self, server_version, **kw): @@ -1772,6 +1800,58 @@ class UnsupportedIndexReflectTest(fixtures.TestBase): m2 = MetaData(testing.db) Table('test_index_reflect', m2, autoload=True) + +def all_tables_compression_missing(): + try: + testing.db.execute('SELECT compression FROM all_tables') + return False + except: + return True + + +def all_tables_compress_for_missing(): + try: + testing.db.execute('SELECT compress_for FROM all_tables') + return False + except: + return True + + +class TableReflectionTest(fixtures.TestBase): + __only_on__ = 'oracle' + + @testing.provide_metadata + @testing.fails_if(all_tables_compression_missing) + def test_reflect_basic_compression(self): + metadata = self.metadata + + tbl = Table('test_compress', metadata, + Column('data', Integer, primary_key=True), + oracle_compress=True) + metadata.create_all() + + m2 = MetaData(testing.db) + + tbl = Table('test_compress', m2, autoload=True) + # Don't hardcode the exact value, but it must be non-empty + assert tbl.dialect_options['oracle']['compress'] + + @testing.provide_metadata + @testing.fails_if(all_tables_compress_for_missing) + def test_reflect_oltp_compression(self): + metadata = self.metadata + + tbl = Table('test_compress', metadata, + Column('data', Integer, primary_key=True), + oracle_compress="OLTP") + metadata.create_all() + + m2 = MetaData(testing.db) + + tbl = Table('test_compress', m2, autoload=True) + assert tbl.dialect_options['oracle']['compress'] == "OLTP" + + class RoundTripIndexTest(fixtures.TestBase): __only_on__ = 'oracle' @@ -1789,6 +1869,10 @@ 
class RoundTripIndexTest(fixtures.TestBase): # "group" is a keyword, so lower case normalind = Index('tableind', table.c.id_b, table.c.group) + compress1 = Index('compress1', table.c.id_a, table.c.id_b, + oracle_compress=True) + compress2 = Index('compress2', table.c.id_a, table.c.id_b, table.c.col, + oracle_compress=1) metadata.create_all() mirror = MetaData(testing.db) @@ -1837,8 +1921,15 @@ class RoundTripIndexTest(fixtures.TestBase): ) assert (Index, ('id_b', ), True) in reflected assert (Index, ('col', 'group'), True) in reflected + + idx = reflected[(Index, ('id_a', 'id_b', ), False)] + assert idx.dialect_options['oracle']['compress'] == 2 + + idx = reflected[(Index, ('id_a', 'id_b', 'col', ), False)] + assert idx.dialect_options['oracle']['compress'] == 1 + eq_(len(reflectedtable.constraints), 1) - eq_(len(reflectedtable.indexes), 3) + eq_(len(reflectedtable.indexes), 5) class SequenceTest(fixtures.TestBase, AssertsCompiledSQL): -- cgit v1.2.1 From 6e53e866dea4eba630128e856573ca1076b91611 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 11:35:42 -0500 Subject: - pep8 cleanup --- test/engine/test_parseconnect.py | 135 ++++++++++++++++++++++----------------- 1 file changed, 77 insertions(+), 58 deletions(-) diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 391b92144..d8f202f99 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -1,12 +1,13 @@ from sqlalchemy.testing import assert_raises, eq_, assert_raises_message -from sqlalchemy.util.compat import configparser, StringIO import sqlalchemy.engine.url as url from sqlalchemy import create_engine, engine_from_config, exc, pool from sqlalchemy.engine.default import DefaultDialect import sqlalchemy as tsa from sqlalchemy.testing import fixtures from sqlalchemy import testing -from sqlalchemy.testing.mock import Mock, MagicMock, patch +from sqlalchemy.testing.mock import Mock, MagicMock + +dialect = None class ParseConnectTest(fixtures.TestBase): @@ -31,21 +32,25 @@ class ParseConnectTest(fixtures.TestBase): 'dbtype://username:password@/database', 'dbtype:////usr/local/_xtest@example.com/members.db', 'dbtype://username:apples%2Foranges@hostspec/database', - 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]/database?foo=bar', - 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]:80/database?foo=bar' - ): + 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]' + '/database?foo=bar', + 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]:80' + '/database?foo=bar' + ): u = url.make_url(text) assert u.drivername in ('dbtype', 'dbtype+apitype') assert u.username in ('username', None) assert u.password in ('password', 'apples/oranges', None) - assert u.host in ('hostspec', '127.0.0.1', - '2001:da8:2004:1000:202:116:160:90', '', None), u.host - assert u.database in ('database', - '/usr/local/_xtest@example.com/members.db', - '/usr/db_file.db', ':memory:', '', - 'foo/bar/im/a/file', - 'E:/work/src/LEM/db/hello.db', None), u.database + assert u.host in ( + 'hostspec', '127.0.0.1', + '2001:da8:2004:1000:202:116:160:90', '', None), u.host + assert u.database in ( + 'database', + '/usr/local/_xtest@example.com/members.db', + '/usr/db_file.db', ':memory:', '', + 'foo/bar/im/a/file', + 'E:/work/src/LEM/db/hello.db', None), u.database eq_(str(u), text) def test_rfc1738_password(self): @@ -53,13 +58,17 @@ class ParseConnectTest(fixtures.TestBase): eq_(u.password, "pass word + other:words") eq_(str(u), "dbtype://user:pass 
word + other%3Awords@host/dbname") - u = url.make_url('dbtype://username:apples%2Foranges@hostspec/database') + u = url.make_url( + 'dbtype://username:apples%2Foranges@hostspec/database') eq_(u.password, "apples/oranges") eq_(str(u), 'dbtype://username:apples%2Foranges@hostspec/database') - u = url.make_url('dbtype://username:apples%40oranges%40%40@hostspec/database') + u = url.make_url( + 'dbtype://username:apples%40oranges%40%40@hostspec/database') eq_(u.password, "apples@oranges@@") - eq_(str(u), 'dbtype://username:apples%40oranges%40%40@hostspec/database') + eq_( + str(u), + 'dbtype://username:apples%40oranges%40%40@hostspec/database') u = url.make_url('dbtype://username%40:@hostspec/database') eq_(u.password, '') @@ -70,23 +79,23 @@ class ParseConnectTest(fixtures.TestBase): eq_(u.password, 'pass/word') eq_(str(u), 'dbtype://username:pass%2Fword@hostspec/database') + class DialectImportTest(fixtures.TestBase): def test_import_base_dialects(self): - # the globals() somehow makes it for the exec() + nose3. for name in ( - 'mysql', - 'firebird', - 'postgresql', - 'sqlite', - 'oracle', - 'mssql', - ): + 'mysql', + 'firebird', + 'postgresql', + 'sqlite', + 'oracle', + 'mssql'): exec ('from sqlalchemy.dialects import %s\ndialect = ' '%s.dialect()' % (name, name), globals()) eq_(dialect.name, name) + class CreateEngineTest(fixtures.TestBase): """test that create_engine arguments of different types get propagated properly""" @@ -97,26 +106,28 @@ class CreateEngineTest(fixtures.TestBase): create_engine('postgresql://scott:tiger@somehost/test?foobe' 'r=12&lala=18&fooz=somevalue', module=dbapi, _initialize=False) - c = e.connect() + e.connect() def test_kwargs(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') e = \ - create_engine('postgresql://scott:tiger@somehost/test?fooz=' - 'somevalue', connect_args={'foober': 12, - 'lala': 18, 'hoho': {'this': 'dict'}}, - module=dbapi, _initialize=False) - c = e.connect() - + create_engine( + 'postgresql://scott:tiger@somehost/test?fooz=' + 'somevalue', connect_args={ + 'foober': 12, + 'lala': 18, 'hoho': {'this': 'dict'}}, + module=dbapi, _initialize=False) + e.connect() def test_engine_from_config(self): dbapi = mock_dbapi - config = \ - {'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test'\ - '?fooz=somevalue', 'sqlalchemy.pool_recycle': '50', - 'sqlalchemy.echo': 'true'} + config = { + 'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test' + '?fooz=somevalue', + 'sqlalchemy.pool_recycle': '50', + 'sqlalchemy.echo': 'true'} e = engine_from_config(config, module=dbapi, _initialize=False) assert e.pool._recycle == 50 @@ -125,7 +136,6 @@ class CreateEngineTest(fixtures.TestBase): 'z=somevalue') assert e.echo is True - def test_engine_from_config_custom(self): from sqlalchemy import util from sqlalchemy.dialects import registry @@ -143,8 +153,9 @@ class CreateEngineTest(fixtures.TestBase): global dialect dialect = MyDialect - registry.register("mockdialect.barb", - ".".join(tokens[0:-1]), tokens[-1]) + registry.register( + "mockdialect.barb", + ".".join(tokens[0:-1]), tokens[-1]) config = { "sqlalchemy.url": "mockdialect+barb://", @@ -155,7 +166,6 @@ class CreateEngineTest(fixtures.TestBase): eq_(e.dialect.foobar, 5) eq_(e.dialect.bathoho, False) - def test_custom(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') @@ -169,7 +179,7 @@ class CreateEngineTest(fixtures.TestBase): e = create_engine('postgresql://', creator=connect, module=dbapi, _initialize=False) - c = e.connect() 
+ e.connect() def test_recycle(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, @@ -188,8 +198,9 @@ class CreateEngineTest(fixtures.TestBase): (True, pool.reset_rollback), (False, pool.reset_none), ]: - e = create_engine('postgresql://', pool_reset_on_return=value, - module=dbapi, _initialize=False) + e = create_engine( + 'postgresql://', pool_reset_on_return=value, + module=dbapi, _initialize=False) assert e.pool._reset_on_return is expected assert_raises( @@ -217,7 +228,7 @@ class CreateEngineTest(fixtures.TestBase): lala=5, use_ansi=True, module=mock_dbapi, - ) + ) assert_raises(TypeError, create_engine, 'postgresql://', lala=5, module=mock_dbapi) assert_raises(TypeError, create_engine, 'sqlite://', lala=5, @@ -233,14 +244,14 @@ class CreateEngineTest(fixtures.TestBase): dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock(side_effect=sqlite3.ProgrammingError("random error")) + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError("random error")) try: create_engine('sqlite://', module=dbapi).connect() assert False except tsa.exc.DBAPIError as de: assert not de.connection_invalidated - @testing.requires.sqlite def test_dont_touch_non_dbapi_exception_on_connect(self): e = create_engine('sqlite://') @@ -260,10 +271,12 @@ class CreateEngineTest(fixtures.TestBase): eq_(is_disconnect.call_count, 0) def test_ensure_dialect_does_is_disconnect_no_conn(self): - """test that is_disconnect() doesn't choke if no connection, cursor given.""" + """test that is_disconnect() doesn't choke if no connection, + cursor given.""" dialect = testing.db.dialect dbapi = dialect.dbapi - assert not dialect.is_disconnect(dbapi.OperationalError("test"), None, None) + assert not dialect.is_disconnect( + dbapi.OperationalError("test"), None, None) @testing.requires.sqlite def test_invalidate_on_connect(self): @@ -280,8 +293,9 @@ class CreateEngineTest(fixtures.TestBase): dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock(side_effect=sqlite3.ProgrammingError( - "Cannot operate on a closed database.")) + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError( + "Cannot operate on a closed database.")) try: create_engine('sqlite://', module=dbapi).connect() assert False @@ -313,7 +327,7 @@ class CreateEngineTest(fixtures.TestBase): echo_pool=None, module=mock_dbapi, _initialize=False, - ) + ) assert e.pool._recycle == 50 # these args work for QueuePool @@ -325,7 +339,7 @@ class CreateEngineTest(fixtures.TestBase): poolclass=tsa.pool.QueuePool, module=mock_dbapi, _initialize=False, - ) + ) # but not SingletonThreadPool @@ -338,7 +352,8 @@ class CreateEngineTest(fixtures.TestBase): poolclass=tsa.pool.SingletonThreadPool, module=mock_sqlite_dbapi, _initialize=False, - ) + ) + class TestRegNewDBAPI(fixtures.TestBase): def test_register_base(self): @@ -361,7 +376,8 @@ class TestRegNewDBAPI(fixtures.TestBase): global dialect dialect = MockDialect - registry.register("mockdialect.foob", ".".join(tokens[0:-1]), tokens[-1]) + registry.register( + "mockdialect.foob", ".".join(tokens[0:-1]), tokens[-1]) e = create_engine("mockdialect+foob://") assert isinstance(e.dialect, MockDialect) @@ -373,13 +389,16 @@ class TestRegNewDBAPI(fixtures.TestBase): e = create_engine("mysql+my_mock_dialect://") assert isinstance(e.dialect, MockDialect) + class MockDialect(DefaultDialect): @classmethod def dbapi(cls, **kw): return MockDBAPI() + def MockDBAPI(**assert_kwargs): 
connection = Mock(get_server_version_info=Mock(return_value='5.0')) + def connect(*args, **kwargs): for k in assert_kwargs: assert k in kwargs, 'key %s not present in dictionary' % k @@ -389,12 +408,12 @@ def MockDBAPI(**assert_kwargs): return connection return MagicMock( - sqlite_version_info=(99, 9, 9,), - version_info=(99, 9, 9,), - sqlite_version='99.9.9', - paramstyle='named', - connect=Mock(side_effect=connect) - ) + sqlite_version_info=(99, 9, 9,), + version_info=(99, 9, 9,), + sqlite_version='99.9.9', + paramstyle='named', + connect=Mock(side_effect=connect) + ) mock_dbapi = MockDBAPI() mock_sqlite_dbapi = msd = MockDBAPI() -- cgit v1.2.1 From 41e7253dee168b8c26c4993d27aac11f98c7f9e3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 12:12:44 -0500 Subject: - The engine-level error handling and wrapping routines will now take effect in all engine connection use cases, including when user-custom connect routines are used via the :paramref:`.create_engine.creator` parameter, as well as when the :class:`.Connection` encounters a connection error on revalidation. fixes #3266 --- doc/build/changelog/changelog_10.rst | 15 +++++ doc/build/changelog/migration_10.rst | 23 +++++++ lib/sqlalchemy/engine/base.py | 74 +++++++++++++++++++++-- lib/sqlalchemy/engine/interfaces.py | 18 +++++- lib/sqlalchemy/engine/strategies.py | 11 +--- lib/sqlalchemy/engine/threadlocal.py | 2 +- lib/sqlalchemy/events.py | 6 ++ test/engine/test_parseconnect.py | 113 ++++++++++++++++++++++++++++++++++- 8 files changed, 243 insertions(+), 19 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index b71ecc15d..b8b513821 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,21 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, engine + :tickets: 3266 + + The engine-level error handling and wrapping routines will now + take effect in all engine connection use cases, including + when user-custom connect routines are used via the + :paramref:`.create_engine.creator` parameter, as well as when + the :class:`.Connection` encounters a connection error on + revalidation. + + .. seealso:: + + :ref:`change_3266` + .. change:: :tags: feature, oracle diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 27a4fae4c..15e066a75 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -872,6 +872,29 @@ labeled uniquely. :ticket:`3170` +.. _change_3266: + +DBAPI exception wrapping and handle_error() event improvements +-------------------------------------------------------------- + +SQLAlchemy's wrapping of DBAPI exceptions was not taking place in the +case where a :class:`.Connection` object was invalidated, and then tried +to reconnect and encountered an error; this has been resolved. + +Additionally, the recently added :meth:`.ConnectionEvents.handle_error` +event is now invoked for errors that occur upon initial connect, upon +reconnect, and when :func:`.create_engine` is used given a custom connection +function via :paramref:`.create_engine.creator`. + +The :class:`.ExceptionContext` object has a new datamember +:attr:`.ExceptionContext.engine` that will always refer to the :class:`.Engine` +in use, in those cases when the :class:`.Connection` object is not available +(e.g. on initial connect). + + +:ticket:`3266` + + .. 
_behavioral_changes_orm_10: Behavioral Changes - ORM diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index dd82be1d1..901ab07eb 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -276,7 +276,7 @@ class Connection(Connectable): raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") - self.__connection = self.engine.raw_connection() + self.__connection = self.engine.raw_connection(self) self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @@ -1194,7 +1194,8 @@ class Connection(Connectable): # new handle_error event ctx = ExceptionContextImpl( - e, sqlalchemy_exception, self, cursor, statement, + e, sqlalchemy_exception, self.engine, + self, cursor, statement, parameters, context, self._is_disconnect) for fn in self.dispatch.handle_error: @@ -1242,6 +1243,58 @@ class Connection(Connectable): if self.should_close_with_result: self.close() + @classmethod + def _handle_dbapi_exception_noconnection( + cls, e, dialect, engine, connection): + exc_info = sys.exc_info() + + is_disconnect = dialect.is_disconnect(e, None, None) + + should_wrap = isinstance(e, dialect.dbapi.Error) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + None, + None, + e, + dialect.dbapi.Error, + connection_invalidated=is_disconnect) + else: + sqlalchemy_exception = None + + newraise = None + + if engine._has_events: + ctx = ExceptionContextImpl( + e, sqlalchemy_exception, engine, connection, None, None, + None, None, is_disconnect) + for fn in engine.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if sqlalchemy_exception and \ + is_disconnect != ctx.is_disconnect: + sqlalchemy_exception.connection_invalidated = \ + is_disconnect = ctx.is_disconnect + + if newraise: + util.raise_from_cause(newraise, exc_info) + elif should_wrap: + util.raise_from_cause( + sqlalchemy_exception, + exc_info + ) + else: + util.reraise(*exc_info) + def default_schema_name(self): return self.engine.dialect.get_default_schema_name(self) @@ -1320,8 +1373,9 @@ class ExceptionContextImpl(ExceptionContext): """Implement the :class:`.ExceptionContext` interface.""" def __init__(self, exception, sqlalchemy_exception, - connection, cursor, statement, parameters, + engine, connection, cursor, statement, parameters, context, is_disconnect): + self.engine = engine self.connection = connection self.sqlalchemy_exception = sqlalchemy_exception self.original_exception = exception @@ -1898,7 +1952,15 @@ class Engine(Connectable, log.Identified): """ return self.run_callable(self.dialect.has_table, table_name, schema) - def raw_connection(self): + def _wrap_pool_connect(self, fn, connection=None): + dialect = self.dialect + try: + return fn() + except dialect.dbapi.Error as e: + Connection._handle_dbapi_exception_noconnection( + e, dialect, self, connection) + + def raw_connection(self, _connection=None): """Return a "raw" DBAPI connection from the connection pool. The returned object is a proxied version of the DBAPI @@ -1914,8 +1976,8 @@ class Engine(Connectable, log.Identified): :meth:`.Engine.connect` method. 
""" - - return self.pool.unique_connection() + return self._wrap_pool_connect( + self.pool.unique_connection, _connection) class OptionEngine(Engine): diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index 0ad2efae0..5f66e54b5 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -917,7 +917,23 @@ class ExceptionContext(object): connection = None """The :class:`.Connection` in use during the exception. - This member is always present. + This member is present, except in the case of a failure when + first connecting. + + .. seealso:: + + :attr:`.ExceptionContext.engine` + + + """ + + engine = None + """The :class:`.Engine` in use during the exception. + + This member should always be present, even in the case of a failure + when first connecting. + + .. versionadded:: 1.0.0 """ diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index 398ef8df6..fd665ad03 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -86,16 +86,7 @@ class DefaultEngineStrategy(EngineStrategy): pool = pop_kwarg('pool', None) if pool is None: def connect(): - try: - return dialect.connect(*cargs, **cparams) - except dialect.dbapi.Error as e: - invalidated = dialect.is_disconnect(e, None, None) - util.raise_from_cause( - exc.DBAPIError.instance( - None, None, e, dialect.dbapi.Error, - connection_invalidated=invalidated - ) - ) + return dialect.connect(*cargs, **cparams) creator = pop_kwarg('creator', connect) diff --git a/lib/sqlalchemy/engine/threadlocal.py b/lib/sqlalchemy/engine/threadlocal.py index 637523a0e..71caac626 100644 --- a/lib/sqlalchemy/engine/threadlocal.py +++ b/lib/sqlalchemy/engine/threadlocal.py @@ -59,7 +59,7 @@ class TLEngine(base.Engine): # guards against pool-level reapers, if desired. # or not connection.connection.is_valid: connection = self._tl_connection_cls( - self, self.pool.connect(), **kw) + self, self._wrap_pool_connect(self.pool.connect), **kw) self._connections.conn = weakref.ref(connection) return connection._increment_connect() diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index c144902cd..8600c20f5 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -739,6 +739,12 @@ class ConnectionEvents(event.Events): .. versionadded:: 0.9.7 Added the :meth:`.ConnectionEvents.handle_error` hook. + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now + invoked when an :class:`.Engine` fails during the initial + call to :meth:`.Engine.connect`, as well as when a + :class:`.Connection` object encounters an error during a + reconnect operation. + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is not fired off when a dialect makes use of the ``skip_user_error_events`` execution option. 
This is used diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index d8f202f99..72a089aca 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -6,6 +6,7 @@ import sqlalchemy as tsa from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.mock import Mock, MagicMock +from sqlalchemy import event dialect = None @@ -240,7 +241,6 @@ class CreateEngineTest(fixtures.TestBase): def test_wraps_connect_in_dbapi(self): e = create_engine('sqlite://') sqlite3 = e.dialect.dbapi - dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError @@ -252,6 +252,117 @@ class CreateEngineTest(fixtures.TestBase): except tsa.exc.DBAPIError as de: assert not de.connection_invalidated + @testing.requires.sqlite + def test_handle_error_event_connect(self): + e = create_engine('sqlite://') + dbapi = MockDBAPI() + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError("random error")) + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is None + raise MySpecialException("failed operation") + + assert_raises( + MySpecialException, + eng.connect + ) + + @testing.requires.sqlite + def test_handle_error_event_reconnect(self): + e = create_engine('sqlite://') + dbapi = MockDBAPI() + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi, _initialize=False) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is conn + raise MySpecialException("failed operation") + + conn = eng.connect() + conn.invalidate() + + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError("random error")) + + assert_raises( + MySpecialException, + conn._revalidate_connection + ) + + @testing.requires.sqlite + def test_handle_error_custom_connect(self): + e = create_engine('sqlite://') + + dbapi = MockDBAPI() + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + + class MySpecialException(Exception): + pass + + def custom_connect(): + raise sqlite3.ProgrammingError("random error") + + eng = create_engine('sqlite://', module=dbapi, creator=custom_connect) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is None + raise MySpecialException("failed operation") + + assert_raises( + MySpecialException, + eng.connect + ) + + @testing.requires.sqlite + def test_handle_error_event_connect_invalidate_flag(self): + e = create_engine('sqlite://') + dbapi = MockDBAPI() + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError( + "Cannot operate on a closed database.")) + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.is_disconnect + ctx.is_disconnect = False + + try: + eng.connect() + assert False + except tsa.exc.DBAPIError as de: + assert not 
de.connection_invalidated + @testing.requires.sqlite def test_dont_touch_non_dbapi_exception_on_connect(self): e = create_engine('sqlite://') -- cgit v1.2.1 From d204e61f63756f2bbd3322377a283fc995e562ec Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 12:18:11 -0500 Subject: - document / work around that dialect_options isn't necessarily there --- lib/sqlalchemy/engine/reflection.py | 5 ++++- lib/sqlalchemy/testing/suite/test_reflection.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index ebc96f5dd..25f084c15 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -395,7 +395,10 @@ class Inspector(object): boolean dialect_options - dict of dialect-specific index options + dict of dialect-specific index options. May not be present + for all dialects. + + .. versionadded:: 1.0.0 :param table_name: string name of the table. For special quoting, use :class:`.quoted_name`. diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index e58b6f068..3edbdeb8c 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -515,6 +515,8 @@ class ComponentReflectionTest(fixtures.TablesTest): def test_get_temp_table_indexes(self): insp = inspect(self.metadata.bind) indexes = insp.get_indexes('user_tmp') + for ind in indexes: + ind.pop('dialect_options', None) eq_( # TODO: we need to add better filtering for indexes/uq constraints # that are doubled up -- cgit v1.2.1 From ec6214457ed71f0ae87d83076e084214650aae5d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 14:19:36 -0500 Subject: - pep8 --- test/dialect/test_sqlite.py | 423 ++++++++++++++++++++++++-------------------- 1 file changed, 232 insertions(+), 191 deletions(-) diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 124208dbe..04e82e686 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -7,8 +7,8 @@ import datetime from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy import Table, select, bindparam, Column,\ - MetaData, func, extract, ForeignKey, text, DefaultClause, and_, create_engine,\ - UniqueConstraint + MetaData, func, extract, ForeignKey, text, DefaultClause, and_, \ + create_engine, UniqueConstraint from sqlalchemy.types import Integer, String, Boolean, DateTime, Date, Time from sqlalchemy import types as sqltypes from sqlalchemy import event, inspect @@ -21,6 +21,8 @@ from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \ AssertsExecutionResults, engines from sqlalchemy import testing from sqlalchemy.schema import CreateTable +from sqlalchemy.engine.reflection import Inspector + class TestTypes(fixtures.TestBase, AssertsExecutionResults): @@ -32,9 +34,10 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): """ meta = MetaData(testing.db) - t = Table('bool_table', meta, Column('id', Integer, - primary_key=True), Column('boo', - Boolean(create_constraint=False))) + t = Table( + 'bool_table', meta, + Column('id', Integer, primary_key=True), + Column('boo', Boolean(create_constraint=False))) try: meta.create_all() testing.db.execute("INSERT INTO bool_table (id, boo) " @@ -69,28 +72,31 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): ValueError, "Couldn't parse %s string." 
% disp, lambda: testing.db.execute( - text("select 'ASDF' as value", typemap={"value":typ}) + text("select 'ASDF' as value", typemap={"value": typ}) ).scalar() ) def test_native_datetime(self): dbapi = testing.db.dialect.dbapi - connect_args = {'detect_types': dbapi.PARSE_DECLTYPES \ - | dbapi.PARSE_COLNAMES} - engine = engines.testing_engine(options={'connect_args' - : connect_args, 'native_datetime': True}) - t = Table('datetest', MetaData(), - Column('id', Integer, primary_key=True), - Column('d1', Date), Column('d2', sqltypes.TIMESTAMP)) + connect_args = { + 'detect_types': dbapi.PARSE_DECLTYPES | dbapi.PARSE_COLNAMES} + engine = engines.testing_engine( + options={'connect_args': connect_args, 'native_datetime': True}) + t = Table( + 'datetest', MetaData(), + Column('id', Integer, primary_key=True), + Column('d1', Date), Column('d2', sqltypes.TIMESTAMP)) t.create(engine) try: - engine.execute(t.insert(), {'d1': datetime.date(2010, 5, - 10), - 'd2': datetime.datetime( 2010, 5, 10, 12, 15, 25, - )}) + engine.execute(t.insert(), { + 'd1': datetime.date(2010, 5, 10), + 'd2': datetime.datetime(2010, 5, 10, 12, 15, 25) + }) row = engine.execute(t.select()).first() - eq_(row, (1, datetime.date(2010, 5, 10), - datetime.datetime( 2010, 5, 10, 12, 15, 25, ))) + eq_( + row, + (1, datetime.date(2010, 5, 10), + datetime.datetime(2010, 5, 10, 12, 15, 25))) r = engine.execute(func.current_date()).scalar() assert isinstance(r, util.string_types) finally: @@ -100,15 +106,16 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): @testing.provide_metadata def test_custom_datetime(self): sqlite_date = sqlite.DATETIME( - # 2004-05-21T00:00:00 - storage_format="%(year)04d-%(month)02d-%(day)02d" - "T%(hour)02d:%(minute)02d:%(second)02d", - regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)", - ) + # 2004-05-21T00:00:00 + storage_format="%(year)04d-%(month)02d-%(day)02d" + "T%(hour)02d:%(minute)02d:%(second)02d", + regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)", + ) t = Table('t', self.metadata, Column('d', sqlite_date)) self.metadata.create_all(testing.db) - testing.db.execute(t.insert(). - values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))) + testing.db.execute( + t.insert(). + values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))) testing.db.execute("insert into t (d) values ('2004-05-21T00:00:00')") eq_( testing.db.execute("select * from t order by d").fetchall(), @@ -116,21 +123,23 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): ) eq_( testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), - [(datetime.datetime(2004, 5, 21, 0, 0),), - (datetime.datetime(2010, 10, 15, 12, 37),)] + [ + (datetime.datetime(2004, 5, 21, 0, 0),), + (datetime.datetime(2010, 10, 15, 12, 37),)] ) @testing.provide_metadata def test_custom_date(self): sqlite_date = sqlite.DATE( - # 2004-05-21T00:00:00 - storage_format="%(year)04d|%(month)02d|%(day)02d", - regexp=r"(\d+)\|(\d+)\|(\d+)", - ) + # 2004-05-21T00:00:00 + storage_format="%(year)04d|%(month)02d|%(day)02d", + regexp=r"(\d+)\|(\d+)\|(\d+)", + ) t = Table('t', self.metadata, Column('d', sqlite_date)) self.metadata.create_all(testing.db) - testing.db.execute(t.insert(). - values(d=datetime.date(2010, 10, 15))) + testing.db.execute( + t.insert(). 
+ values(d=datetime.date(2010, 10, 15))) testing.db.execute("insert into t (d) values ('2004|05|21')") eq_( testing.db.execute("select * from t order by d").fetchall(), @@ -138,11 +147,11 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): ) eq_( testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), - [(datetime.date(2004, 5, 21),), - (datetime.date(2010, 10, 15),)] + [ + (datetime.date(2004, 5, 21),), + (datetime.date(2010, 10, 15),)] ) - def test_no_convert_unicode(self): """test no utf-8 encoding occurs""" @@ -156,7 +165,7 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): sqltypes.CHAR(convert_unicode=True), sqltypes.Unicode(), sqltypes.UnicodeText(), - ): + ): bindproc = t.dialect_impl(dialect).bind_processor(dialect) assert not bindproc or \ isinstance(bindproc(util.u('some string')), util.text_type) @@ -198,6 +207,7 @@ class DateTimeTest(fixtures.TestBase, AssertsCompiledSQL): rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) + class DateTest(fixtures.TestBase, AssertsCompiledSQL): def test_default(self): @@ -221,6 +231,7 @@ class DateTest(fixtures.TestBase, AssertsCompiledSQL): rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) + class TimeTest(fixtures.TestBase, AssertsCompiledSQL): def test_default(self): @@ -333,8 +344,9 @@ class DefaultsTest(fixtures.TestBase, AssertsCompiledSQL): @testing.provide_metadata def test_boolean_default(self): - t = Table("t", self.metadata, - Column("x", Boolean, server_default=sql.false())) + t = Table( + "t", self.metadata, + Column("x", Boolean, server_default=sql.false())) t.create(testing.db) testing.db.execute(t.insert()) testing.db.execute(t.insert().values(x=True)) @@ -351,7 +363,6 @@ class DefaultsTest(fixtures.TestBase, AssertsCompiledSQL): eq_(info['default'], '3') - class DialectTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' @@ -372,7 +383,7 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): Column('true', Integer), Column('false', Integer), Column('column', Integer), - ) + ) try: meta.create_all() t.insert().execute(safe=1) @@ -403,8 +414,8 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): table1 = Table('django_admin_log', metadata, autoload=True) table2 = Table('django_content_type', metadata, autoload=True) j = table1.join(table2) - assert j.onclause.compare(table1.c.content_type_id - == table2.c.id) + assert j.onclause.compare( + table1.c.content_type_id == table2.c.id) @testing.provide_metadata def test_quoted_identifiers_functional_two(self): @@ -426,8 +437,8 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): # unfortunately, still can't do this; sqlite quadruples # up the quotes on the table name here for pragma foreign_key_list - #testing.db.execute(r''' - #CREATE TABLE """b""" ( + # testing.db.execute(r''' + # CREATE TABLE """b""" ( # """id""" integer NOT NULL PRIMARY KEY, # """aid""" integer NULL # REFERENCES """a""" ("""id""") @@ -439,14 +450,13 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): #table2 = Table(r'"b"', metadata, autoload=True) #j = table1.join(table2) - #assert j.onclause.compare(table1.c['"id"'] + # assert j.onclause.compare(table1.c['"id"'] # == table2.c['"aid"']) def test_legacy_quoted_identifiers_unit(self): dialect = sqlite.dialect() dialect._broken_fk_pragma_quotes = True - for row in [ (0, 'target', 'tid', 'id'), (0, '"target"', 'tid', 'id'), @@ -457,7 +467,9 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): fks = {} fkeys = [] dialect._parse_fk(fks, 
fkeys, *row) - eq_(fkeys, [{ + eq_( + fkeys, + [{ 'referred_table': 'target', 'referred_columns': ['id'], 'referred_schema': None, @@ -470,17 +482,17 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): # amazingly, pysqlite seems to still deliver cursor.description # as encoded bytes in py2k - t = Table('x', self.metadata, - Column(u('méil'), Integer, primary_key=True), - Column(ue('\u6e2c\u8a66'), Integer), - ) + t = Table( + 'x', self.metadata, + Column(u('méil'), Integer, primary_key=True), + Column(ue('\u6e2c\u8a66'), Integer), + ) self.metadata.create_all(testing.db) result = testing.db.execute(t.select()) assert u('méil') in result.keys() assert ue('\u6e2c\u8a66') in result.keys() - def test_file_path_is_absolute(self): d = pysqlite_dialect.dialect() eq_( @@ -498,48 +510,51 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): e = create_engine('sqlite+pysqlite:///foo.db') assert e.pool.__class__ is pool.NullPool + @testing.provide_metadata def test_dont_reflect_autoindex(self): - meta = MetaData(testing.db) - t = Table('foo', meta, Column('bar', String, primary_key=True)) + meta = self.metadata + Table('foo', meta, Column('bar', String, primary_key=True)) meta.create_all() - from sqlalchemy.engine.reflection import Inspector - try: - inspector = Inspector(testing.db) - eq_(inspector.get_indexes('foo'), []) - eq_(inspector.get_indexes('foo', - include_auto_indexes=True), [{'unique': 1, 'name' - : 'sqlite_autoindex_foo_1', 'column_names': ['bar']}]) - finally: - meta.drop_all() + inspector = Inspector(testing.db) + eq_(inspector.get_indexes('foo'), []) + eq_( + inspector.get_indexes('foo', include_auto_indexes=True), + [{ + 'unique': 1, + 'name': 'sqlite_autoindex_foo_1', + 'column_names': ['bar']}]) + @testing.provide_metadata def test_create_index_with_schema(self): """Test creation of index with explicit schema""" - meta = MetaData(testing.db) - t = Table('foo', meta, Column('bar', String, index=True), - schema='main') - try: - meta.create_all() - finally: - meta.drop_all() + meta = self.metadata + Table( + 'foo', meta, Column('bar', String, index=True), + schema='main') + meta.create_all() + inspector = Inspector(testing.db) + eq_( + inspector.get_indexes('foo', schema='main'), + [{'unique': 0, 'name': u'ix_main_foo_bar', + 'column_names': [u'bar']}]) + @testing.provide_metadata def test_get_unique_constraints(self): - meta = MetaData(testing.db) - t1 = Table('foo', meta, Column('f', Integer), - UniqueConstraint('f', name='foo_f')) - t2 = Table('bar', meta, Column('b', Integer), - UniqueConstraint('b', name='bar_b'), - prefixes=['TEMPORARY']) + meta = self.metadata + Table( + 'foo', meta, Column('f', Integer), + UniqueConstraint('f', name='foo_f')) + Table( + 'bar', meta, Column('b', Integer), + UniqueConstraint('b', name='bar_b'), + prefixes=['TEMPORARY']) meta.create_all() - from sqlalchemy.engine.reflection import Inspector - try: - inspector = Inspector(testing.db) - eq_(inspector.get_unique_constraints('foo'), - [{'column_names': [u'f'], 'name': u'foo_f'}]) - eq_(inspector.get_unique_constraints('bar'), - [{'column_names': [u'b'], 'name': u'bar_b'}]) - finally: - meta.drop_all() + inspector = Inspector(testing.db) + eq_(inspector.get_unique_constraints('foo'), + [{'column_names': [u'f'], 'name': u'foo_f'}]) + eq_(inspector.get_unique_constraints('bar'), + [{'column_names': [u'b'], 'name': u'bar_b'}]) class AttachedMemoryDBTest(fixtures.TestBase): @@ -662,7 +677,7 @@ class SQLTest(fixtures.TestBase, AssertsCompiledSQL): 'epoch': '%s', 'dow': '%w', 
'week': '%W', - } + } for field, subst in mapping.items(): self.assert_compile(select([extract(field, t.c.col1)]), "SELECT CAST(STRFTIME('%s', t.col1) AS " @@ -685,53 +700,57 @@ class SQLTest(fixtures.TestBase, AssertsCompiledSQL): def test_constraints_with_schemas(self): metadata = MetaData() - t1 = Table('t1', metadata, - Column('id', Integer, primary_key=True), - schema='master') - t2 = Table('t2', metadata, - Column('id', Integer, primary_key=True), - Column('t1_id', Integer, ForeignKey('master.t1.id')), - schema='master' - ) - t3 = Table('t3', metadata, - Column('id', Integer, primary_key=True), - Column('t1_id', Integer, ForeignKey('master.t1.id')), - schema='alternate' - ) - t4 = Table('t4', metadata, - Column('id', Integer, primary_key=True), - Column('t1_id', Integer, ForeignKey('master.t1.id')), - ) + Table( + 't1', metadata, + Column('id', Integer, primary_key=True), + schema='master') + t2 = Table( + 't2', metadata, + Column('id', Integer, primary_key=True), + Column('t1_id', Integer, ForeignKey('master.t1.id')), + schema='master' + ) + t3 = Table( + 't3', metadata, + Column('id', Integer, primary_key=True), + Column('t1_id', Integer, ForeignKey('master.t1.id')), + schema='alternate' + ) + t4 = Table( + 't4', metadata, + Column('id', Integer, primary_key=True), + Column('t1_id', Integer, ForeignKey('master.t1.id')), + ) # schema->schema, generate REFERENCES with no schema name self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE master.t2 (" - "id INTEGER NOT NULL, " - "t1_id INTEGER, " - "PRIMARY KEY (id), " - "FOREIGN KEY(t1_id) REFERENCES t1 (id)" - ")" + "CREATE TABLE master.t2 (" + "id INTEGER NOT NULL, " + "t1_id INTEGER, " + "PRIMARY KEY (id), " + "FOREIGN KEY(t1_id) REFERENCES t1 (id)" + ")" ) # schema->different schema, don't generate REFERENCES self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE alternate.t3 (" - "id INTEGER NOT NULL, " - "t1_id INTEGER, " - "PRIMARY KEY (id)" - ")" + "CREATE TABLE alternate.t3 (" + "id INTEGER NOT NULL, " + "t1_id INTEGER, " + "PRIMARY KEY (id)" + ")" ) # same for local schema self.assert_compile( schema.CreateTable(t4), - "CREATE TABLE t4 (" - "id INTEGER NOT NULL, " - "t1_id INTEGER, " - "PRIMARY KEY (id)" - ")" + "CREATE TABLE t4 (" + "id INTEGER NOT NULL, " + "t1_id INTEGER, " + "PRIMARY KEY (id)" + ")" ) @@ -756,30 +775,37 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk1(self): - self._test_empty_insert(Table('a', MetaData(testing.db), - Column('id', Integer, - primary_key=True))) + self._test_empty_insert( + Table( + 'a', MetaData(testing.db), + Column('id', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk2(self): - assert_raises(exc.DBAPIError, self._test_empty_insert, Table('b' - , MetaData(testing.db), Column('x', Integer, - primary_key=True), Column('y', Integer, - primary_key=True))) + assert_raises( + exc.DBAPIError, self._test_empty_insert, + Table( + 'b', MetaData(testing.db), + Column('x', Integer, primary_key=True), + Column('y', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk3(self): - assert_raises(exc.DBAPIError, self._test_empty_insert, Table('c' - , MetaData(testing.db), Column('x', Integer, - primary_key=True), Column('y', Integer, - DefaultClause('123'), primary_key=True))) + assert_raises( + exc.DBAPIError, self._test_empty_insert, + Table( + 'c', 
MetaData(testing.db), + Column('x', Integer, primary_key=True), + Column('y', Integer, DefaultClause('123'), primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk4(self): - self._test_empty_insert(Table('d', MetaData(testing.db), - Column('x', Integer, primary_key=True), - Column('y', Integer, DefaultClause('123' - )))) + self._test_empty_insert( + Table( + 'd', MetaData(testing.db), + Column('x', Integer, primary_key=True), + Column('y', Integer, DefaultClause('123')) + )) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk1(self): @@ -788,9 +814,10 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk2(self): - self._test_empty_insert(Table('f', MetaData(testing.db), - Column('x', Integer), Column('y', - Integer))) + self._test_empty_insert( + Table( + 'f', MetaData(testing.db), + Column('x', Integer), Column('y', Integer))) def test_inserts_with_spaces(self): tbl = Table('tbl', MetaData('sqlite:///'), Column('with space', @@ -800,8 +827,8 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): tbl.insert().execute({'without': 123}) assert list(tbl.select().execute()) == [(None, 123)] tbl.insert().execute({'with space': 456}) - assert list(tbl.select().execute()) == [(None, 123), (456, - None)] + assert list(tbl.select().execute()) == [ + (None, 123), (456, None)] finally: tbl.drop() @@ -817,6 +844,8 @@ def full_text_search_missing(): except: return True +metadata = cattable = matchtable = None + class MatchTest(fixtures.TestBase, AssertsCompiledSQL): @@ -845,19 +874,20 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): """) matchtable = Table('matchtable', metadata, autoload=True) metadata.create_all() - cattable.insert().execute([{'id': 1, 'description': 'Python'}, - {'id': 2, 'description': 'Ruby'}]) - matchtable.insert().execute([{'id': 1, 'title' - : 'Agile Web Development with Rails' - , 'category_id': 2}, {'id': 2, - 'title': 'Dive Into Python', - 'category_id': 1}, {'id': 3, 'title' - : "Programming Matz's Ruby", - 'category_id': 2}, {'id': 4, 'title' - : 'The Definitive Guide to Django', - 'category_id': 1}, {'id': 5, 'title' - : 'Python in a Nutshell', - 'category_id': 1}]) + cattable.insert().execute( + [{'id': 1, 'description': 'Python'}, + {'id': 2, 'description': 'Ruby'}]) + matchtable.insert().execute( + [ + {'id': 1, 'title': 'Agile Web Development with Rails', + 'category_id': 2}, + {'id': 2, 'title': 'Dive Into Python', 'category_id': 1}, + {'id': 3, 'title': "Programming Matz's Ruby", + 'category_id': 2}, + {'id': 4, 'title': 'The Definitive Guide to Django', + 'category_id': 1}, + {'id': 5, 'title': 'Python in a Nutshell', 'category_id': 1} + ]) @classmethod def teardown_class(cls): @@ -869,35 +899,38 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL): def test_simple_match(self): results = \ - matchtable.select().where(matchtable.c.title.match('python' - )).order_by(matchtable.c.id).execute().fetchall() + matchtable.select().where( + matchtable.c.title.match('python')).\ + order_by(matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_prefix_match(self): results = \ - matchtable.select().where(matchtable.c.title.match('nut*' - )).execute().fetchall() + matchtable.select().where( + matchtable.c.title.match('nut*')).execute().fetchall() eq_([5], [r.id for r in results]) def test_or_match(self): results2 = \ 
matchtable.select().where( - matchtable.c.title.match('nutshell OR ruby' - )).order_by(matchtable.c.id).execute().fetchall() + matchtable.c.title.match('nutshell OR ruby')).\ + order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results2 = \ matchtable.select().where( - matchtable.c.title.match('python nutshell' - )).execute().fetchall() + matchtable.c.title.match('python nutshell') + ).execute().fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self): - results = matchtable.select().where(and_(cattable.c.id - == matchtable.c.category_id, - cattable.c.description.match('Ruby' - ))).order_by(matchtable.c.id).execute().fetchall() + results = matchtable.select().where( + and_( + cattable.c.id == matchtable.c.category_id, + cattable.c.description.match('Ruby') + ) + ).order_by(matchtable.c.id).execute().fetchall() eq_([1, 3], [r.id for r in results]) @@ -907,10 +940,11 @@ class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL): table = Table('autoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None), sqlite_autoincrement=True) - self.assert_compile(schema.CreateTable(table), - 'CREATE TABLE autoinctable (id INTEGER NOT ' - 'NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)' - , dialect=sqlite.dialect()) + self.assert_compile( + schema.CreateTable(table), + 'CREATE TABLE autoinctable (id INTEGER NOT ' + 'NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)', + dialect=sqlite.dialect()) def test_sqlite_autoincrement_constraint(self): table = Table( @@ -920,7 +954,7 @@ class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL): Column('x', Integer, default=None), UniqueConstraint('x'), sqlite_autoincrement=True, - ) + ) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT, x ' @@ -944,7 +978,7 @@ class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL): MetaData(), Column('id', MyInteger, primary_key=True), sqlite_autoincrement=True, - ) + ) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT)', @@ -958,7 +992,8 @@ class ReflectHeadlessFKsTest(fixtures.TestBase): testing.db.execute("CREATE TABLE a (id INTEGER PRIMARY KEY)") # this syntax actually works on other DBs perhaps we'd want to add # tests to test_reflection - testing.db.execute("CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)") + testing.db.execute( + "CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)") def teardown(self): testing.db.execute("drop table b") @@ -971,21 +1006,24 @@ class ReflectHeadlessFKsTest(fixtures.TestBase): assert b.c.id.references(a.c.id) + class ReflectFKConstraintTest(fixtures.TestBase): __only_on__ = 'sqlite' def setup(self): testing.db.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)") testing.db.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)") - testing.db.execute("CREATE TABLE b (id INTEGER PRIMARY KEY, " - "FOREIGN KEY(id) REFERENCES a1(id)," - "FOREIGN KEY(id) REFERENCES a2(id)" - ")") - testing.db.execute("CREATE TABLE c (id INTEGER, " - "CONSTRAINT bar PRIMARY KEY(id)," - "CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id)," - "CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)" - ")") + testing.db.execute( + "CREATE TABLE b (id INTEGER PRIMARY KEY, " + "FOREIGN KEY(id) REFERENCES a1(id)," + "FOREIGN KEY(id) REFERENCES a2(id)" + ")") + testing.db.execute( + "CREATE TABLE c (id INTEGER, " + "CONSTRAINT bar PRIMARY KEY(id)," + 
"CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id)," + "CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)" + ")") def teardown(self): testing.db.execute("drop table c") @@ -1005,7 +1043,8 @@ class ReflectFKConstraintTest(fixtures.TestBase): def test_name_not_none(self): # we don't have names for PK constraints, # it appears we get back None in the pragma for - # FKs also (also it doesn't even appear to be documented on sqlite's docs + # FKs also (also it doesn't even appear to be documented on + # sqlite's docs # at http://www.sqlite.org/pragma.html#pragma_foreign_key_list # how did we ever know that's the "name" field ??) @@ -1018,6 +1057,7 @@ class ReflectFKConstraintTest(fixtures.TestBase): class SavepointTest(fixtures.TablesTest): + """test that savepoints work when we use the correct event setup""" __only_on__ = 'sqlite' @@ -1081,7 +1121,7 @@ class SavepointTest(fixtures.TablesTest): connection = self.bind.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') - trans2 = connection.begin_nested() + connection.begin_nested() connection.execute(users.insert(), user_id=2, user_name='user2') trans3 = connection.begin() connection.execute(users.insert(), user_id=3, user_name='user3') @@ -1169,8 +1209,8 @@ class TypeReflectionTest(fixtures.TestBase): if warnings: def go(): return dialect._resolve_type_affinity(from_) - final_type = testing.assert_warnings(go, - ["Could not instantiate"], regex=True) + final_type = testing.assert_warnings( + go, ["Could not instantiate"], regex=True) else: final_type = dialect._resolve_type_affinity(from_) expected_type = type(to_) @@ -1186,8 +1226,8 @@ class TypeReflectionTest(fixtures.TestBase): if warnings: def go(): return inspector.get_columns("foo")[0] - col_info = testing.assert_warnings(go, - ["Could not instantiate"], regex=True) + col_info = testing.assert_warnings( + go, ["Could not instantiate"], regex=True) else: col_info = inspector.get_columns("foo")[0] expected_type = type(to_) @@ -1207,7 +1247,8 @@ class TypeReflectionTest(fixtures.TestBase): self._test_lookup_direct(self._fixed_lookup_fixture()) def test_lookup_direct_unsupported_args(self): - self._test_lookup_direct(self._unsupported_args_fixture(), warnings=True) + self._test_lookup_direct( + self._unsupported_args_fixture(), warnings=True) def test_lookup_direct_type_affinity(self): self._test_lookup_direct(self._type_affinity_fixture()) @@ -1216,8 +1257,8 @@ class TypeReflectionTest(fixtures.TestBase): self._test_round_trip(self._fixed_lookup_fixture()) def test_round_trip_direct_unsupported_args(self): - self._test_round_trip(self._unsupported_args_fixture(), warnings=True) + self._test_round_trip( + self._unsupported_args_fixture(), warnings=True) def test_round_trip_direct_type_affinity(self): self._test_round_trip(self._type_affinity_fixture()) - -- cgit v1.2.1 From 0ce045bd853ec078943c14fc93b87897d2169882 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 14:46:43 -0500 Subject: - The SQLite dialect, when using the :class:`.sqlite.DATE`, :class:`.sqlite.TIME`, or :class:`.sqlite.DATETIME` types, and given a ``storage_format`` that only renders numbers, will render the types in DDL as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, so that despite the lack of alpha characters in the values, the column will still deliver the "text affinity". Normally this is not needed, as the textual values within the default storage formats already imply text. 
fixes #3257 --- doc/build/changelog/changelog_10.rst | 18 ++++++++++ lib/sqlalchemy/dialects/sqlite/base.py | 60 +++++++++++++++++++++++++++++++++- test/dialect/test_sqlite.py | 57 ++++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index b8b513821..9cc144fc6 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,24 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, sqlite + :tickets: 3257 + + The SQLite dialect, when using the :class:`.sqlite.DATE`, + :class:`.sqlite.TIME`, + or :class:`.sqlite.DATETIME` types, and given a ``storage_format`` that + only renders numbers, will render the types in DDL as + ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, so that despite the + lack of alpha characters in the values, the column will still + deliver the "text affinity". Normally this is not needed, as the + textual values within the default storage formats already + imply text. + + .. seealso:: + + :ref:`sqlite_datetime` + .. change:: :tags: bug, engine :tickets: 3266 diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 33003297c..ccd7f2539 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -9,6 +9,7 @@ .. dialect:: sqlite :name: SQLite +.. _sqlite_datetime: Date and Time Types ------------------- @@ -23,6 +24,20 @@ These types represent dates and times as ISO formatted strings, which also nicely support ordering. There's no reliance on typical "libc" internals for these functions so historical dates are fully supported. +Ensuring Text affinity +^^^^^^^^^^^^^^^^^^^^^^ + +The DDL rendered for these types is the standard ``DATE``, ``TIME`` +and ``DATETIME`` indicators. However, custom storage formats can also be +applied to these types. When the +storage format is detected as containing no alpha characters, the DDL for +these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, +so that the column continues to have textual affinity. + +.. seealso:: + + `Type Affinity `_ - in the SQLite documentation + .. _sqlite_autoincrement: SQLite Auto Incrementing Behavior @@ -255,7 +270,7 @@ from ... import util from ...engine import default, reflection from ...sql import compiler -from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, +from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT, INTEGER, REAL, NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR) @@ -271,6 +286,25 @@ class _DateTimeMixin(object): if storage_format is not None: self._storage_format = storage_format + @property + def format_is_text_affinity(self): + """return True if the storage format will automatically imply + a TEXT affinity. + + If the storage format contains no non-numeric characters, + it will imply a NUMERIC storage format on SQLite; in this case, + the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, + TIME_CHAR. + + .. 
versionadded:: 1.0.0 + + """ + spec = self._storage_format % { + "year": 0, "month": 0, "day": 0, "hour": 0, + "minute": 0, "second": 0, "microsecond": 0 + } + return bool(re.search(r'[^0-9]', spec)) + def adapt(self, cls, **kw): if issubclass(cls, _DateTimeMixin): if self._storage_format: @@ -526,7 +560,9 @@ ischema_names = { 'BOOLEAN': sqltypes.BOOLEAN, 'CHAR': sqltypes.CHAR, 'DATE': sqltypes.DATE, + 'DATE_CHAR': sqltypes.DATE, 'DATETIME': sqltypes.DATETIME, + 'DATETIME_CHAR': sqltypes.DATETIME, 'DOUBLE': sqltypes.FLOAT, 'DECIMAL': sqltypes.DECIMAL, 'FLOAT': sqltypes.FLOAT, @@ -537,6 +573,7 @@ ischema_names = { 'SMALLINT': sqltypes.SMALLINT, 'TEXT': sqltypes.TEXT, 'TIME': sqltypes.TIME, + 'TIME_CHAR': sqltypes.TIME, 'TIMESTAMP': sqltypes.TIMESTAMP, 'VARCHAR': sqltypes.VARCHAR, 'NVARCHAR': sqltypes.NVARCHAR, @@ -670,6 +707,27 @@ class SQLiteTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_): return self.visit_BLOB(type_) + def visit_DATETIME(self, type_): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATETIME(type_) + else: + return "DATETIME_CHAR" + + def visit_DATE(self, type_): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATE(type_) + else: + return "DATE_CHAR" + + def visit_TIME(self, type_): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_TIME(type_) + else: + return "TIME_CHAR" + class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([ diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 04e82e686..22772d2fb 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -128,6 +128,53 @@ class TestTypes(fixtures.TestBase, AssertsExecutionResults): (datetime.datetime(2010, 10, 15, 12, 37),)] ) + @testing.provide_metadata + def test_custom_datetime_text_affinity(self): + sqlite_date = sqlite.DATETIME( + storage_format="%(year)04d%(month)02d%(day)02d" + "%(hour)02d%(minute)02d%(second)02d", + regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})", + ) + t = Table('t', self.metadata, Column('d', sqlite_date)) + self.metadata.create_all(testing.db) + testing.db.execute( + t.insert(). + values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))) + testing.db.execute("insert into t (d) values ('20040521000000')") + eq_( + testing.db.execute("select * from t order by d").fetchall(), + [('20040521000000',), ('20101015123700',)] + ) + eq_( + testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), + [ + (datetime.datetime(2004, 5, 21, 0, 0),), + (datetime.datetime(2010, 10, 15, 12, 37),)] + ) + + @testing.provide_metadata + def test_custom_date_text_affinity(self): + sqlite_date = sqlite.DATE( + storage_format="%(year)04d%(month)02d%(day)02d", + regexp=r"(\d{4})(\d{2})(\d{2})", + ) + t = Table('t', self.metadata, Column('d', sqlite_date)) + self.metadata.create_all(testing.db) + testing.db.execute( + t.insert(). 
+ values(d=datetime.date(2010, 10, 15))) + testing.db.execute("insert into t (d) values ('20040521')") + eq_( + testing.db.execute("select * from t order by d").fetchall(), + [('20040521',), ('20101015',)] + ) + eq_( + testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), + [ + (datetime.date(2004, 5, 21),), + (datetime.date(2010, 10, 15),)] + ) + @testing.provide_metadata def test_custom_date(self): sqlite_date = sqlite.DATE( @@ -1167,6 +1214,16 @@ class TypeReflectionTest(fixtures.TestBase): (sqltypes.Time, sqltypes.TIME()), (sqltypes.BOOLEAN, sqltypes.BOOLEAN()), (sqltypes.Boolean, sqltypes.BOOLEAN()), + (sqlite.DATE( + storage_format="%(year)04d%(month)02d%(day)02d", + ), sqltypes.DATE()), + (sqlite.TIME( + storage_format="%(hour)02d%(minute)02d%(second)02d", + ), sqltypes.TIME()), + (sqlite.DATETIME( + storage_format="%(year)04d%(month)02d%(day)02d" + "%(hour)02d%(minute)02d%(second)02d", + ), sqltypes.DATETIME()), ] def _unsupported_args_fixture(self): -- cgit v1.2.1 From 0639c199a547343d62134d2f233225fd2862ec45 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 16:34:43 -0500 Subject: - move inner calls to _revalidate_connection() outside of existing _handle_dbapi_error(); these are now handled already and the reentrant call is not needed / breaks things. Adjustment to 41e7253dee168b8c26c49 / --- lib/sqlalchemy/engine/base.py | 17 +++++++++-------- test/engine/test_parseconnect.py | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 901ab07eb..235e1bf43 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -814,11 +814,11 @@ class Connection(Connectable): fn(self, default, multiparams, params) try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() + conn = self.__connection + except AttributeError: + conn = self._revalidate_connection() + try: dialect = self.dialect ctx = dialect.execution_ctx_cls._init_default( dialect, self, conn) @@ -952,11 +952,11 @@ class Connection(Connectable): a :class:`.ResultProxy`.""" try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() + conn = self.__connection + except AttributeError: + conn = self._revalidate_connection() + try: context = constructor(dialect, self, conn, *args) except Exception as e: self._handle_dbapi_exception(e, @@ -1246,6 +1246,7 @@ class Connection(Connectable): @classmethod def _handle_dbapi_exception_noconnection( cls, e, dialect, engine, connection): + exc_info = sys.exc_info() is_disconnect = dialect.is_disconnect(e, None, None) diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 72a089aca..b6d08ceba 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -7,6 +7,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.mock import Mock, MagicMock from sqlalchemy import event +from sqlalchemy import select dialect = None @@ -279,7 +280,7 @@ class CreateEngineTest(fixtures.TestBase): ) @testing.requires.sqlite - def test_handle_error_event_reconnect(self): + def test_handle_error_event_revalidate(self): e = create_engine('sqlite://') dbapi = MockDBAPI() sqlite3 = e.dialect.dbapi @@ -295,6 +296,7 @@ class CreateEngineTest(fixtures.TestBase): def handle_error(ctx): assert ctx.engine is eng assert ctx.connection is conn + assert 
isinstance(ctx.sqlalchemy_exception, exc.ProgrammingError) raise MySpecialException("failed operation") conn = eng.connect() @@ -308,6 +310,37 @@ class CreateEngineTest(fixtures.TestBase): conn._revalidate_connection ) + @testing.requires.sqlite + def test_handle_error_event_implicit_revalidate(self): + e = create_engine('sqlite://') + dbapi = MockDBAPI() + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi, _initialize=False) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is conn + assert isinstance(ctx.sqlalchemy_exception, exc.ProgrammingError) + raise MySpecialException("failed operation") + + conn = eng.connect() + conn.invalidate() + + dbapi.connect = Mock( + side_effect=sqlite3.ProgrammingError("random error")) + + assert_raises( + MySpecialException, + conn.execute, select([1]) + ) + @testing.requires.sqlite def test_handle_error_custom_connect(self): e = create_engine('sqlite://') -- cgit v1.2.1 From b8114a357684ab3232ff90ceb0da16dad080d1ac Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Dec 2014 19:08:47 -0500 Subject: - adjust _revalidate_connection() again such that we pass a _wrap=False to it, so that we say we will do the wrapping just once right here in _execute_context() / _execute_default(). An adjustment is made to _handle_dbapi_error() to not assume self.__connection in case we are already in an invalidated state further adjustment to 0639c199a547343d62134d2f233225fd2862ec45, 41e7253dee168b8c26c49, #3266 --- lib/sqlalchemy/engine/base.py | 46 ++++++++++++++++++++---------------- lib/sqlalchemy/engine/threadlocal.py | 5 +++- test/engine/test_parseconnect.py | 2 +- test/engine/test_reconnect.py | 4 ++-- 4 files changed, 33 insertions(+), 24 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 235e1bf43..23348469d 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -265,18 +265,18 @@ class Connection(Connectable): try: return self.__connection except AttributeError: - return self._revalidate_connection() + return self._revalidate_connection(_wrap=True) - def _revalidate_connection(self): + def _revalidate_connection(self, _wrap): if self.__branch_from: - return self.__branch_from._revalidate_connection() - + return self.__branch_from._revalidate_connection(_wrap=_wrap) if self.__can_reconnect and self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") - self.__connection = self.engine.raw_connection(self) + self.__connection = self.engine.raw_connection( + _connection=self, _wrap=_wrap) self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @@ -814,11 +814,11 @@ class Connection(Connectable): fn(self, default, multiparams, params) try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() + try: + conn = self.__connection + except AttributeError: + conn = self._revalidate_connection(_wrap=False) - try: dialect = self.dialect ctx = dialect.execution_ctx_cls._init_default( dialect, self, conn) @@ -952,16 +952,17 @@ class Connection(Connectable): a :class:`.ResultProxy`.""" try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() + try: + conn = self.__connection 
+ except AttributeError: + conn = self._revalidate_connection(_wrap=False) - try: context = constructor(dialect, self, conn, *args) except Exception as e: - self._handle_dbapi_exception(e, - util.text_type(statement), parameters, - None, None) + self._handle_dbapi_exception( + e, + util.text_type(statement), parameters, + None, None) if context.compiled: context.pre_exec() @@ -1149,7 +1150,10 @@ class Connection(Connectable): self._is_disconnect = \ isinstance(e, self.dialect.dbapi.Error) and \ not self.closed and \ - self.dialect.is_disconnect(e, self.__connection, cursor) + self.dialect.is_disconnect( + e, + self.__connection if not self.invalidated else None, + cursor) if context: context.is_disconnect = self._is_disconnect @@ -1953,7 +1957,9 @@ class Engine(Connectable, log.Identified): """ return self.run_callable(self.dialect.has_table, table_name, schema) - def _wrap_pool_connect(self, fn, connection=None): + def _wrap_pool_connect(self, fn, connection, wrap=True): + if not wrap: + return fn() dialect = self.dialect try: return fn() @@ -1961,7 +1967,7 @@ class Engine(Connectable, log.Identified): Connection._handle_dbapi_exception_noconnection( e, dialect, self, connection) - def raw_connection(self, _connection=None): + def raw_connection(self, _connection=None, _wrap=True): """Return a "raw" DBAPI connection from the connection pool. The returned object is a proxied version of the DBAPI @@ -1978,7 +1984,7 @@ class Engine(Connectable, log.Identified): """ return self._wrap_pool_connect( - self.pool.unique_connection, _connection) + self.pool.unique_connection, _connection, _wrap) class OptionEngine(Engine): diff --git a/lib/sqlalchemy/engine/threadlocal.py b/lib/sqlalchemy/engine/threadlocal.py index 71caac626..824b68fdf 100644 --- a/lib/sqlalchemy/engine/threadlocal.py +++ b/lib/sqlalchemy/engine/threadlocal.py @@ -59,7 +59,10 @@ class TLEngine(base.Engine): # guards against pool-level reapers, if desired. 
# or not connection.connection.is_valid: connection = self._tl_connection_cls( - self, self._wrap_pool_connect(self.pool.connect), **kw) + self, + self._wrap_pool_connect( + self.pool.connect, connection, wrap=True), + **kw) self._connections.conn = weakref.ref(connection) return connection._increment_connect() diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index b6d08ceba..4a3da7d1c 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -307,7 +307,7 @@ class CreateEngineTest(fixtures.TestBase): assert_raises( MySpecialException, - conn._revalidate_connection + getattr, conn, 'connection' ) @testing.requires.sqlite diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index 4500ada6a..0efce87ce 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -517,7 +517,7 @@ class RealReconnectTest(fixtures.TestBase): assert c1.invalidated assert c1_branch.invalidated - c1_branch._revalidate_connection() + c1_branch._revalidate_connection(_wrap=True) assert not c1.invalidated assert not c1_branch.invalidated @@ -535,7 +535,7 @@ class RealReconnectTest(fixtures.TestBase): assert c1.invalidated assert c1_branch.invalidated - c1._revalidate_connection() + c1._revalidate_connection(_wrap=True) assert not c1.invalidated assert not c1_branch.invalidated -- cgit v1.2.1 From 95cd2003bbe1b5da2d3c78ac845855126e03de2f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 6 Dec 2014 12:39:18 -0500 Subject: pep8 --- test/dialect/mssql/test_types.py | 247 ++++++++++++++++++++++----------------- 1 file changed, 141 insertions(+), 106 deletions(-) diff --git a/test/dialect/mssql/test_types.py b/test/dialect/mssql/test_types.py index 9dc1983ae..24f0eb0be 100644 --- a/test/dialect/mssql/test_types.py +++ b/test/dialect/mssql/test_types.py @@ -2,12 +2,14 @@ from sqlalchemy.testing import eq_, engines, pickleable import datetime import os -from sqlalchemy import * +from sqlalchemy import Table, Column, MetaData, Float, \ + Integer, String, Boolean, TIMESTAMP, Sequence, Numeric, select, \ + Date, Time, DateTime, DefaultClause, PickleType, text from sqlalchemy import types, schema from sqlalchemy.databases import mssql from sqlalchemy.dialects.mssql.base import TIME from sqlalchemy.testing import fixtures, \ - AssertsExecutionResults, ComparesTables + AssertsExecutionResults, ComparesTables from sqlalchemy import testing from sqlalchemy.testing import emits_warning_on import decimal @@ -32,6 +34,7 @@ class TimeTypeTest(fixtures.TestBase): class TypeDDLTest(fixtures.TestBase): + def test_boolean(self): "Exercise type specification for boolean type." @@ -39,7 +42,7 @@ class TypeDDLTest(fixtures.TestBase): # column type, args, kwargs, expected ddl (Boolean, [], {}, 'BIT'), - ] + ] metadata = MetaData() table_args = ['test_mssql_boolean', metadata] @@ -54,11 +57,11 @@ class TypeDDLTest(fixtures.TestBase): for col in boolean_table.c: index = int(col.name[1:]) - testing.eq_(gen.get_column_specification(col), - "%s %s" % (col.name, columns[index][3])) + testing.eq_( + gen.get_column_specification(col), + "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) - def test_numeric(self): "Exercise type specification and options for numeric types." 
@@ -88,7 +91,7 @@ class TypeDDLTest(fixtures.TestBase): 'TINYINT'), (types.SmallInteger, [], {}, 'SMALLINT'), - ] + ] metadata = MetaData() table_args = ['test_mssql_numeric', metadata] @@ -103,11 +106,11 @@ class TypeDDLTest(fixtures.TestBase): for col in numeric_table.c: index = int(col.name[1:]) - testing.eq_(gen.get_column_specification(col), - "%s %s" % (col.name, columns[index][3])) + testing.eq_( + gen.get_column_specification(col), + "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) - def test_char(self): """Exercise COLLATE-ish options on string types.""" @@ -149,7 +152,7 @@ class TypeDDLTest(fixtures.TestBase): 'NTEXT'), (mssql.MSNText, [], {'collation': 'Latin1_General_CI_AS'}, 'NTEXT COLLATE Latin1_General_CI_AS'), - ] + ] metadata = MetaData() table_args = ['test_mssql_charset', metadata] @@ -164,11 +167,11 @@ class TypeDDLTest(fixtures.TestBase): for col in charset_table.c: index = int(col.name[1:]) - testing.eq_(gen.get_column_specification(col), - "%s %s" % (col.name, columns[index][3])) + testing.eq_( + gen.get_column_specification(col), + "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) - def test_timestamp(self): """Exercise TIMESTAMP column.""" @@ -176,9 +179,10 @@ class TypeDDLTest(fixtures.TestBase): metadata = MetaData() spec, expected = (TIMESTAMP, 'TIMESTAMP') - t = Table('mssql_ts', metadata, - Column('id', Integer, primary_key=True), - Column('t', spec, nullable=None)) + t = Table( + 'mssql_ts', metadata, + Column('id', Integer, primary_key=True), + Column('t', spec, nullable=None)) gen = dialect.ddl_compiler(dialect, schema.CreateTable(t)) testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected) self.assert_(repr(t.c.t)) @@ -255,7 +259,11 @@ class TypeDDLTest(fixtures.TestBase): % (col.name, columns[index][3])) self.assert_(repr(col)) -class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTables): +metadata = None + + +class TypeRoundTripTest( + fixtures.TestBase, AssertsExecutionResults, ComparesTables): __only_on__ = 'mssql' @classmethod @@ -266,15 +274,18 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl def teardown(self): metadata.drop_all() - @testing.fails_on_everything_except('mssql+pyodbc', - 'this is some pyodbc-specific feature') + @testing.fails_on_everything_except( + 'mssql+pyodbc', + 'this is some pyodbc-specific feature') def test_decimal_notation(self): - numeric_table = Table('numeric_table', metadata, Column('id', - Integer, Sequence('numeric_id_seq', - optional=True), primary_key=True), - Column('numericcol', - Numeric(precision=38, scale=20, - asdecimal=True))) + numeric_table = Table( + 'numeric_table', metadata, + Column( + 'id', Integer, + Sequence('numeric_id_seq', optional=True), primary_key=True), + Column( + 'numericcol', + Numeric(precision=38, scale=20, asdecimal=True))) metadata.create_all() test_items = [decimal.Decimal(d) for d in ( '1500000.00000000000000000000', @@ -323,7 +334,7 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl '000000000000.32E12', '00000000000000.1E+12', '000000000000.2E-32', - )] + )] for value in test_items: numeric_table.insert().execute(numericcol=value) @@ -332,10 +343,13 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl assert value[0] in test_items, "%r not in test_items" % value[0] def test_float(self): - float_table = Table('float_table', metadata, Column('id', - Integer, Sequence('numeric_id_seq', - optional=True), primary_key=True), - 
Column('floatcol', Float())) + float_table = Table( + 'float_table', metadata, + Column( + 'id', Integer, + Sequence('numeric_id_seq', optional=True), primary_key=True), + Column('floatcol', Float())) + metadata.create_all() try: test_items = [float(d) for d in ( @@ -363,13 +377,12 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl '1E-6', '1E-7', '1E-8', - )] + )] for value in test_items: float_table.insert().execute(floatcol=value) except Exception as e: raise e - # todo this should suppress warnings, but it does not @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*') def test_dates(self): @@ -417,20 +430,20 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl (mssql.MSDateTime2, [1], {}, 'DATETIME2(1)', ['>=', (10,)]), - ] + ] table_args = ['test_mssql_dates', metadata] for index, spec in enumerate(columns): type_, args, kw, res, requires = spec[0:5] - if requires and testing._is_excluded('mssql', *requires) \ - or not requires: - c = Column('c%s' % index, type_(*args, - **kw), nullable=None) + if requires and \ + testing._is_excluded('mssql', *requires) or not requires: + c = Column('c%s' % index, type_(*args, **kw), nullable=None) testing.db.dialect.type_descriptor(c.type) table_args.append(c) dates_table = Table(*table_args) - gen = testing.db.dialect.ddl_compiler(testing.db.dialect, - schema.CreateTable(dates_table)) + gen = testing.db.dialect.ddl_compiler( + testing.db.dialect, + schema.CreateTable(dates_table)) for col in dates_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), '%s %s' @@ -443,13 +456,14 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl self.assert_types_base(col, dates_table.c[col.key]) def test_date_roundtrip(self): - t = Table('test_dates', metadata, - Column('id', Integer, - Sequence('datetest_id_seq', optional=True), - primary_key=True), - Column('adate', Date), - Column('atime', Time), - Column('adatetime', DateTime)) + t = Table( + 'test_dates', metadata, + Column('id', Integer, + Sequence('datetest_id_seq', optional=True), + primary_key=True), + Column('adate', Date), + Column('atime', Time), + Column('adatetime', DateTime)) metadata.create_all() d1 = datetime.date(2007, 10, 30) t1 = datetime.time(11, 2, 32) @@ -527,48 +541,57 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl testing.eq_(col.type.length, binary_table.c[col.name].type.length) - def test_autoincrement(self): - Table('ai_1', metadata, - Column('int_y', Integer, primary_key=True), - Column('int_n', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False)) - Table('ai_2', metadata, - Column('int_y', Integer, primary_key=True), - Column('int_n', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False)) - Table('ai_3', metadata, - Column('int_n', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False), - Column('int_y', Integer, primary_key=True)) - Table('ai_4', metadata, - Column('int_n', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False), - Column('int_n2', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False)) - Table('ai_5', metadata, - Column('int_y', Integer, primary_key=True), - Column('int_n', Integer, DefaultClause('0'), - primary_key=True, autoincrement=False)) - Table('ai_6', metadata, - Column('o1', String(1), DefaultClause('x'), - primary_key=True), - Column('int_y', Integer, primary_key=True)) - Table('ai_7', metadata, - 
Column('o1', String(1), DefaultClause('x'), - primary_key=True), - Column('o2', String(1), DefaultClause('x'), - primary_key=True), - Column('int_y', Integer, primary_key=True)) - Table('ai_8', metadata, - Column('o1', String(1), DefaultClause('x'), - primary_key=True), - Column('o2', String(1), DefaultClause('x'), - primary_key=True)) + Table( + 'ai_1', metadata, + Column('int_y', Integer, primary_key=True), + Column( + 'int_n', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False)) + Table( + 'ai_2', metadata, + Column('int_y', Integer, primary_key=True), + Column('int_n', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False)) + Table( + 'ai_3', metadata, + Column('int_n', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False), + Column('int_y', Integer, primary_key=True)) + + Table( + 'ai_4', metadata, + Column('int_n', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False), + Column('int_n2', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False)) + Table( + 'ai_5', metadata, + Column('int_y', Integer, primary_key=True), + Column('int_n', Integer, DefaultClause('0'), + primary_key=True, autoincrement=False)) + Table( + 'ai_6', metadata, + Column('o1', String(1), DefaultClause('x'), + primary_key=True), + Column('int_y', Integer, primary_key=True)) + Table( + 'ai_7', metadata, + Column('o1', String(1), DefaultClause('x'), + primary_key=True), + Column('o2', String(1), DefaultClause('x'), + primary_key=True), + Column('int_y', Integer, primary_key=True)) + Table( + 'ai_8', metadata, + Column('o1', String(1), DefaultClause('x'), + primary_key=True), + Column('o2', String(1), DefaultClause('x'), + primary_key=True)) metadata.create_all() table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4', - 'ai_5', 'ai_6', 'ai_7', 'ai_8'] + 'ai_5', 'ai_6', 'ai_7', 'ai_8'] mr = MetaData(testing.db) for name in table_names: @@ -586,27 +609,29 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl if testing.db.driver == 'mxodbc': eng = \ - [engines.testing_engine(options={'implicit_returning' - : True})] + [engines.testing_engine(options={ + 'implicit_returning': True})] else: eng = \ - [engines.testing_engine(options={'implicit_returning' - : False}), - engines.testing_engine(options={'implicit_returning' - : True})] + [engines.testing_engine(options={ + 'implicit_returning': False}), + engines.testing_engine(options={ + 'implicit_returning': True})] for counter, engine in enumerate(eng): engine.execute(tbl.insert()) if 'int_y' in tbl.c: assert engine.scalar(select([tbl.c.int_y])) \ == counter + 1 - assert list(engine.execute(tbl.select()).first()).\ - count(counter + 1) == 1 + assert list( + engine.execute(tbl.select()).first()).\ + count(counter + 1) == 1 else: assert 1 \ not in list(engine.execute(tbl.select()).first()) engine.execute(tbl.delete()) + class MonkeyPatchedBinaryTest(fixtures.TestBase): __only_on__ = 'mssql+pymssql' @@ -622,7 +647,12 @@ class MonkeyPatchedBinaryTest(fixtures.TestBase): result = module.Binary(input) eq_(result, expected_result) +binary_table = None +MyPickleType = None + + class BinaryTest(fixtures.TestBase, AssertsExecutionResults): + """Test the Binary and VarBinary types""" __only_on__ = 'mssql' @@ -655,7 +685,7 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults): Column('misc', String(30)), Column('pickled', PickleType), Column('mypickle', MyPickleType), - ) + ) binary_table.create() def teardown(self): @@ -679,7 +709,7 @@ class BinaryTest(fixtures.TestBase, 
AssertsExecutionResults): data_slice=stream1[0:100], pickled=testobj1, mypickle=testobj3, - ) + ) binary_table.insert().execute( primary_id=2, misc='binary_data_two.dat', @@ -687,7 +717,7 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults): data_image=stream2, data_slice=stream2[0:99], pickled=testobj2, - ) + ) # TODO: pyodbc does not seem to accept "None" for a VARBINARY # column (data=None). error: [Microsoft][ODBC SQL Server @@ -697,17 +727,21 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults): # misc='binary_data_two.dat', data=None, data_image=None, # data_slice=stream2[0:99], pickled=None) - binary_table.insert().execute(primary_id=3, - misc='binary_data_two.dat', data_image=None, - data_slice=stream2[0:99], pickled=None) + binary_table.insert().execute( + primary_id=3, + misc='binary_data_two.dat', data_image=None, + data_slice=stream2[0:99], pickled=None) for stmt in \ binary_table.select(order_by=binary_table.c.primary_id), \ - text('select * from binary_table order by ' - 'binary_table.primary_id', - typemap=dict(data=mssql.MSVarBinary(8000), - data_image=mssql.MSImage, - data_slice=types.BINARY(100), pickled=PickleType, - mypickle=MyPickleType), bind=testing.db): + text( + 'select * from binary_table order by ' + 'binary_table.primary_id', + typemap=dict( + data=mssql.MSVarBinary(8000), + data_image=mssql.MSImage, + data_slice=types.BINARY(100), pickled=PickleType, + mypickle=MyPickleType), + bind=testing.db): l = stmt.execute().fetchall() eq_(list(stream1), list(l[0]['data'])) paddedstream = list(stream1[0:100]) @@ -721,7 +755,8 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults): eq_(l[0]['mypickle'].stuff, 'this is the right stuff') def load_stream(self, name, len=3000): - fp = open(os.path.join(os.path.dirname(__file__), "..", "..", name), 'rb') + fp = open( + os.path.join(os.path.dirname(__file__), "..", "..", name), 'rb') stream = fp.read(len) fp.close() return stream -- cgit v1.2.1 From c24423bc2e3fd227bf4a86599e28407bd190ee9e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 6 Dec 2014 13:29:32 -0500 Subject: - enhance only_on() to work with compound specs - fix "temporary_tables" requirement --- lib/sqlalchemy/testing/exclusions.py | 2 +- lib/sqlalchemy/testing/requirements.py | 5 +++++ test/requirements.py | 12 +++++++++--- test/sql/test_metadata.py | 3 ++- 4 files changed, 17 insertions(+), 5 deletions(-) diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index f94724608..0aff43ae1 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -425,7 +425,7 @@ def skip(db, reason=None): def only_on(dbs, reason=None): return only_if( - OrPredicate([SpecPredicate(db) for db in util.to_list(dbs)]) + OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]) ) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index da3e3128a..5744431cb 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -322,6 +322,11 @@ class SuiteRequirements(Requirements): """target dialect supports listing of temporary table names""" return exclusions.closed() + @property + def temporary_tables(self): + """target database supports temporary tables""" + return exclusions.open() + @property def temporary_views(self): """target database supports temporary views""" diff --git a/test/requirements.py b/test/requirements.py index d1b7913f0..22ac13fe8 100644 --- a/test/requirements.py +++ 
b/test/requirements.py @@ -127,9 +127,15 @@ class DefaultRequirements(SuiteRequirements): ) @property - def temporary_table(self): - """Target database must support CREATE TEMPORARY TABLE""" - return exclusions.open() + def temporary_tables(self): + """target database supports temporary tables""" + return skip_if( + ["mssql"], "sql server has some other syntax?" + ) + + @property + def temp_table_reflection(self): + return self.temporary_tables @property def reflectable_autoincrement(self): diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 0aa5d7305..52ecf88c5 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -1160,9 +1160,10 @@ class InfoTest(fixtures.TestBase): t = Table('x', MetaData(), info={'foo': 'bar'}) eq_(t.info, {'foo': 'bar'}) + class TableTest(fixtures.TestBase, AssertsCompiledSQL): - @testing.requires.temporary_table + @testing.requires.temporary_tables @testing.skip_if('mssql', 'different col format') def test_prefixes(self): from sqlalchemy import Table -- cgit v1.2.1 From c8817e608788799837a91b1d2616227594698d2b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 6 Dec 2014 13:30:51 -0500 Subject: - SQL Server 2012 now recommends VARCHAR(max), NVARCHAR(max), VARBINARY(max) for large text/binary types. The MSSQL dialect will now respect this based on version detection, as well as the new ``deprecate_large_types`` flag. fixes #3039 --- doc/build/changelog/changelog_10.rst | 13 +++++ doc/build/changelog/migration_10.rst | 8 +++ lib/sqlalchemy/dialects/mssql/base.py | 105 +++++++++++++++++++++++++++++++--- lib/sqlalchemy/sql/sqltypes.py | 2 +- test/dialect/mssql/test_engine.py | 3 +- test/dialect/mssql/test_reflection.py | 11 +++- test/dialect/mssql/test_types.py | 71 ++++++++++++++++++++--- test/requirements.py | 12 ++++ 8 files changed, 201 insertions(+), 24 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index 9cc144fc6..6d99095d9 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,19 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: feature, mssql + :tickets: 3039 + + SQL Server 2012 now recommends VARCHAR(max), NVARCHAR(max), + VARBINARY(max) for large text/binary types. The MSSQL dialect will + now respect this based on version detection, as well as the new + ``deprecate_large_types`` flag. + + .. seealso:: + + :ref:`mssql_large_type_deprecation` + .. change:: :tags: bug, sqlite :tickets: 3257 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 15e066a75..562bb9f1b 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1619,6 +1619,14 @@ when using ODBC to avoid this issue entirely. :ticket:`3182` +SQL Server 2012 large text / binary types render as VARCHAR, NVARCHAR, VARBINARY +-------------------------------------------------------------------------------- + +The rendering of the :class:`.Text`, :class:`.UnicodeText`, and :class:`.LargeBinary` +types has been changed for SQL Server 2012 and greater, with options +to control the behavior completely, based on deprecation guidelines from +Microsoft. See :ref:`mssql_large_type_deprecation` for details. + .. 
_change_3204: SQLite/Oracle have distinct methods for temporary table/view name reporting diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index dad02ee0f..5d84975c0 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -226,6 +226,53 @@ The DATE and TIME types are not available for MSSQL 2005 and previous - if a server version below 2008 is detected, DDL for these types will be issued as DATETIME. +.. _mssql_large_type_deprecation: + +Large Text/Binary Type Deprecation +---------------------------------- + +Per `SQL Server 2012/2014 Documentation `_, +the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server +in a future release. SQLAlchemy normally relates these types to the +:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes. + +In order to accommodate this change, a new flag ``deprecate_large_types`` +is added to the dialect, which will be automatically set based on detection +of the server version in use, if not otherwise set by the user. The +behavior of this flag is as follows: + +* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``, + respectively. This is a new behavior as of the addition of this flag. + +* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NTEXT``, ``TEXT``, and ``IMAGE``, + respectively. This is the long-standing behavior of these types. + +* The flag begins with the value ``None``, before a database connection is + established. If the dialect is used to render DDL without the flag being + set, it is interpreted the same as ``False``. + +* On first connection, the dialect detects if SQL Server version 2012 or greater + is in use; if the flag is still at ``None``, it sets it to ``True`` or + ``False`` based on whether 2012 or greater is detected. + +* The flag can be set to either ``True`` or ``False`` when the dialect + is created, typically via :func:`.create_engine`:: + + eng = create_engine("mssql+pymssql://user:pass@host/db", + deprecate_large_types=True) + +* Complete control over whether the "old" or "new" types are rendered is + available in all SQLAlchemy versions by using the UPPERCASE type objects + instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`, + :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain + fixed and always output exactly that type. + +.. versionadded:: 1.0.0 + .. _mssql_indexes: Clustered Index Support @@ -367,19 +414,20 @@ import operator import re from ... import sql, schema as sa_schema, exc, util -from ...sql import compiler, expression, \ - util as sql_util, cast +from ...sql import compiler, expression, util as sql_util from ... import engine from ...engine import reflection, default from ... import types as sqltypes from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ - VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR + TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR from ...util import update_wrapper from . 
import information_schema as ischema +# http://sqlserverbuilds.blogspot.com/ +MS_2012_VERSION = (11,) MS_2008_VERSION = (10,) MS_2005_VERSION = (9,) MS_2000_VERSION = (8,) @@ -545,6 +593,26 @@ class NTEXT(sqltypes.UnicodeText): __visit_name__ = 'NTEXT' +class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): + """The MSSQL VARBINARY type. + + This type extends both :class:`.types.VARBINARY` and + :class:`.types.LargeBinary`. In "deprecate_large_types" mode, + the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)`` + on SQL Server. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :ref:`mssql_large_type_deprecation` + + + + """ + __visit_name__ = 'VARBINARY' + + class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' @@ -683,8 +751,17 @@ class MSTypeCompiler(compiler.GenericTypeCompiler): def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) + def visit_text(self, type_): + if self.dialect.deprecate_large_types: + return self.visit_VARCHAR(type_) + else: + return self.visit_TEXT(type_) + def visit_unicode_text(self, type_): - return self.visit_NTEXT(type_) + if self.dialect.deprecate_large_types: + return self.visit_NVARCHAR(type_) + else: + return self.visit_NTEXT(type_) def visit_NTEXT(self, type_): return self._extend("NTEXT", type_) @@ -717,7 +794,10 @@ class MSTypeCompiler(compiler.GenericTypeCompiler): return self.visit_TIME(type_) def visit_large_binary(self, type_): - return self.visit_IMAGE(type_) + if self.dialect.deprecate_large_types: + return self.visit_VARBINARY(type_) + else: + return self.visit_IMAGE(type_) def visit_IMAGE(self, type_): return "IMAGE" @@ -1370,13 +1450,15 @@ class MSDialect(default.DefaultDialect): query_timeout=None, use_scope_identity=True, max_identifier_length=None, - schema_name="dbo", **opts): + schema_name="dbo", + deprecate_large_types=None, **opts): self.query_timeout = int(query_timeout or 0) self.schema_name = schema_name self.use_scope_identity = use_scope_identity self.max_identifier_length = int(max_identifier_length or 0) or \ self.max_identifier_length + self.deprecate_large_types = deprecate_large_types super(MSDialect, self).__init__(**opts) def do_savepoint(self, connection, name): @@ -1390,6 +1472,9 @@ class MSDialect(default.DefaultDialect): def initialize(self, connection): super(MSDialect, self).initialize(connection) + self._setup_version_attributes() + + def _setup_version_attributes(self): if self.server_version_info[0] not in list(range(8, 17)): # FreeTDS with version 4.2 seems to report here # a number like "95.10.255". 
Don't know what @@ -1405,6 +1490,9 @@ class MSDialect(default.DefaultDialect): self.implicit_returning = True if self.server_version_info >= MS_2008_VERSION: self.supports_multivalues_insert = True + if self.deprecate_large_types is None: + self.deprecate_large_types = \ + self.server_version_info >= MS_2012_VERSION def _get_default_schema_name(self, connection): if self.server_version_info < MS_2005_VERSION: @@ -1592,12 +1680,11 @@ class MSDialect(default.DefaultDialect): if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, MSNText, MSBinary, MSVarBinary, sqltypes.LargeBinary): + if charlen == -1: + charlen = 'max' kwargs['length'] = charlen if collation: kwargs['collation'] = collation - if coltype == MSText or \ - (coltype in (MSString, MSNVarchar) and charlen == -1): - kwargs.pop('length') if coltype is None: util.warn( diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 94db1d837..9a2de39b4 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -894,7 +894,7 @@ class LargeBinary(_Binary): :param length: optional, a length for the column for use in DDL statements, for those BLOB types that accept a length - (i.e. MySQL). It does *not* produce a small BINARY/VARBINARY + (i.e. MySQL). It does *not* produce a *lengthed* BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no ``CREATE TABLE`` will be issued. Certain databases may require a diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index 4b4780d43..a994b1787 100644 --- a/test/dialect/mssql/test_engine.py +++ b/test/dialect/mssql/test_engine.py @@ -157,8 +157,7 @@ class ParseConnectTest(fixtures.TestBase): eq_(dialect.is_disconnect("not an error", None, None), False) - @testing.only_on(['mssql+pyodbc', 'mssql+pymssql'], - "FreeTDS specific test") + @testing.requires.mssql_freetds def test_bad_freetds_warning(self): engine = engines.testing_engine() diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index 0ef69f656..bee441586 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -24,14 +24,14 @@ class ReflectionTest(fixtures.TestBase, ComparesTables): Column('user_name', types.VARCHAR(20), nullable=False), Column('test1', types.CHAR(5), nullable=False), Column('test2', types.Float(5), nullable=False), - Column('test3', types.Text), + Column('test3', types.Text('max')), Column('test4', types.Numeric, nullable=False), Column('test5', types.DateTime), Column('parent_user_id', types.Integer, ForeignKey('engine_users.user_id')), Column('test6', types.DateTime, nullable=False), - Column('test7', types.Text), - Column('test8', types.LargeBinary), + Column('test7', types.Text('max')), + Column('test8', types.LargeBinary('max')), Column('test_passivedefault2', types.Integer, server_default='5'), Column('test9', types.BINARY(100)), @@ -204,6 +204,11 @@ class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL): class ReflectHugeViewTest(fixtures.TestBase): __only_on__ = 'mssql' + # crashes on freetds 0.91, not worth it + __skip_if__ = ( + lambda: testing.requires.mssql_freetds.enabled, + ) + def setup(self): self.col_num = 150 diff --git a/test/dialect/mssql/test_types.py b/test/dialect/mssql/test_types.py index 24f0eb0be..5c9157379 100644 --- a/test/dialect/mssql/test_types.py +++ b/test/dialect/mssql/test_types.py @@ -4,7 +4,8 @@ import datetime import os from sqlalchemy import Table, Column, MetaData, 
Float, \ Integer, String, Boolean, TIMESTAMP, Sequence, Numeric, select, \ - Date, Time, DateTime, DefaultClause, PickleType, text + Date, Time, DateTime, DefaultClause, PickleType, text, Text, \ + UnicodeText, LargeBinary from sqlalchemy import types, schema from sqlalchemy.databases import mssql from sqlalchemy.dialects.mssql.base import TIME @@ -172,6 +173,44 @@ class TypeDDLTest(fixtures.TestBase): "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) + def test_large_type_deprecation(self): + d1 = mssql.dialect(deprecate_large_types=True) + d2 = mssql.dialect(deprecate_large_types=False) + d3 = mssql.dialect() + d3.server_version_info = (11, 0) + d3._setup_version_attributes() + d4 = mssql.dialect() + d4.server_version_info = (10, 0) + d4._setup_version_attributes() + + for dialect in (d1, d3): + eq_( + str(Text().compile(dialect=dialect)), + "VARCHAR(max)" + ) + eq_( + str(UnicodeText().compile(dialect=dialect)), + "NVARCHAR(max)" + ) + eq_( + str(LargeBinary().compile(dialect=dialect)), + "VARBINARY(max)" + ) + + for dialect in (d2, d4): + eq_( + str(Text().compile(dialect=dialect)), + "TEXT" + ) + eq_( + str(UnicodeText().compile(dialect=dialect)), + "NTEXT" + ) + eq_( + str(LargeBinary().compile(dialect=dialect)), + "IMAGE" + ) + def test_timestamp(self): """Exercise TIMESTAMP column.""" @@ -485,18 +524,18 @@ class TypeRoundTripTest( @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*') @testing.provide_metadata - def test_binary_reflection(self): + def _test_binary_reflection(self, deprecate_large_types): "Exercise type specification for binary types." columns = [ - # column type, args, kwargs, expected ddl + # column type, args, kwargs, expected ddl from reflected (mssql.MSBinary, [], {}, - 'BINARY'), + 'BINARY(1)'), (mssql.MSBinary, [10], {}, 'BINARY(10)'), (types.BINARY, [], {}, - 'BINARY'), + 'BINARY(1)'), (types.BINARY, [10], {}, 'BINARY(10)'), @@ -517,10 +556,12 @@ class TypeRoundTripTest( 'IMAGE'), (types.LargeBinary, [], {}, - 'IMAGE'), + 'IMAGE' if not deprecate_large_types else 'VARBINARY(max)'), ] metadata = self.metadata + metadata.bind = engines.testing_engine( + options={"deprecate_large_types": deprecate_large_types}) table_args = ['test_mssql_binary', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec @@ -530,17 +571,29 @@ class TypeRoundTripTest( metadata.create_all() reflected_binary = Table('test_mssql_binary', MetaData(testing.db), autoload=True) - for col in reflected_binary.c: + for col, spec in zip(reflected_binary.c, columns): + eq_( + str(col.type), spec[3], + "column %s %s != %s" % (col.key, str(col.type), spec[3]) + ) c1 = testing.db.dialect.type_descriptor(col.type).__class__ c2 = \ testing.db.dialect.type_descriptor( binary_table.c[col.name].type).__class__ - assert issubclass(c1, c2), '%r is not a subclass of %r' \ - % (c1, c2) + assert issubclass(c1, c2), \ + 'column %s: %r is not a subclass of %r' \ + % (col.key, c1, c2) if binary_table.c[col.name].type.length: testing.eq_(col.type.length, binary_table.c[col.name].type.length) + def test_binary_reflection_legacy_large_types(self): + self._test_binary_reflection(False) + + @testing.only_on('mssql >= 11') + def test_binary_reflection_sql2012_large_types(self): + self._test_binary_reflection(True) + def test_autoincrement(self): Table( 'ai_1', metadata, diff --git a/test/requirements.py b/test/requirements.py index 22ac13fe8..ffbdfba23 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -460,6 +460,7 @@ class 
DefaultRequirements(SuiteRequirements): ) + @property def emulated_lastrowid(self): """"target dialect retrieves cursor.lastrowid or an equivalent @@ -777,6 +778,17 @@ class DefaultRequirements(SuiteRequirements): "Not supported on MySQL + Windows" ) + @property + def mssql_freetds(self): + return only_on( + LambdaPredicate( + lambda config: ( + (against(config, 'mssql+pyodbc') and + config.db.dialect.freetds) + or against(config, 'mssql+pymssql') + ) + ) + ) @property def selectone(self): -- cgit v1.2.1 From 60e6ac8856e5f7f257e1797280d1510682ae8fb7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 7 Dec 2014 18:54:52 -0500 Subject: - rework the assert_sql system so that we have a context manager to work with, use events that are local to the engine and to the run and are removed afterwards. --- lib/sqlalchemy/testing/assertions.py | 13 +++-- lib/sqlalchemy/testing/assertsql.py | 92 ++++++++++++++++++++++++++---------- lib/sqlalchemy/testing/engines.py | 3 -- 3 files changed, 75 insertions(+), 33 deletions(-) diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index bf7c27a89..66d1f3cb0 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -405,13 +405,16 @@ class AssertsExecutionResults(object): cls.__name__, repr(expected_item))) return True + def sql_execution_asserter(self, db=None): + if db is None: + from . import db as db + + return assertsql.assert_engine(db) + def assert_sql_execution(self, db, callable_, *rules): - assertsql.asserter.add_rules(rules) - try: + with self.sql_execution_asserter(db) as asserter: callable_() - assertsql.asserter.statement_complete() - finally: - assertsql.asserter.clear_rules() + asserter.assert_(*rules) def assert_sql(self, db, callable_, list_, with_sequences=None): if (with_sequences is not None and diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index bcc999fe3..2ac0605a2 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -8,6 +8,9 @@ from ..engine.default import DefaultDialect from .. import util import re +import collections +import contextlib +from .. 
import event class AssertRule(object): @@ -321,39 +324,78 @@ def _process_assertion_statement(query, context): return query -class SQLAssert(object): +class SQLExecuteObserved( + collections.namedtuple( + "SQLExecuteObserved", ["clauseelement", "multiparams", "params"]) +): + def process(self, rules): + if rules is not None: + if not rules: + assert False, \ + 'All rules have been exhausted, but further '\ + 'statements remain' + rule = rules[0] + rule.process_execute( + self.clauseelement, *self.multiparams, **self.params) + if rule.is_consumed(): + rules.pop(0) - rules = None - def add_rules(self, rules): - self.rules = list(rules) +class SQLCursorExecuteObserved( + collections.namedtuple( + "SQLCursorExecuteObserved", + ["statement", "parameters", "context", "executemany"]) +): + def process(self, rules): + if rules: + rule = rules[0] + rule.process_cursor_execute( + self.statement, self.parameters, + self.context, self.executemany) - def statement_complete(self): - for rule in self.rules: + +class SQLAsserter(object): + def __init__(self): + self.accumulated = [] + + def _close(self): + # safety feature in case event.remove + # goes haywire + self._final = self.accumulated + del self.accumulated + + def assert_(self, *rules): + rules = list(rules) + for observed in self._final: + observed.process(rules) + + for rule in rules: if not rule.consume_final(): assert False, \ 'All statements are complete, but pending '\ 'assertion rules remain' - def clear_rules(self): - del self.rules - def execute(self, conn, clauseelement, multiparams, params, result): - if self.rules is not None: - if not self.rules: - assert False, \ - 'All rules have been exhausted, but further '\ - 'statements remain' - rule = self.rules[0] - rule.process_execute(clauseelement, *multiparams, **params) - if rule.is_consumed(): - self.rules.pop(0) +@contextlib.contextmanager +def assert_engine(engine): + asserter = SQLAsserter() - def cursor_execute(self, conn, cursor, statement, parameters, - context, executemany): - if self.rules: - rule = self.rules[0] - rule.process_cursor_execute(statement, parameters, context, - executemany) + @event.listens_for(engine, "after_execute") + def execute(conn, clauseelement, multiparams, params, result): + asserter.accumulated.append( + SQLExecuteObserved( + clauseelement, multiparams, params)) -asserter = SQLAssert() + @event.listens_for(engine, "after_cursor_execute") + def cursor_execute(conn, cursor, statement, parameters, + context, executemany): + asserter.accumulated.append( + SQLCursorExecuteObserved( + statement, parameters, context, executemany)) + + try: + yield asserter + finally: + asserter._close() + event.remove(engine, "after_cursor_execute", cursor_execute) + event.remove(engine, "after_execute", execute) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 0f6f59401..7d73e7423 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -204,7 +204,6 @@ def testing_engine(url=None, options=None): """Produce an engine configured by --options with optional overrides.""" from sqlalchemy import create_engine - from .assertsql import asserter if not options: use_reaper = True @@ -219,8 +218,6 @@ def testing_engine(url=None, options=None): if isinstance(engine.pool, pool.QueuePool): engine.pool._timeout = 0 engine.pool._max_overflow = 0 - event.listen(engine, 'after_execute', asserter.execute) - event.listen(engine, 'after_cursor_execute', asserter.cursor_execute) if use_reaper: event.listen(engine.pool, 
'connect', testing_reaper.connect) event.listen(engine.pool, 'checkout', testing_reaper.checkout) -- cgit v1.2.1 From e257ca6c5268517ec2e9a561372d82dfc10475e8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 7 Dec 2014 18:55:23 -0500 Subject: - initial tests for bulk --- lib/sqlalchemy/orm/session.py | 3 +- test/orm/test_bulk.py | 317 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 319 insertions(+), 1 deletion(-) create mode 100644 test/orm/test_bulk.py diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index ef911824c..7dd577230 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2056,7 +2056,8 @@ class Session(_SessionClassMethods): mapper, states, isupdate, True, return_defaults) def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): - self._bulk_save_mappings(mapper, mappings, False, False, return_defaults) + self._bulk_save_mappings( + mapper, mappings, False, False, return_defaults) def bulk_update_mappings(self, mapper, mappings): self._bulk_save_mappings(mapper, mappings, True, False, False) diff --git a/test/orm/test_bulk.py b/test/orm/test_bulk.py new file mode 100644 index 000000000..4bcde2480 --- /dev/null +++ b/test/orm/test_bulk.py @@ -0,0 +1,317 @@ +from sqlalchemy import testing +from sqlalchemy.testing import eq_ +from sqlalchemy.testing.schema import Table, Column +from sqlalchemy.testing import fixtures +from sqlalchemy import Integer, String, ForeignKey +from sqlalchemy.orm import mapper, Session +from sqlalchemy.testing.assertsql import CompiledSQL +from test.orm import _fixtures + + +class BulkTest(testing.AssertsExecutionResults): + run_inserts = None + + +class BulkInsertTest(BulkTest, _fixtures.FixtureTest): + + @classmethod + def setup_mappers(cls): + User, Address = cls.classes("User", "Address") + u, a = cls.tables("users", "addresses") + + mapper(User, u) + mapper(Address, a) + + def test_bulk_save_return_defaults(self): + User, = self.classes("User",) + + s = Session() + objects = [ + User(name="u1"), + User(name="u2"), + User(name="u3") + ] + assert 'id' not in objects[0].__dict__ + + with self.sql_execution_asserter() as asserter: + s.bulk_save_objects(objects, return_defaults=True) + + asserter.assert_( + CompiledSQL( + "INSERT INTO users (name) VALUES (:name)", + [{'name': 'u1'}] + ), + CompiledSQL( + "INSERT INTO users (name) VALUES (:name)", + [{'name': 'u2'}] + ), + CompiledSQL( + "INSERT INTO users (name) VALUES (:name)", + [{'name': 'u3'}] + ), + ) + eq_(objects[0].__dict__['id'], 1) + + def test_bulk_save_no_defaults(self): + User, = self.classes("User",) + + s = Session() + objects = [ + User(name="u1"), + User(name="u2"), + User(name="u3") + ] + assert 'id' not in objects[0].__dict__ + + with self.sql_execution_asserter() as asserter: + s.bulk_save_objects(objects) + + asserter.assert_( + CompiledSQL( + "INSERT INTO users (name) VALUES (:name)", + [{'name': 'u1'}, {'name': 'u2'}, {'name': 'u3'}] + ), + ) + assert 'id' not in objects[0].__dict__ + + +class BulkInheritanceTest(fixtures.MappedTest, BulkTest): + @classmethod + def define_tables(cls, metadata): + Table( + 'people', metadata, + Column( + 'person_id', Integer, + primary_key=True, + test_needs_autoincrement=True), + Column('name', String(50)), + Column('type', String(30))) + + Table( + 'engineers', metadata, + Column( + 'person_id', Integer, + ForeignKey('people.person_id'), + primary_key=True), + Column('status', String(30)), + Column('primary_language', String(50))) + + Table( + 'managers', 
metadata, + Column( + 'person_id', Integer, + ForeignKey('people.person_id'), + primary_key=True), + Column('status', String(30)), + Column('manager_name', String(50))) + + Table( + 'boss', metadata, + Column( + 'boss_id', Integer, + ForeignKey('managers.person_id'), + primary_key=True), + Column('golf_swing', String(30))) + + @classmethod + def setup_classes(cls): + class Base(cls.Comparable): + pass + + class Person(Base): + pass + + class Engineer(Person): + pass + + class Manager(Person): + pass + + class Boss(Manager): + pass + + @classmethod + def setup_mappers(cls): + Person, Engineer, Manager, Boss = \ + cls.classes('Person', 'Engineer', 'Manager', 'Boss') + p, e, m, b = cls.tables('people', 'engineers', 'managers', 'boss') + + mapper( + Person, p, polymorphic_on=p.c.type, + polymorphic_identity='person') + mapper(Engineer, e, inherits=Person, polymorphic_identity='engineer') + mapper(Manager, m, inherits=Person, polymorphic_identity='manager') + mapper(Boss, b, inherits=Manager, polymorphic_identity='boss') + + def test_bulk_save_joined_inh_return_defaults(self): + Person, Engineer, Manager, Boss = \ + self.classes('Person', 'Engineer', 'Manager', 'Boss') + + s = Session() + objects = [ + Manager(name='m1', status='s1', manager_name='mn1'), + Engineer(name='e1', status='s2', primary_language='l1'), + Engineer(name='e2', status='s3', primary_language='l2'), + Boss( + name='b1', status='s3', manager_name='mn2', + golf_swing='g1') + ] + assert 'person_id' not in objects[0].__dict__ + + with self.sql_execution_asserter() as asserter: + s.bulk_save_objects(objects, return_defaults=True) + + asserter.assert_( + CompiledSQL( + "INSERT INTO people (name, type) VALUES (:name, :type)", + [{'type': 'manager', 'name': 'm1'}] + ), + CompiledSQL( + "INSERT INTO managers (person_id, status, manager_name) " + "VALUES (:person_id, :status, :manager_name)", + [{'person_id': 1, 'status': 's1', 'manager_name': 'mn1'}] + ), + CompiledSQL( + "INSERT INTO people (name, type) VALUES (:name, :type)", + [{'type': 'engineer', 'name': 'e1'}] + ), + CompiledSQL( + "INSERT INTO people (name, type) VALUES (:name, :type)", + [{'type': 'engineer', 'name': 'e2'}] + ), + CompiledSQL( + "INSERT INTO engineers (person_id, status, primary_language) " + "VALUES (:person_id, :status, :primary_language)", + [{'person_id': 2, 'status': 's2', 'primary_language': 'l1'}, + {'person_id': 3, 'status': 's3', 'primary_language': 'l2'}] + + ), + CompiledSQL( + "INSERT INTO people (name, type) VALUES (:name, :type)", + [{'type': 'boss', 'name': 'b1'}] + ), + CompiledSQL( + "INSERT INTO managers (person_id, status, manager_name) " + "VALUES (:person_id, :status, :manager_name)", + [{'person_id': 4, 'status': 's3', 'manager_name': 'mn2'}] + + ), + CompiledSQL( + "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", + [{'golf_swing': 'g1'}] + ) + ) + eq_(objects[0].__dict__['person_id'], 1) + + def test_bulk_save_joined_inh_no_defaults(self): + Person, Engineer, Manager, Boss = \ + self.classes('Person', 'Engineer', 'Manager', 'Boss') + + s = Session() + with self.sql_execution_asserter() as asserter: + s.bulk_save_objects([ + Manager( + person_id=1, + name='m1', status='s1', manager_name='mn1'), + Engineer( + person_id=2, + name='e1', status='s2', primary_language='l1'), + Engineer( + person_id=3, + name='e2', status='s3', primary_language='l2'), + Boss( + person_id=4, + name='b1', status='s3', manager_name='mn2', + golf_swing='g1') + ], + + ) + + # the only difference here is that common classes are grouped together. 
+ # at the moment it doesn't lump all the "people" tables from + # different classes together. + asserter.assert_( + CompiledSQL( + "INSERT INTO people (person_id, name, type) VALUES " + "(:person_id, :name, :type)", + [{'person_id': 1, 'type': 'manager', 'name': 'm1'}] + ), + CompiledSQL( + "INSERT INTO managers (person_id, status, manager_name) " + "VALUES (:person_id, :status, :manager_name)", + [{'status': 's1', 'person_id': 1, 'manager_name': 'mn1'}] + ), + CompiledSQL( + "INSERT INTO people (person_id, name, type) VALUES " + "(:person_id, :name, :type)", + [{'person_id': 2, 'type': 'engineer', 'name': 'e1'}, + {'person_id': 3, 'type': 'engineer', 'name': 'e2'}] + ), + CompiledSQL( + "INSERT INTO engineers (person_id, status, primary_language) " + "VALUES (:person_id, :status, :primary_language)", + [{'person_id': 2, 'status': 's2', 'primary_language': 'l1'}, + {'person_id': 3, 'status': 's3', 'primary_language': 'l2'}] + ), + CompiledSQL( + "INSERT INTO people (person_id, name, type) VALUES " + "(:person_id, :name, :type)", + [{'person_id': 4, 'type': 'boss', 'name': 'b1'}] + ), + CompiledSQL( + "INSERT INTO managers (person_id, status, manager_name) " + "VALUES (:person_id, :status, :manager_name)", + [{'status': 's3', 'person_id': 4, 'manager_name': 'mn2'}] + ), + CompiledSQL( + "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", + [{'golf_swing': 'g1'}] + ) + ) + + def test_bulk_insert_joined_inh_return_defaults(self): + Person, Engineer, Manager, Boss = \ + self.classes('Person', 'Engineer', 'Manager', 'Boss') + + s = Session() + with self.sql_execution_asserter() as asserter: + s.bulk_insert_mappings( + Boss, + [ + dict( + name='b1', status='s1', manager_name='mn1', + golf_swing='g1' + ), + dict( + name='b2', status='s2', manager_name='mn2', + golf_swing='g2' + ), + dict( + name='b3', status='s3', manager_name='mn3', + golf_swing='g3' + ), + ] + ) + + # the only difference here is that common classes are grouped together. + # at the moment it doesn't lump all the "people" tables from + # different classes together. 
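+        # Expect a single executemany INSERT per table in the Boss
+        # hierarchy (people, managers, boss), each statement batching all
+        # three rows at once; no primary key values are rendered since
+        # return_defaults was not requested.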
+ asserter.assert_( + CompiledSQL( + "INSERT INTO people (name) VALUES (:name)", + [{'name': 'b1'}, {'name': 'b2'}, {'name': 'b3'}] + ), + CompiledSQL( + "INSERT INTO managers (status, manager_name) VALUES " + "(:status, :manager_name)", + [{'status': 's1', 'manager_name': 'mn1'}, + {'status': 's2', 'manager_name': 'mn2'}, + {'status': 's3', 'manager_name': 'mn3'}] + + ), + CompiledSQL( + "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", + [{'golf_swing': 'g1'}, + {'golf_swing': 'g2'}, {'golf_swing': 'g3'}] + ) + ) -- cgit v1.2.1 From c42b8f8eb8f4c324e2469bf3baaa316c214abce5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 7 Dec 2014 20:21:20 -0500 Subject: - fix inheritance persistence - start writing docs --- lib/sqlalchemy/orm/persistence.py | 15 ++-- lib/sqlalchemy/orm/session.py | 158 ++++++++++++++++++++++++++++++++++++++ lib/sqlalchemy/orm/sync.py | 17 ++++ test/orm/test_bulk.py | 50 +++++++----- 4 files changed, 215 insertions(+), 25 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 81024c41f..d94fbb040 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -49,7 +49,7 @@ def _bulk_insert( continue records = ( - (None, state_dict, params, super_mapper, + (None, state_dict, params, mapper, connection, value_params, has_all_pks, has_all_defaults) for state, state_dict, params, mp, @@ -918,7 +918,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, states): def _postfetch(mapper, uowtransaction, table, - state, dict_, result, params, value_params): + state, dict_, result, params, value_params, bulk=False): """Expire attributes in need of newly persisted database state, after an INSERT or UPDATE statement has proceeded for that state.""" @@ -954,10 +954,13 @@ def _postfetch(mapper, uowtransaction, table, # TODO: this still goes a little too often. would be nice to # have definitive list of "columns that changed" here for m, equated_pairs in mapper._table_to_equated[table]: - sync.populate(state, m, state, m, - equated_pairs, - uowtransaction, - mapper.passive_updates) + if state is None: + sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) + else: + sync.populate(state, m, state, m, + equated_pairs, + uowtransaction, + mapper.passive_updates) def _connections_for_states(base_mapper, uowtransaction, states): diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 7dd577230..e07b4554e 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2048,6 +2048,66 @@ class Session(_SessionClassMethods): transaction.rollback(_capture_exception=True) def bulk_save_objects(self, objects, return_defaults=False): + """Perform a bulk save of the given list of objects. + + The bulk save feature allows mapped objects to be used as the + source of simple INSERT and UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations; the extraction of data from the objects is also performed + using a lower-latency process that ignores whether or not attributes + have actually been modified in the case of UPDATEs, and also ignores + SQL expressions. + + The objects as given are not added to the session and no additional + state is established on them, unless the ``return_defaults`` flag + is also set. + + .. warning:: + + The bulk save feature allows for a lower-latency INSERT/UPDATE + of rows at the expense of a lack of features. 
Features such + as object management, relationship handling, and SQL clause + support are bypassed in favor of raw INSERT/UPDATES of records. + + **Please read the list of caveats at :ref:`bulk_operations` + before using this method.** + + :param objects: a list of mapped object instances. The mapped + objects are persisted as is, and are **not** associated with the + :class:`.Session` afterwards. + + For each object, whether the object is sent as an INSERT or an + UPDATE is dependent on the same rules used by the :class:`.Session` + in traditional operation; if the object has the + :attr:`.InstanceState.key` + attribute set, then the object is assumed to be "detached" and + will result in an UPDATE. Otherwise, an INSERT is used. + + In the case of an UPDATE, **all** those attributes which are present + and are not part of the primary key are applied to the SET clause + of the UPDATE statement, regardless of whether any change in state + was logged on each attribute; there is no checking of per-attribute + history. The primary key attributes, which are required, + are applied to the WHERE clause. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary key values ahead of time; however, + return_defaults mode greatly reduces the performance gains of the + method overall. + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_update_mappings` + + """ for (mapper, isupdate), states in itertools.groupby( (attributes.instance_state(obj) for obj in objects), lambda state: (state.mapper, state.key is not None) @@ -2056,10 +2116,108 @@ class Session(_SessionClassMethods): mapper, states, isupdate, True, return_defaults) def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): + """Perform a bulk insert of the given list of mapping dictionaries. + + The bulk insert feature allows plain Python dictionaries to be used as + the source of simple INSERT operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there is no "history" or session + state management features in use, reducing latency when inserting + large numbers of simple rows. + + The values within the dictionaries as given are typically passed + without modification into Core :meth:`.Insert` constructs, after + organizing the values within them across the tables to which + the given mapper is mapped. + + .. warning:: + + The bulk insert feature allows for a lower-latency INSERT + of rows at the expense of a lack of features. Features such + as relationship handling and SQL clause support are bypassed + in favor of a raw INSERT of records. + + **Please read the list of caveats at :ref:`bulk_operations` + before using this method.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be inserted, in terms of the attribute names + on the mapped class. 
If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary must contain + all keys to be populated into all tables. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary + key values ahead of time; however, return_defaults mode greatly + reduces the performance gains of the method overall. If the rows + to be inserted only refer to a single table, then there is no + reason this flag should be set as the returned default information + is not used. + + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_save_objects` + + :meth:`.Session.bulk_update_mappings` + + """ self._bulk_save_mappings( mapper, mappings, False, False, return_defaults) def bulk_update_mappings(self, mapper, mappings): + """Perform a bulk update of the given list of mapping dictionaries. + + The bulk update feature allows plain Python dictionaries to be used as + the source of simple UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there is no "history" or session + state management features in use, reducing latency when updating + large numbers of simple rows. + + .. warning:: + + The bulk update feature allows for a lower-latency UPDATE + of rows at the expense of a lack of features. Features such + as relationship handling and SQL clause support are bypassed + in favor of a raw UPDATE of records. + + **Please read the list of caveats at :ref:`bulk_operations` + before using this method.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be updated, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary may contain + keys corresponding to all tables. All those keys which are present + and are not part of the primary key are applied to the SET clause + of the UPDATE statement; the primary key values, which are required, + are applied to the WHERE clause. + + + .. 
seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_save_objects` + + """ self._bulk_save_mappings(mapper, mappings, True, False, False) def _bulk_save_mappings( diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index e1ef85c1d..671c7c067 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -45,6 +45,23 @@ def populate(source, source_mapper, dest, dest_mapper, uowcommit.attributes[("pk_cascaded", dest, r)] = True +def bulk_populate_inherit_keys( + source_dict, source_mapper, synchronize_pairs): + # a simplified version of populate() used by bulk insert mode + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + value = source_dict[prop.key] + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, source_mapper, r) + + try: + prop = source_mapper._columntoproperty[r] + source_dict[prop.key] = value + except exc.UnmappedColumnError: + _raise_col_to_prop(True, source_mapper, l, source_mapper, r) + + def clear(dest, dest_mapper, synchronize_pairs): for l, r in synchronize_pairs: if r.primary_key and \ diff --git a/test/orm/test_bulk.py b/test/orm/test_bulk.py index 4bcde2480..f6d2513d1 100644 --- a/test/orm/test_bulk.py +++ b/test/orm/test_bulk.py @@ -10,6 +10,7 @@ from test.orm import _fixtures class BulkTest(testing.AssertsExecutionResults): run_inserts = None + run_define_tables = 'each' class BulkInsertTest(BulkTest, _fixtures.FixtureTest): @@ -75,7 +76,7 @@ class BulkInsertTest(BulkTest, _fixtures.FixtureTest): assert 'id' not in objects[0].__dict__ -class BulkInheritanceTest(fixtures.MappedTest, BulkTest): +class BulkInheritanceTest(BulkTest, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( @@ -197,11 +198,14 @@ class BulkInheritanceTest(fixtures.MappedTest, BulkTest): ), CompiledSQL( - "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", - [{'golf_swing': 'g1'}] + "INSERT INTO boss (boss_id, golf_swing) VALUES " + "(:boss_id, :golf_swing)", + [{'boss_id': 4, 'golf_swing': 'g1'}] ) ) eq_(objects[0].__dict__['person_id'], 1) + eq_(objects[3].__dict__['person_id'], 4) + eq_(objects[3].__dict__['boss_id'], 4) def test_bulk_save_joined_inh_no_defaults(self): Person, Engineer, Manager, Boss = \ @@ -220,7 +224,7 @@ class BulkInheritanceTest(fixtures.MappedTest, BulkTest): person_id=3, name='e2', status='s3', primary_language='l2'), Boss( - person_id=4, + person_id=4, boss_id=4, name='b1', status='s3', manager_name='mn2', golf_swing='g1') ], @@ -264,8 +268,9 @@ class BulkInheritanceTest(fixtures.MappedTest, BulkTest): [{'status': 's3', 'person_id': 4, 'manager_name': 'mn2'}] ), CompiledSQL( - "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", - [{'golf_swing': 'g1'}] + "INSERT INTO boss (boss_id, golf_swing) VALUES " + "(:boss_id, :golf_swing)", + [{'boss_id': 4, 'golf_swing': 'g1'}] ) ) @@ -290,28 +295,35 @@ class BulkInheritanceTest(fixtures.MappedTest, BulkTest): name='b3', status='s3', manager_name='mn3', golf_swing='g3' ), - ] + ], return_defaults=True ) - # the only difference here is that common classes are grouped together. - # at the moment it doesn't lump all the "people" tables from - # different classes together. 
asserter.assert_( CompiledSQL( "INSERT INTO people (name) VALUES (:name)", - [{'name': 'b1'}, {'name': 'b2'}, {'name': 'b3'}] + [{'name': 'b1'}] + ), + CompiledSQL( + "INSERT INTO people (name) VALUES (:name)", + [{'name': 'b2'}] ), CompiledSQL( - "INSERT INTO managers (status, manager_name) VALUES " - "(:status, :manager_name)", - [{'status': 's1', 'manager_name': 'mn1'}, - {'status': 's2', 'manager_name': 'mn2'}, - {'status': 's3', 'manager_name': 'mn3'}] + "INSERT INTO people (name) VALUES (:name)", + [{'name': 'b3'}] + ), + CompiledSQL( + "INSERT INTO managers (person_id, status, manager_name) " + "VALUES (:person_id, :status, :manager_name)", + [{'person_id': 1, 'status': 's1', 'manager_name': 'mn1'}, + {'person_id': 2, 'status': 's2', 'manager_name': 'mn2'}, + {'person_id': 3, 'status': 's3', 'manager_name': 'mn3'}] ), CompiledSQL( - "INSERT INTO boss (golf_swing) VALUES (:golf_swing)", - [{'golf_swing': 'g1'}, - {'golf_swing': 'g2'}, {'golf_swing': 'g3'}] + "INSERT INTO boss (boss_id, golf_swing) VALUES " + "(:boss_id, :golf_swing)", + [{'golf_swing': 'g1', 'boss_id': 1}, + {'golf_swing': 'g2', 'boss_id': 2}, + {'golf_swing': 'g3', 'boss_id': 3}] ) ) -- cgit v1.2.1 From 07cc9e054ae4d5bb9cfc3c1d807b2a0d58a95b69 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 7 Dec 2014 20:36:01 -0500 Subject: - add an option for bulk_save -> update to not do history --- lib/sqlalchemy/orm/persistence.py | 9 +++++++-- lib/sqlalchemy/orm/session.py | 32 +++++++++++++++++++++----------- test/orm/test_bulk.py | 31 ++++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 14 deletions(-) diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index d94fbb040..f477e1dd7 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -75,7 +75,8 @@ def _bulk_insert( ) -def _bulk_update(mapper, mappings, session_transaction, isstates): +def _bulk_update(mapper, mappings, session_transaction, + isstates, update_changed_only): base_mapper = mapper.base_mapper cached_connections = _cached_connection_dict(base_mapper) @@ -88,7 +89,10 @@ def _bulk_update(mapper, mappings, session_transaction, isstates): ) if isstates: - mappings = [_changed_dict(mapper, state) for state in mappings] + if update_changed_only: + mappings = [_changed_dict(mapper, state) for state in mappings] + else: + mappings = [state.dict for state in mappings] else: mappings = list(mappings) @@ -612,6 +616,7 @@ def _emit_update_statements(base_mapper, uowtransaction, rows = 0 records = list(records) + if hasvalue: for state, state_dict, params, mapper, \ connection, value_params in records: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index e07b4554e..72d393f54 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2047,7 +2047,8 @@ class Session(_SessionClassMethods): with util.safe_reraise(): transaction.rollback(_capture_exception=True) - def bulk_save_objects(self, objects, return_defaults=False): + def bulk_save_objects( + self, objects, return_defaults=False, update_changed_only=True): """Perform a bulk save of the given list of objects. The bulk save feature allows mapped objects to be used as the @@ -2083,12 +2084,13 @@ class Session(_SessionClassMethods): attribute set, then the object is assumed to be "detached" and will result in an UPDATE. Otherwise, an INSERT is used. 
- In the case of an UPDATE, **all** those attributes which are present - and are not part of the primary key are applied to the SET clause - of the UPDATE statement, regardless of whether any change in state - was logged on each attribute; there is no checking of per-attribute - history. The primary key attributes, which are required, - are applied to the WHERE clause. + In the case of an UPDATE, statements are grouped based on which + attributes have changed, and are thus to be the subject of each + SET clause. If ``update_changed_only`` is False, then all + attributes present within each object are applied to the UPDATE + statement, which may help in allowing the statements to be grouped + together into a larger executemany(), and will also reduce the + overhead of checking history on attributes. :param return_defaults: when True, rows that are missing values which generate defaults, namely integer primary key defaults and sequences, @@ -2099,6 +2101,11 @@ class Session(_SessionClassMethods): return_defaults mode greatly reduces the performance gains of the method overall. + :param update_changed_only: when True, UPDATE statements are rendered + based on those attributes in each state that have logged changes. + When False, all attributes present are rendered into the SET clause + with the exception of primary key attributes. + .. seealso:: :ref:`bulk_operations` @@ -2113,7 +2120,8 @@ class Session(_SessionClassMethods): lambda state: (state.mapper, state.key is not None) ): self._bulk_save_mappings( - mapper, states, isupdate, True, return_defaults) + mapper, states, isupdate, True, + return_defaults, update_changed_only) def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): """Perform a bulk insert of the given list of mapping dictionaries. 
@@ -2218,10 +2226,11 @@ class Session(_SessionClassMethods): :meth:`.Session.bulk_save_objects` """ - self._bulk_save_mappings(mapper, mappings, True, False, False) + self._bulk_save_mappings(mapper, mappings, True, False, False, False) def _bulk_save_mappings( - self, mapper, mappings, isupdate, isstates, return_defaults): + self, mapper, mappings, isupdate, isstates, + return_defaults, update_changed_only): mapper = _class_to_mapper(mapper) self._flushing = True @@ -2230,7 +2239,8 @@ class Session(_SessionClassMethods): try: if isupdate: persistence._bulk_update( - mapper, mappings, transaction, isstates) + mapper, mappings, transaction, + isstates, update_changed_only) else: persistence._bulk_insert( mapper, mappings, transaction, isstates, return_defaults) diff --git a/test/orm/test_bulk.py b/test/orm/test_bulk.py index f6d2513d1..e27d3b73c 100644 --- a/test/orm/test_bulk.py +++ b/test/orm/test_bulk.py @@ -13,7 +13,7 @@ class BulkTest(testing.AssertsExecutionResults): run_define_tables = 'each' -class BulkInsertTest(BulkTest, _fixtures.FixtureTest): +class BulkInsertUpdateTest(BulkTest, _fixtures.FixtureTest): @classmethod def setup_mappers(cls): @@ -75,6 +75,35 @@ class BulkInsertTest(BulkTest, _fixtures.FixtureTest): ) assert 'id' not in objects[0].__dict__ + def test_bulk_save_updated_include_unchanged(self): + User, = self.classes("User",) + + s = Session(expire_on_commit=False) + objects = [ + User(name="u1"), + User(name="u2"), + User(name="u3") + ] + s.add_all(objects) + s.commit() + + objects[0].name = 'u1new' + objects[2].name = 'u3new' + + s = Session() + with self.sql_execution_asserter() as asserter: + s.bulk_save_objects(objects, update_changed_only=False) + + asserter.assert_( + CompiledSQL( + "UPDATE users SET id=:id, name=:name WHERE " + "users.id = :users_id", + [{'users_id': 1, 'id': 1, 'name': 'u1new'}, + {'users_id': 2, 'id': 2, 'name': 'u2'}, + {'users_id': 3, 'id': 3, 'name': 'u3new'}] + ) + ) + class BulkInheritanceTest(BulkTest, fixtures.MappedTest): @classmethod -- cgit v1.2.1 From 5ed7a9672a4c143f111a15f26dfce4bc80547b6f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 7 Dec 2014 21:08:14 -0500 Subject: start docs... --- doc/build/orm/session.rst | 55 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst index 78ae1ba81..01ac7230e 100644 --- a/doc/build/orm/session.rst +++ b/doc/build/orm/session.rst @@ -2456,6 +2456,61 @@ tables) across multiple databases. See the "sharding" example: :ref:`examples_sharding`. +.. _bulk_operations: + +Bulk Operations +--------------- + +.. note:: Bulk Operations mode is a new series of operations made available + on the :class:`.Session` object for the purpose of invoking INSERT and + UPDATE statements with greatly reduced Python overhead, at the expense + of much less functionality, automation, and error checking. + As of SQLAlchemy 1.0, these features should be considered as "beta", and + additionally are intended for advanced users. + +.. versionadded:: 1.0.0 + +Bulk operations on the :class:`.Session` include :meth:`.Session.bulk_save_objects`, +:meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`. 
+The purpose of these methods is to directly expose internal elements of the unit of work system,
+such that facilities for emitting INSERT and UPDATE statements given dictionaries
+or object states can be utilized alone, bypassing the normal unit of work
+mechanics of state, relationship and attribute management. The advantage
+to this approach is strictly one of reduced Python overhead:
+
+* The flush() process, including the survey of all objects, their state,
+  their cascade status, the status of all objects associated with them
+  via :meth:`.relationship`, and the topological sort of all operations to
+  be performed is completely bypassed. This reduces a great amount of
+  Python overhead.
+
+* The objects as given have no defined relationship to the target
+  :class:`.Session`, even when the operation is complete, meaning there's no
+  overhead in attaching them or managing their state in terms of the identity
+  map or session.
+
+* The :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`
+  methods accept lists of plain Python dictionaries, not objects; this further
+  reduces a large amount of overhead associated with instantiating mapped
+  objects and assigning state to them, which normally is also subject to
+  expensive tracking of history on a per-attribute basis.
+
+* The process of fetching primary keys after an INSERT is also disabled by
+  default. When performed correctly, INSERT statements can now more readily
+  be batched by the unit of work process into ``executemany()`` blocks, which
+  perform vastly better than individual statement invocations.
+
+* UPDATE statements can similarly be tailored such that all attributes
+  are subject to the SET clause unconditionally, again making it much more
+  likely that ``executemany()`` blocks can be used.
+
+The performance behavior of the bulk routines should be studied using the
+:ref:`examples_performance` example suite. This is a series of example
+scripts which illustrate Python call-counts across a variety of scenarios,
+including bulk insert and update scenarios.
+
+
+
 Sessions API
 ============

-- cgit v1.2.1

From 3f1477e2ecf3b2e95a26383490d0e8c363f4d0cc Mon Sep 17 00:00:00 2001
From: Mike Bayer
Date: Mon, 8 Dec 2014 01:10:30 -0500
Subject: - A new series of :class:`.Session` methods which provide hooks
 directly into the unit of work's facility for emitting INSERT and UPDATE
 statements has been created. When used correctly, this expert-oriented
 system can allow ORM-mappings to be used to generate bulk insert and update
 statements batched into executemany groups, allowing the statements to
 proceed at speeds that rival direct use of the Core.

fixes #3100
---
 doc/build/changelog/changelog_10.rst | 29 ++++++++++++
 doc/build/changelog/migration_10.rst | 35 +++++++++++++-
 doc/build/core/tutorial.rst          |  2 +
 doc/build/faq.rst                    | 29 ++++++------
 doc/build/orm/session.rst            | 92 ++++++++++++++++++++++++++++++++++--
 examples/performance/__init__.py     |  9 ++++
 examples/performance/__main__.py     |  2 +
 lib/sqlalchemy/orm/session.py        | 59 ++++++++++++++---------
 8 files changed, 217 insertions(+), 40 deletions(-)

diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst
index 6d99095d9..d6f36e97e 100644
--- a/doc/build/changelog/changelog_10.rst
+++ b/doc/build/changelog/changelog_10.rst
@@ -22,6 +22,35 @@ series as well. For changes that are specific to 1.0 with an emphasis
    on compatibility concerns, see :doc:`/changelog/migration_10`.

+    ..
change:: + :tags: feature, examples + + A new suite of examples dedicated to providing a detailed study + into performance of SQLAlchemy ORM and Core, as well as the DBAPI, + from multiple perspectives. The suite runs within a container + that provides built in profiling displays both through console + output as well as graphically via the RunSnake tool. + + .. seealso:: + + :ref:`examples_performance` + + .. change:: + :tags: feature, orm + :tickets: 3100 + + A new series of :class:`.Session` methods which provide hooks + directly into the unit of work's facility for emitting INSERT + and UPDATE statements has been created. When used correctly, + this expert-oriented system can allow ORM-mappings to be used + to generate bulk insert and update statements batched into + executemany groups, allowing the statements to proceed at + speeds that rival direct use of the Core. + + .. seealso:: + + :ref:`bulk_operations` + .. change:: :tags: feature, mssql :tickets: 3039 diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 562bb9f1b..cd5d420e5 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -8,7 +8,7 @@ What's New in SQLAlchemy 1.0? undergoing maintenance releases as of May, 2014, and SQLAlchemy version 1.0, as of yet unreleased. - Document last updated: October 23, 2014 + Document last updated: December 8, 2014 Introduction ============ @@ -230,6 +230,39 @@ the :class:`.Table` construct. :ticket:`2051` +New Session Bulk INSERT/UPDATE API +---------------------------------- + +A new series of :class:`.Session` methods which provide hooks directly +into the unit of work's facility for emitting INSERT and UPDATE +statements has been created. When used correctly, this expert-oriented system +can allow ORM-mappings to be used to generate bulk insert and update +statements batched into executemany groups, allowing the statements +to proceed at speeds that rival direct use of the Core. + +.. seealso:: + + :ref:`bulk_operations` - introduction and full documentation + +:ticket:`3100` + +New Performance Example Suite +------------------------------ + +Inspired by the benchmarking done for the :ref:`bulk_operations` feature +as well as for the :ref:`faq_how_to_profile` section of the FAQ, a new +example section has been added which features several scripts designed +to illustrate the relative performance profile of various Core and ORM +techniques. The scripts are organized into use cases, and are packaged +under a single console interface such that any combination of demonstrations +can be run, dumping out timings, Python profile results and/or RunSnake profile +displays. + +.. seealso:: + + :ref:`examples_performance` + + .. _feature_get_enums: New get_enums() method with Postgresql Dialect diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index 04a25b174..b6c07bdc0 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -307,6 +307,8 @@ them is different across different databases; each database's determine the correct value (or values; note that ``inserted_primary_key`` returns a list so that it supports composite primary keys). +.. _execute_multiple: + Executing Multiple Statements ============================== diff --git a/doc/build/faq.rst b/doc/build/faq.rst index 555fdc9e1..8c3bd24f4 100644 --- a/doc/build/faq.rst +++ b/doc/build/faq.rst @@ -705,9 +705,13 @@ main query. :ref:`subqueryload_ordering` +.. _faq_performance: + Performance =========== +.. 
_faq_how_to_profile: + How can I profile a SQLAlchemy powered application? --------------------------------------------------- @@ -961,18 +965,10 @@ Common strategies to mitigate this include: The output of a profile can be a little daunting but after some practice they are very easy to read. -If you're feeling ambitious, there's also a more involved example of -SQLAlchemy profiling within the SQLAlchemy unit tests in the -``tests/aaa_profiling`` section. Tests in this area -use decorators that assert a -maximum number of method calls being used for particular operations, -so that if something inefficient gets checked in, the tests will -reveal it (it is important to note that in cPython, function calls have -the highest overhead of any operation, and the count of calls is more -often than not nearly proportional to time spent). Of note are the -the "zoomark" tests which use a fancy "SQL capturing" scheme which -cuts out the overhead of the DBAPI from the equation - although that -technique isn't really necessary for garden-variety profiling. +.. seealso:: + + :ref:`examples_performance` - a suite of performance demonstrations + with bundled profiling capabilities. I'm inserting 400,000 rows with the ORM and it's really slow! -------------------------------------------------------------- @@ -1001,10 +997,15 @@ ORM as a first-class component. For the use case of fast bulk inserts, the SQL generation and execution system that the ORM builds on top of -is part of the Core. Using this system directly, we can produce an INSERT that +is part of the :doc:`Core `. Using this system directly, we can produce an INSERT that is competitive with using the raw database API directly. -The example below illustrates time-based tests for four different +Alternatively, the SQLAlchemy ORM offers the :ref:`bulk_operations` +suite of methods, which provide hooks into subsections of the unit of +work process in order to emit Core-level INSERT and UPDATE constructs with +a small degree of ORM-based automation. + +The example below illustrates time-based tests for several different methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst index 01ac7230e..08ef9303e 100644 --- a/doc/build/orm/session.rst +++ b/doc/build/orm/session.rst @@ -1944,6 +1944,8 @@ transactions set the flag ``twophase=True`` on the session:: # before committing both transactions session.commit() +.. _session_sql_expressions: + Embedding SQL Insert/Update Expressions into a Flush ===================================================== @@ -2459,7 +2461,7 @@ See the "sharding" example: :ref:`examples_sharding`. .. _bulk_operations: Bulk Operations ---------------- +=============== .. note:: Bulk Operations mode is a new series of operations made available on the :class:`.Session` object for the purpose of invoking INSERT and @@ -2480,7 +2482,7 @@ to this approach is strictly one of reduced Python overhead: * The flush() process, including the survey of all objects, their state, their cascade status, the status of all objects associated with them - via :meth:`.relationship`, and the topological sort of all operations to + via :func:`.relationship`, and the topological sort of all operations to be performed is completely bypassed. This reduces a great amount of Python overhead. 
@@ -2489,7 +2491,7 @@ to this approach is strictly one of reduced Python overhead:
   overhead in attaching them or managing their state in terms of the identity
   map or session.
 
-* The :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`
+* The :meth:`.Session.bulk_insert_mappings` and :meth:`.Session.bulk_update_mappings`
   methods accept lists of plain Python dictionaries, not objects; this further
   reduces a large amount of overhead associated with instantiating mapped
   objects and assigning state to them, which normally is also subject to
@@ -2509,6 +2511,90 @@ The performance behavior of the bulk routines should be studied using the
 scripts which illustrate Python call-counts across a variety of scenarios,
 including bulk insert and update scenarios.
 
+.. seealso::
+
+    :ref:`examples_performance` - includes detailed examples of bulk operations
+    contrasted against traditional Core and ORM methods, including performance
+    metrics.
+
+Usage
+-----
+
+The methods each work in the context of the :class:`.Session` object's
+transaction, like any other::
+
+    s = Session()
+    objects = [
+        User(name="u1"),
+        User(name="u2"),
+        User(name="u3")
+    ]
+    s.bulk_save_objects(objects)
+
+For :meth:`.Session.bulk_insert_mappings` and :meth:`.Session.bulk_update_mappings`,
+dictionaries are passed::
+
+    s.bulk_insert_mappings(User,
+        [dict(name="u1"), dict(name="u2"), dict(name="u3")]
+    )
+
+.. seealso::
+
+    :meth:`.Session.bulk_save_objects`
+
+    :meth:`.Session.bulk_insert_mappings`
+
+    :meth:`.Session.bulk_update_mappings`
+
+
+Comparison to Core Insert / Update Constructs
+---------------------------------------------
+
+The bulk methods offer performance that under particular circumstances
+can be close to that of using the core :class:`.Insert` and
+:class:`.Update` constructs in an "executemany" context (for a description
+of "executemany", see :ref:`execute_multiple` in the Core tutorial).
+In order to achieve this, the
+:paramref:`.Session.bulk_insert_mappings.return_defaults`
+flag should be disabled so that rows can be batched together. The example
+suite in :ref:`examples_performance` should be carefully studied in order
+to gain familiarity with how fast bulk performance can be achieved.
+
+ORM Compatibility
+-----------------
+
+The bulk insert / update methods lose a significant amount of functionality
+versus traditional ORM use. The following is a listing of features that
+are **not available** when using these methods:
+
+* persistence along :func:`.relationship` linkages; related rows must be
+  linked up manually via their foreign key columns (see the sketch
+  following this list)
+
+* sorting of rows within order of dependency; rows are inserted or updated
+  directly in the order in which they are passed to the methods
+
+* Session management of the given objects, including attachment to the
+  session and identity map management.
+
+* Functionality related to primary key mutation, ON UPDATE cascade
+
+* SQL expression inserts / updates (e.g. :ref:`session_sql_expressions`)
+
+* ORM events such as :meth:`.MapperEvents.before_insert`, etc. The bulk
+  session methods have no event support.
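+
+As an illustration of the first point above, a minimal sketch of linking
+rows up manually, assuming a ``User`` / ``Address`` mapping along the lines
+of the ORM tutorial, where ``Address.user_id`` refers to ``User.id``::
+
+    # assumes tutorial-style classes: User(id, name) and
+    # Address(id, user_id, email_address)
+    s = Session()
+
+    # primary key values are assigned up front; no relationship()
+    # cascade will populate Address.user_id on our behalf
+    s.bulk_save_objects([User(id=1, name="u1")])
+    s.bulk_save_objects([
+        Address(user_id=1, email_address="u1@example.com")
+    ])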
+
+Features that **are available** include:
+
+* INSERTs and UPDATEs of mapped objects
+
+* Version identifier support
+
+* Multi-table mappings, such as joined-inheritance; however, an object
+  to be inserted across multiple tables either needs to have primary key
+  identifiers fully populated ahead of time, or the
+  :paramref:`.Session.bulk_save_objects.return_defaults` flag must be used,
+  which will greatly reduce the performance benefits
+
+
 
 Sessions API
diff --git a/examples/performance/__init__.py b/examples/performance/__init__.py
index 6e2e1fc89..a4edfce36 100644
--- a/examples/performance/__init__.py
+++ b/examples/performance/__init__.py
@@ -48,6 +48,15 @@ Or with options::
         --dburl mysql+mysqldb://scott:tiger@localhost/test \\
         --profile --num 1000
 
+.. seealso::
+
+    :ref:`faq_how_to_profile`
+
+File Listing
+-------------
+
+.. autosource::
+
 Running all tests with time
 ---------------------------
 
diff --git a/examples/performance/__main__.py b/examples/performance/__main__.py
index 957d6c699..5e05143bf 100644
--- a/examples/performance/__main__.py
+++ b/examples/performance/__main__.py
@@ -1,3 +1,5 @@
+"""Allows the examples/performance package to be run as a script."""
+
 from . import Profiler
 
 if __name__ == '__main__':
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 72d393f54..d40d28154 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -2061,17 +2061,22 @@ class Session(_SessionClassMethods):
 
         The objects as given are not added to the session and no additional
         state is established on them, unless the ``return_defaults`` flag
-        is also set.
+        is also set, in which case primary key attributes and server-side
+        default values will be populated.
+
+        .. versionadded:: 1.0.0
 
         .. warning::
 
            The bulk save feature allows for a lower-latency INSERT/UPDATE
-           of rows at the expense of a lack of features.
Features such - as relationship handling and SQL clause support are bypassed - in favor of a raw INSERT of records. + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + INSERT of records. - **Please read the list of caveats at :ref:`bulk_operations` - before using this method.** + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** :param mapper: a mapped class, or the actual :class:`.Mapper` object, representing the single kind of object represented within the mapping @@ -2164,8 +2173,10 @@ class Session(_SessionClassMethods): is available. In particular this will allow joined-inheritance and other multi-table mappings to insert correctly without the need to provide primary - key values ahead of time; however, return_defaults mode greatly - reduces the performance gains of the method overall. If the rows + key values ahead of time; however, + :paramref:`.Session.bulk_insert_mappings.return_defaults` + **greatly reduces the performance gains** of the method overall. + If the rows to be inserted only refer to a single table, then there is no reason this flag should be set as the returned default information is not used. @@ -2181,7 +2192,7 @@ class Session(_SessionClassMethods): """ self._bulk_save_mappings( - mapper, mappings, False, False, return_defaults) + mapper, mappings, False, False, return_defaults, False) def bulk_update_mappings(self, mapper, mappings): """Perform a bulk update of the given list of mapping dictionaries. @@ -2193,15 +2204,19 @@ class Session(_SessionClassMethods): state management features in use, reducing latency when updating large numbers of simple rows. + .. versionadded:: 1.0.0 + .. warning:: The bulk update feature allows for a lower-latency UPDATE - of rows at the expense of a lack of features. Features such - as relationship handling and SQL clause support are bypassed - in favor of a raw UPDATE of records. - - **Please read the list of caveats at :ref:`bulk_operations` - before using this method.** + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + UPDATES of records. 
+ + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** :param mapper: a mapped class, or the actual :class:`.Mapper` object, representing the single kind of object represented within the mapping -- cgit v1.2.1 From 902c8d480beebb69e09ee613fe51579c3fd2ce0d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Dec 2014 01:18:07 -0500 Subject: - some profile changes likely due to the change in event listening on engines --- test/ext/test_horizontal_shard.py | 2 -- test/profiles.txt | 75 ++++++++++++++++++++++++++++----------- 2 files changed, 54 insertions(+), 23 deletions(-) diff --git a/test/ext/test_horizontal_shard.py b/test/ext/test_horizontal_shard.py index 99879a74d..0af33ecde 100644 --- a/test/ext/test_horizontal_shard.py +++ b/test/ext/test_horizontal_shard.py @@ -235,8 +235,6 @@ class AttachedFileShardTest(ShardTest, fixtures.TestBase): def _init_dbs(self): db1 = testing_engine('sqlite://', options={"execution_options": {"shard_id": "shard1"}}) - assert db1._has_events - db2 = db1.execution_options(shard_id="shard2") db3 = db1.execution_options(shard_id="shard3") db4 = db1.execution_options(shard_id="shard4") diff --git a/test/profiles.txt b/test/profiles.txt index 97ef13873..97691e4a1 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -104,11 +104,13 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgre test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgresql_psycopg2_nocextensions 4265 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_cextensions 4265 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_nocextensions 4260 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_cextensions 4283 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_nocextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_cextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_nocextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_cextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_nocextensions 4266 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_cextensions 4283 # TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove @@ -118,11 +120,13 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_postgresql_psycopg2_nocextensions 6426 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_cextensions 6426 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_nocextensions 6426 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_cextensions 6431 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_nocextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_cextensions 6428 
test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_nocextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_postgresql_psycopg2_cextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_postgresql_psycopg2_nocextensions 6428 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_sqlite_pysqlite_cextensions 6431 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline @@ -132,11 +136,13 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycop test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_nocextensions 40149 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 19280 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 28297 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_cextensions 20100 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_nocextensions 29138 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 32398 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 20289 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_nocextensions 37327 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_cextensions 20135 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_nocextensions 29138 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_cextensions 20289 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols @@ -146,11 +152,13 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_nocextensions 30054 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 27144 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 30149 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_cextensions 26016 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_nocextensions 29068 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 32197 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 26127 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_nocextensions 31179 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_cextensions 26065 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_nocextensions 29068 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_cextensions 26127 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity @@ -160,11 +168,13 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_postgresql_psycopg2_nocextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 
2.7_sqlite_pysqlite_cextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_nocextensions 17988 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_nocextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_nocextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_postgresql_psycopg2_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_postgresql_psycopg2_nocextensions 18988 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_sqlite_pysqlite_cextensions 18988 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity @@ -174,11 +184,13 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_nocextensions 122553 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 162315 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 165111 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_cextensions 119353 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_nocextensions 125352 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_cextensions 169566 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_nocextensions 171364 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_cextensions 123602 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_nocextensions 125352 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_cextensions 161603 # TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks @@ -188,25 +200,29 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2. 
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_nocextensions 19219 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 22288 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 22530 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_cextensions 18958 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_nocextensions 19492 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_cextensions 23067 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_nocextensions 23271 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_cextensions 19228 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_nocextensions 19480 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_cextensions 21753 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_cextensions 1411 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_nocextensions 1436 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1323 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1348 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1249 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1274 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1601 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1626 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1355 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1656 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1512 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_cextensions 1264 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1279 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1537 test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocextensions 1671 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1340 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1355 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1264 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1279 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_cextensions 1537 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load @@ -216,11 +232,13 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_nocextensions 117,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 117,18 
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 117,18 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_nocextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_nocextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_nocextensions 122,19 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_cextensions 122,19 # TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect @@ -272,10 +290,13 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_nocextensions 45 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_nocextensions 45 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_nocextensions 43 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_cextensions 43 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_sqlite_pysqlite_cextensions 43 @@ -289,10 +310,13 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_nocextensions 80 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_nocextensions 80 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_nocextensions 78 
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_cextensions 78 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_sqlite_pysqlite_cextensions 78 @@ -306,10 +330,13 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_nocextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_cextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_sqlite_pysqlite_cextensions 16 @@ -317,36 +344,42 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_string -test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 514 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 451 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_nocextensions 15534 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35521 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 457 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 394 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15477 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 489 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_cextensions 109074 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 427 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_nocextensions 
14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 462 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 399 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_nocextensions 14462 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 489 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_cextensions 53694 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_nocextensions 67694 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 427 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 462 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 399 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_nocextensions 14462 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 514 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 451 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_nocextensions 45534 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35521 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 457 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 394 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15477 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 489 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_cextensions 109074 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 427 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 462 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 399 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_nocextensions 14462 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 489 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_cextensions 53694 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_nocextensions 67694 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 427 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 462 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 399 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_nocextensions 14462 # TEST: 
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation -- cgit v1.2.1 From 6b9f62df10e1b1f557b9077613e5e96a08427460 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Dec 2014 11:18:38 -0500 Subject: - force the _has_events flag to True on engines, so that profiling is more predictable - restore the profiling from before this change --- lib/sqlalchemy/testing/engines.py | 3 ++ test/profiles.txt | 75 +++++++++++---------------------------- 2 files changed, 24 insertions(+), 54 deletions(-) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 7d73e7423..444a79b70 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -215,6 +215,9 @@ def testing_engine(url=None, options=None): options = config.db_opts engine = create_engine(url, **options) + engine._has_events = True # enable event blocks, helps with + # profiling + if isinstance(engine.pool, pool.QueuePool): engine.pool._timeout = 0 engine.pool._max_overflow = 0 diff --git a/test/profiles.txt b/test/profiles.txt index 97691e4a1..97ef13873 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -104,13 +104,11 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgre test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgresql_psycopg2_nocextensions 4265 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_cextensions 4265 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_nocextensions 4260 -test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_cextensions 4283 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_nocextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_cextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_nocextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_cextensions 4266 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_nocextensions 4266 -test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_cextensions 4283 # TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove @@ -120,13 +118,11 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_postgresql_psycopg2_nocextensions 6426 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_cextensions 6426 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_nocextensions 6426 -test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_cextensions 6431 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_nocextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_cextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_nocextensions 6428 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_postgresql_psycopg2_cextensions 6428 
test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_postgresql_psycopg2_nocextensions 6428 -test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_sqlite_pysqlite_cextensions 6431 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline @@ -136,13 +132,11 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycop test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_nocextensions 40149 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 19280 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 28297 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_cextensions 20100 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_nocextensions 29138 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 20289 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 32398 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_nocextensions 37327 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_cextensions 20135 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_nocextensions 29138 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_cextensions 20289 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols @@ -152,13 +146,11 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_nocextensions 30054 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 27144 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 30149 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_cextensions 26016 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_nocextensions 29068 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 26127 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 32197 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_nocextensions 31179 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_cextensions 26065 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_nocextensions 29068 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_cextensions 26127 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity @@ -168,13 +160,11 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_postgresql_psycopg2_nocextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_cextensions 17988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_nocextensions 17988 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 
3.3_postgresql_psycopg2_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_nocextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_nocextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_postgresql_psycopg2_cextensions 18988 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_postgresql_psycopg2_nocextensions 18988 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_sqlite_pysqlite_cextensions 18988 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity @@ -184,13 +174,11 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_nocextensions 122553 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 162315 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 165111 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_cextensions 119353 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_nocextensions 125352 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_cextensions 169566 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_nocextensions 171364 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_cextensions 123602 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_nocextensions 125352 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_cextensions 161603 # TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks @@ -200,29 +188,25 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2. 
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_nocextensions 19219 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 22288 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 22530 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_cextensions 18958 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_nocextensions 19492 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_cextensions 23067 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_nocextensions 23271 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_cextensions 19228 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_nocextensions 19480 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_cextensions 21753 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_cextensions 1411 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_nocextensions 1436 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1249 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1274 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1323 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1348 test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1601 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1512 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_cextensions 1264 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1279 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1537 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1626 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1355 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1656 test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocextensions 1671 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1264 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1279 -test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_cextensions 1537 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1340 +test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1355 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load @@ -232,13 +216,11 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_nocextensions 117,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 117,18 
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 117,18 -test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_nocextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_nocextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_cextensions 122,19 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_nocextensions 122,19 -test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_cextensions 122,19 # TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect @@ -290,13 +272,10 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_nocextensions 45 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_nocextensions 45 -test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_nocextensions 43 -test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_cextensions 43 -test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_cextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_nocextensions 43 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_sqlite_pysqlite_cextensions 43 @@ -310,13 +289,10 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_nocextensions 80 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_nocextensions 80 -test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_nocextensions 78 
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_cextensions 78 -test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_cextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_nocextensions 78 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_sqlite_pysqlite_cextensions 78 @@ -330,13 +306,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_nocextensions 15 -test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_nocextensions 16 -test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_cextensions 16 -test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_sqlite_pysqlite_cextensions 16 @@ -344,42 +317,36 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_string -test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 451 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 514 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_nocextensions 15534 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35521 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 394 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 457 test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15477 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_cextensions 109074 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 427 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_nocextensions 
14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 399 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_nocextensions 14462 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_cextensions 53694 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_nocextensions 67694 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 427 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 399 +test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_nocextensions 14462 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 451 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 514 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_nocextensions 45534 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20501 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35521 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 394 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 457 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15477 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_cextensions 109074 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 427 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 399 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_nocextensions 14462 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_cextensions 53694 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_nocextensions 67694 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 427 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 489 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_nocextensions 14489 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 399 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 462 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_nocextensions 14462 # TEST: 
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation -- cgit v1.2.1 From 8553c195c24f67ff5d75893ddad57d1003fb9759 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Dec 2014 12:34:40 -0500 Subject: - autoinc here for oracle --- test/orm/test_naturalpks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py index 709e1c0b1..60387ddce 100644 --- a/test/orm/test_naturalpks.py +++ b/test/orm/test_naturalpks.py @@ -1228,7 +1228,9 @@ class JoinedInheritancePKOnFKTest(fixtures.MappedTest): Table( 'engineer', metadata, - Column('id', Integer, primary_key=True), + Column( + 'id', Integer, + primary_key=True, test_needs_autoincrement=True), Column( 'person_name', String(50), ForeignKey('person.name', **fk_args)), -- cgit v1.2.1 From b7cf11b163dd7d15f56634a41dcceb880821ecf3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Dec 2014 14:05:20 -0500 Subject: - simplify the "noconnection" error handling, setting _handle_dbapi_exception_noconnection() to only invoke in the case of raw_connection() in the constructor of Connection. in all other cases the Connection proceeds with _handle_dbapi_exception() including revalidate. --- lib/sqlalchemy/engine/base.py | 36 +++++++++++++++++++----------------- lib/sqlalchemy/engine/threadlocal.py | 2 +- test/engine/test_reconnect.py | 4 ++-- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 23348469d..dd8ea275c 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -265,18 +265,20 @@ class Connection(Connectable): try: return self.__connection except AttributeError: - return self._revalidate_connection(_wrap=True) + try: + return self._revalidate_connection() + except Exception as e: + self._handle_dbapi_exception(e, None, None, None, None) - def _revalidate_connection(self, _wrap): + def _revalidate_connection(self): if self.__branch_from: - return self.__branch_from._revalidate_connection(_wrap=_wrap) + return self.__branch_from._revalidate_connection() if self.__can_reconnect and self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") - self.__connection = self.engine.raw_connection( - _connection=self, _wrap=_wrap) + self.__connection = self.engine.raw_connection(_connection=self) self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @@ -817,7 +819,7 @@ class Connection(Connectable): try: conn = self.__connection except AttributeError: - conn = self._revalidate_connection(_wrap=False) + conn = self._revalidate_connection() dialect = self.dialect ctx = dialect.execution_ctx_cls._init_default( @@ -955,7 +957,7 @@ class Connection(Connectable): try: conn = self.__connection except AttributeError: - conn = self._revalidate_connection(_wrap=False) + conn = self._revalidate_connection() context = constructor(dialect, self, conn, *args) except Exception as e: @@ -1248,8 +1250,7 @@ class Connection(Connectable): self.close() @classmethod - def _handle_dbapi_exception_noconnection( - cls, e, dialect, engine, connection): + def _handle_dbapi_exception_noconnection(cls, e, dialect, engine): exc_info = sys.exc_info() @@ -1271,7 +1272,7 @@ class Connection(Connectable): if engine._has_events: ctx = ExceptionContextImpl( - e, sqlalchemy_exception, engine, connection, None, None, + e, sqlalchemy_exception, engine, None, None, None, None, 
None, is_disconnect) for fn in engine.dispatch.handle_error: try: @@ -1957,17 +1958,18 @@ class Engine(Connectable, log.Identified): """ return self.run_callable(self.dialect.has_table, table_name, schema) - def _wrap_pool_connect(self, fn, connection, wrap=True): - if not wrap: - return fn() + def _wrap_pool_connect(self, fn, connection): dialect = self.dialect try: return fn() except dialect.dbapi.Error as e: - Connection._handle_dbapi_exception_noconnection( - e, dialect, self, connection) + if connection is None: + Connection._handle_dbapi_exception_noconnection( + e, dialect, self) + else: + util.reraise(*sys.exc_info()) - def raw_connection(self, _connection=None, _wrap=True): + def raw_connection(self, _connection=None): """Return a "raw" DBAPI connection from the connection pool. The returned object is a proxied version of the DBAPI @@ -1984,7 +1986,7 @@ class Engine(Connectable, log.Identified): """ return self._wrap_pool_connect( - self.pool.unique_connection, _connection, _wrap) + self.pool.unique_connection, _connection) class OptionEngine(Engine): diff --git a/lib/sqlalchemy/engine/threadlocal.py b/lib/sqlalchemy/engine/threadlocal.py index 824b68fdf..e64ab09f4 100644 --- a/lib/sqlalchemy/engine/threadlocal.py +++ b/lib/sqlalchemy/engine/threadlocal.py @@ -61,7 +61,7 @@ class TLEngine(base.Engine): connection = self._tl_connection_cls( self, self._wrap_pool_connect( - self.pool.connect, connection, wrap=True), + self.pool.connect, connection), **kw) self._connections.conn = weakref.ref(connection) diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index 0efce87ce..4500ada6a 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -517,7 +517,7 @@ class RealReconnectTest(fixtures.TestBase): assert c1.invalidated assert c1_branch.invalidated - c1_branch._revalidate_connection(_wrap=True) + c1_branch._revalidate_connection() assert not c1.invalidated assert not c1_branch.invalidated @@ -535,7 +535,7 @@ class RealReconnectTest(fixtures.TestBase): assert c1.invalidated assert c1_branch.invalidated - c1._revalidate_connection(_wrap=True) + c1._revalidate_connection() assert not c1.invalidated assert not c1_branch.invalidated -- cgit v1.2.1 From 06738f665ea936246a3813ad7de01e98ff8d519a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Dec 2014 15:15:02 -0500 Subject: - identify another spot where _handle_dbapi_error() needs to do something differently for the case where it is called in an already-invalidated state; don't call upon self.connection --- lib/sqlalchemy/engine/base.py | 7 ++++--- test/engine/test_parseconnect.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index dd8ea275c..9a8610344 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1243,9 +1243,10 @@ class Connection(Connectable): del self._reentrant_error if self._is_disconnect: del self._is_disconnect - dbapi_conn_wrapper = self.connection - self.engine.pool._invalidate(dbapi_conn_wrapper, e) - self.invalidate(e) + if not self.invalidated: + dbapi_conn_wrapper = self.__connection + self.engine.pool._invalidate(dbapi_conn_wrapper, e) + self.invalidate(e) if self.should_close_with_result: self.close() diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 4a3da7d1c..8d659420d 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -396,6 +396,34 @@ class 
CreateEngineTest(fixtures.TestBase): except tsa.exc.DBAPIError as de: assert not de.connection_invalidated + @testing.requires.sqlite + def test_cant_connect_stay_invalidated(self): + e = create_engine('sqlite://') + sqlite3 = e.dialect.dbapi + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://') + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.is_disconnect + + conn = eng.connect() + + conn.invalidate() + + eng.pool._creator = Mock( + side_effect=sqlite3.ProgrammingError( + "Cannot operate on a closed database.")) + + try: + conn.connection + assert False + except tsa.exc.DBAPIError: + assert conn.invalidated + @testing.requires.sqlite def test_dont_touch_non_dbapi_exception_on_connect(self): e = create_engine('sqlite://') -- cgit v1.2.1 From c86c593ec3b913361999a1970efae3e6f3d831fa Mon Sep 17 00:00:00 2001 From: Yuval Langer Date: Tue, 9 Dec 2014 04:19:18 +0200 Subject: Removing unneeded space. --- doc/build/core/tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index b6c07bdc0..e96217f79 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -370,7 +370,7 @@ Selecting ========== We began with inserts just so that our test database had some data in it. The -more interesting part of the data is selecting it ! We'll cover UPDATE and +more interesting part of the data is selecting it! We'll cover UPDATE and DELETE statements later. The primary construct used to generate SELECT statements is the :func:`.select` function: -- cgit v1.2.1 From eee617e08eb761de7279de31246d904ca6b17da7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 10 Dec 2014 12:11:59 -0500 Subject: - rework the handle error on connect tests from test_parsconnect where they don't really belong into a new suite in test_execute --- test/engine/test_execute.py | 245 +++++++++++++++++++++++++++++++++++++++ test/engine/test_parseconnect.py | 236 ------------------------------------- 2 files changed, 245 insertions(+), 236 deletions(-) diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index 5c3279ba9..8e58d202d 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -1901,6 +1901,251 @@ class HandleErrorTest(fixtures.TestBase): self._test_alter_disconnect(False, False) +class HandleInvalidatedOnConnectTest(fixtures.TestBase): + __requires__ = ('sqlite', ) + + def setUp(self): + e = create_engine('sqlite://') + + connection = Mock( + get_server_version_info=Mock(return_value='5.0')) + + def connect(*args, **kwargs): + return connection + dbapi = Mock( + sqlite_version_info=(99, 9, 9,), + version_info=(99, 9, 9,), + sqlite_version='99.9.9', + paramstyle='named', + connect=Mock(side_effect=connect) + ) + + sqlite3 = e.dialect.dbapi + dbapi.Error = sqlite3.Error, + dbapi.ProgrammingError = sqlite3.ProgrammingError + + self.dbapi = dbapi + self.ProgrammingError = sqlite3.ProgrammingError + + def test_wraps_connect_in_dbapi(self): + dbapi = self.dbapi + dbapi.connect = Mock( + side_effect=self.ProgrammingError("random error")) + try: + create_engine('sqlite://', module=dbapi).connect() + assert False + except tsa.exc.DBAPIError as de: + assert not de.connection_invalidated + + def test_handle_error_event_connect(self): + dbapi = self.dbapi + dbapi.connect = Mock( + side_effect=self.ProgrammingError("random error")) + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi) + + 
@event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is None + raise MySpecialException("failed operation") + + assert_raises( + MySpecialException, + eng.connect + ) + + def test_handle_error_event_revalidate(self): + dbapi = self.dbapi + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi, _initialize=False) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is conn + assert isinstance(ctx.sqlalchemy_exception, tsa.exc.ProgrammingError) + raise MySpecialException("failed operation") + + conn = eng.connect() + conn.invalidate() + + dbapi.connect = Mock( + side_effect=self.ProgrammingError("random error")) + + assert_raises( + MySpecialException, + getattr, conn, 'connection' + ) + + def test_handle_error_event_implicit_revalidate(self): + dbapi = self.dbapi + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi, _initialize=False) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is conn + assert isinstance( + ctx.sqlalchemy_exception, tsa.exc.ProgrammingError) + raise MySpecialException("failed operation") + + conn = eng.connect() + conn.invalidate() + + dbapi.connect = Mock( + side_effect=self.ProgrammingError("random error")) + + assert_raises( + MySpecialException, + conn.execute, select([1]) + ) + + def test_handle_error_custom_connect(self): + dbapi = self.dbapi + + class MySpecialException(Exception): + pass + + def custom_connect(): + raise self.ProgrammingError("random error") + + eng = create_engine('sqlite://', module=dbapi, creator=custom_connect) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.engine is eng + assert ctx.connection is None + raise MySpecialException("failed operation") + + assert_raises( + MySpecialException, + eng.connect + ) + + def test_handle_error_event_connect_invalidate_flag(self): + dbapi = self.dbapi + dbapi.connect = Mock( + side_effect=self.ProgrammingError( + "Cannot operate on a closed database.")) + + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://', module=dbapi) + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.is_disconnect + ctx.is_disconnect = False + + try: + eng.connect() + assert False + except tsa.exc.DBAPIError as de: + assert not de.connection_invalidated + + def test_cant_connect_stay_invalidated(self): + class MySpecialException(Exception): + pass + + eng = create_engine('sqlite://') + + @event.listens_for(eng, "handle_error") + def handle_error(ctx): + assert ctx.is_disconnect + + conn = eng.connect() + + conn.invalidate() + + eng.pool._creator = Mock( + side_effect=self.ProgrammingError( + "Cannot operate on a closed database.")) + + try: + conn.connection + assert False + except tsa.exc.DBAPIError: + assert conn.invalidated + + def _test_dont_touch_non_dbapi_exception_on_connect(self, connect_fn): + dbapi = self.dbapi + dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error")) + + e = create_engine('sqlite://', module=dbapi) + e.dialect.is_disconnect = is_disconnect = Mock() + assert_raises_message( + TypeError, + "I'm not a DBAPI error", + connect_fn, e + ) + eq_(is_disconnect.call_count, 0) + + def test_dont_touch_non_dbapi_exception_on_connect(self): + self._test_dont_touch_non_dbapi_exception_on_connect( + lambda engine: 
engine.connect()) + + def test_dont_touch_non_dbapi_exception_on_contextual_connect(self): + self._test_dont_touch_non_dbapi_exception_on_connect( + lambda engine: engine.contextual_connect()) + + def test_ensure_dialect_does_is_disconnect_no_conn(self): + """test that is_disconnect() doesn't choke if no connection, + cursor given.""" + dialect = testing.db.dialect + dbapi = dialect.dbapi + assert not dialect.is_disconnect( + dbapi.OperationalError("test"), None, None) + + def _test_invalidate_on_connect(self, connect_fn): + """test that is_disconnect() is called during connect. + + interpretation of connection failures are not supported by + every backend. + + """ + + dbapi = self.dbapi + dbapi.connect = Mock( + side_effect=self.ProgrammingError( + "Cannot operate on a closed database.")) + try: + connect_fn(create_engine('sqlite://', module=dbapi)) + assert False + except tsa.exc.DBAPIError as de: + assert de.connection_invalidated + + def test_invalidate_on_connect(self): + """test that is_disconnect() is called during connect. + + interpretation of connection failures are not supported by + every backend. + + """ + self._test_invalidate_on_connect(lambda engine: engine.connect()) + + def test_invalidate_on_contextual_connect(self): + """test that is_disconnect() is called during connect. + + interpretation of connection failures are not supported by + every backend. + + """ + self._test_invalidate_on_connect( + lambda engine: engine.contextual_connect()) + + class ProxyConnectionTest(fixtures.TestBase): """These are the same tests as EngineEventsTest, except using diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 8d659420d..e53a99e15 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -238,242 +238,6 @@ class CreateEngineTest(fixtures.TestBase): assert_raises(TypeError, create_engine, 'mysql+mysqldb://', use_unicode=True, module=mock_dbapi) - @testing.requires.sqlite - def test_wraps_connect_in_dbapi(self): - e = create_engine('sqlite://') - sqlite3 = e.dialect.dbapi - dbapi = MockDBAPI() - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError("random error")) - try: - create_engine('sqlite://', module=dbapi).connect() - assert False - except tsa.exc.DBAPIError as de: - assert not de.connection_invalidated - - @testing.requires.sqlite - def test_handle_error_event_connect(self): - e = create_engine('sqlite://') - dbapi = MockDBAPI() - sqlite3 = e.dialect.dbapi - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError("random error")) - - class MySpecialException(Exception): - pass - - eng = create_engine('sqlite://', module=dbapi) - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.engine is eng - assert ctx.connection is None - raise MySpecialException("failed operation") - - assert_raises( - MySpecialException, - eng.connect - ) - - @testing.requires.sqlite - def test_handle_error_event_revalidate(self): - e = create_engine('sqlite://') - dbapi = MockDBAPI() - sqlite3 = e.dialect.dbapi - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - - class MySpecialException(Exception): - pass - - eng = create_engine('sqlite://', module=dbapi, _initialize=False) - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.engine is eng - assert ctx.connection is conn 
- assert isinstance(ctx.sqlalchemy_exception, exc.ProgrammingError) - raise MySpecialException("failed operation") - - conn = eng.connect() - conn.invalidate() - - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError("random error")) - - assert_raises( - MySpecialException, - getattr, conn, 'connection' - ) - - @testing.requires.sqlite - def test_handle_error_event_implicit_revalidate(self): - e = create_engine('sqlite://') - dbapi = MockDBAPI() - sqlite3 = e.dialect.dbapi - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - - class MySpecialException(Exception): - pass - - eng = create_engine('sqlite://', module=dbapi, _initialize=False) - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.engine is eng - assert ctx.connection is conn - assert isinstance(ctx.sqlalchemy_exception, exc.ProgrammingError) - raise MySpecialException("failed operation") - - conn = eng.connect() - conn.invalidate() - - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError("random error")) - - assert_raises( - MySpecialException, - conn.execute, select([1]) - ) - - @testing.requires.sqlite - def test_handle_error_custom_connect(self): - e = create_engine('sqlite://') - - dbapi = MockDBAPI() - sqlite3 = e.dialect.dbapi - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - - class MySpecialException(Exception): - pass - - def custom_connect(): - raise sqlite3.ProgrammingError("random error") - - eng = create_engine('sqlite://', module=dbapi, creator=custom_connect) - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.engine is eng - assert ctx.connection is None - raise MySpecialException("failed operation") - - assert_raises( - MySpecialException, - eng.connect - ) - - @testing.requires.sqlite - def test_handle_error_event_connect_invalidate_flag(self): - e = create_engine('sqlite://') - dbapi = MockDBAPI() - sqlite3 = e.dialect.dbapi - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError( - "Cannot operate on a closed database.")) - - class MySpecialException(Exception): - pass - - eng = create_engine('sqlite://', module=dbapi) - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.is_disconnect - ctx.is_disconnect = False - - try: - eng.connect() - assert False - except tsa.exc.DBAPIError as de: - assert not de.connection_invalidated - - @testing.requires.sqlite - def test_cant_connect_stay_invalidated(self): - e = create_engine('sqlite://') - sqlite3 = e.dialect.dbapi - - class MySpecialException(Exception): - pass - - eng = create_engine('sqlite://') - - @event.listens_for(eng, "handle_error") - def handle_error(ctx): - assert ctx.is_disconnect - - conn = eng.connect() - - conn.invalidate() - - eng.pool._creator = Mock( - side_effect=sqlite3.ProgrammingError( - "Cannot operate on a closed database.")) - - try: - conn.connection - assert False - except tsa.exc.DBAPIError: - assert conn.invalidated - - @testing.requires.sqlite - def test_dont_touch_non_dbapi_exception_on_connect(self): - e = create_engine('sqlite://') - sqlite3 = e.dialect.dbapi - - dbapi = MockDBAPI() - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error")) - e = create_engine('sqlite://', module=dbapi) - e.dialect.is_disconnect = is_disconnect = Mock() - assert_raises_message( - TypeError, - "I'm 
not a DBAPI error", - e.connect - ) - eq_(is_disconnect.call_count, 0) - - def test_ensure_dialect_does_is_disconnect_no_conn(self): - """test that is_disconnect() doesn't choke if no connection, - cursor given.""" - dialect = testing.db.dialect - dbapi = dialect.dbapi - assert not dialect.is_disconnect( - dbapi.OperationalError("test"), None, None) - - @testing.requires.sqlite - def test_invalidate_on_connect(self): - """test that is_disconnect() is called during connect. - - interpretation of connection failures are not supported by - every backend. - - """ - - e = create_engine('sqlite://') - sqlite3 = e.dialect.dbapi - - dbapi = MockDBAPI() - dbapi.Error = sqlite3.Error, - dbapi.ProgrammingError = sqlite3.ProgrammingError - dbapi.connect = Mock( - side_effect=sqlite3.ProgrammingError( - "Cannot operate on a closed database.")) - try: - create_engine('sqlite://', module=dbapi).connect() - assert False - except tsa.exc.DBAPIError as de: - assert de.connection_invalidated - def test_urlattr(self): """test the url attribute on ``Engine``.""" -- cgit v1.2.1 From 347db81aea9bfe301a9fe1fade644ad099545f3e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 10 Dec 2014 12:15:14 -0500 Subject: - keep working on fixing #3266, more cases, more tests --- lib/sqlalchemy/engine/base.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 9a8610344..918ee0e37 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1926,10 +1926,11 @@ class Engine(Connectable, log.Identified): """ - return self._connection_cls(self, - self.pool.connect(), - close_with_result=close_with_result, - **kwargs) + return self._connection_cls( + self, + self._wrap_pool_connect(self.pool.connect, None), + close_with_result=close_with_result, + **kwargs) def table_names(self, schema=None, connection=None): """Return a list of all table names available in the database. -- cgit v1.2.1 From 3c70f609507ccc6775495cc533265aeb645528cd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 10 Dec 2014 13:08:53 -0500 Subject: - fix up query update /delete documentation, make warnings a lot clearer, partial fixes for #3252 --- lib/sqlalchemy/orm/query.py | 179 +++++++++++++++++++++++++++----------------- 1 file changed, 110 insertions(+), 69 deletions(-) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 9b7747e15..1afffb90e 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -2725,6 +2725,18 @@ class Query(object): Deletes rows matched by this query from the database. + E.g.:: + + sess.query(User).filter(User.age == 25).\\ + delete(synchronize_session=False) + + sess.query(User).filter(User.age == 25).\\ + delete(synchronize_session='evaluate') + + .. warning:: The :meth:`.Query.delete` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. **Please read all caveats and warnings below.** + :param synchronize_session: chooses the strategy for the removal of matched objects from the session. Valid values are: @@ -2743,8 +2755,7 @@ class Query(object): ``'evaluate'`` - Evaluate the query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't - implemented, an error is raised. In that case you probably - want to use the 'fetch' strategy as a fallback. + implemented, an error is raised. 
The expression evaluator currently doesn't account for differing string collations between the database and Python. @@ -2752,29 +2763,42 @@ :return: the count of rows matched as returned by the database's "row count" feature. - This method has several key caveats: - - * The method does **not** offer in-Python cascading of relationships - - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured - for any foreign key references which require it, otherwise the - database may emit an integrity violation if foreign key references - are being enforced. - - After the DELETE, dependent objects in the :class:`.Session` which - were impacted by an ON DELETE may not contain the current - state, or may have been deleted. This issue is resolved once the - :class:`.Session` is expired, - which normally occurs upon :meth:`.Session.commit` or can be forced - by using :meth:`.Session.expire_all`. Accessing an expired object - whose row has been deleted will invoke a SELECT to locate the - row; when the row is not found, an - :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. - - * The :meth:`.MapperEvents.before_delete` and - :meth:`.MapperEvents.after_delete` - events are **not** invoked from this method. Instead, the - :meth:`.SessionEvents.after_bulk_delete` method is provided to act - upon a mass DELETE of entity rows. + .. warning:: **Additional Caveats for bulk query deletes** + + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON DELETE CASCADE/SET + NULL/etc. is configured for any foreign key references + which require it, otherwise the database may emit an + integrity violation if foreign key references are being + enforced. + + After the DELETE, dependent objects in the + :class:`.Session` which were impacted by an ON DELETE + may not contain the current state, or may have been + deleted. This issue is resolved once the + :class:`.Session` is expired, which normally occurs upon + :meth:`.Session.commit` or can be forced by using + :meth:`.Session.expire_all`. Accessing an expired + object whose row has been deleted will invoke a SELECT + to locate the row; when the row is not found, an + :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is + raised. + + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. + + * The ``'evaluate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a preceding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**. + + * The :meth:`.MapperEvents.before_delete` and + :meth:`.MapperEvents.after_delete` + events **are not invoked** from this method. Instead, the + :meth:`.SessionEvents.after_bulk_delete` method is provided to + act upon a mass DELETE of entity rows. .. seealso:: @@ -2797,17 +2821,21 @@ class Query(object): E.g.:: - sess.query(User).filter(User.age == 25).\ - update({User.age: User.age - 10}, synchronize_session='fetch') + sess.query(User).filter(User.age == 25).\\ + update({User.age: User.age - 10}, synchronize_session=False) - - sess.query(User).filter(User.age == 25).\ + sess.query(User).filter(User.age == 25).\\ update({"age": User.age - 10}, synchronize_session='evaluate') + .. warning:: The :meth:`.Query.update` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. 
**Please read all caveats and warnings below.** + + :param values: a dictionary with attributes names, or alternatively - mapped attributes or SQL expressions, as keys, and literal - values or sql expressions as values. + mapped attributes or SQL expressions, as keys, and literal + values or sql expressions as values. .. versionchanged:: 1.0.0 - string names in the values dictionary are now resolved against the mapped entity; previously, these @@ -2815,7 +2843,7 @@ class Query(object): translation. :param synchronize_session: chooses the strategy to update the - attributes on objects in the session. Valid values are: + attributes on objects in the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which @@ -2836,43 +2864,56 @@ class Query(object): string collations between the database and Python. :return: the count of rows matched as returned by the database's - "row count" feature. - - This method has several key caveats: - - * The method does **not** offer in-Python cascading of relationships - - it is assumed that ON UPDATE CASCADE is configured for any foreign - key references which require it, otherwise the database may emit an - integrity violation if foreign key references are being enforced. - - After the UPDATE, dependent objects in the :class:`.Session` which - were impacted by an ON UPDATE CASCADE may not contain the current - state; this issue is resolved once the :class:`.Session` is expired, - which normally occurs upon :meth:`.Session.commit` or can be forced - by using :meth:`.Session.expire_all`. - - * The method supports multiple table updates, as - detailed in :ref:`multi_table_updates`, and this behavior does - extend to support updates of joined-inheritance and other multiple - table mappings. However, the **join condition of an inheritance - mapper is currently not automatically rendered**. - Care must be taken in any multiple-table update to explicitly - include the joining condition between those tables, even in mappings - where this is normally automatic. - E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of - the ``Engineer`` local table using criteria against the ``Employee`` - local table might look like:: - - session.query(Engineer).\\ - filter(Engineer.id == Employee.id).\\ - filter(Employee.name == 'dilbert').\\ - update({"engineer_type": "programmer"}) - - * The :meth:`.MapperEvents.before_update` and - :meth:`.MapperEvents.after_update` - events are **not** invoked from this method. Instead, the - :meth:`.SessionEvents.after_bulk_update` method is provided to act - upon a mass UPDATE of entity rows. + "row count" feature. + + .. warning:: **Additional Caveats for bulk query updates** + + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON UPDATE CASCADE is + configured for any foreign key references which require + it, otherwise the database may emit an integrity + violation if foreign key references are being enforced. + + After the UPDATE, dependent objects in the + :class:`.Session` which were impacted by an ON UPDATE + CASCADE may not contain the current state; this issue is + resolved once the :class:`.Session` is expired, which + normally occurs upon :meth:`.Session.commit` or can be + forced by using :meth:`.Session.expire_all`. + + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. 
+ + * The ``'evaulate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a proceeding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**. + + * The method supports multiple table updates, as detailed + in :ref:`multi_table_updates`, and this behavior does + extend to support updates of joined-inheritance and + other multiple table mappings. However, the **join + condition of an inheritance mapper is not + automatically rendered**. Care must be taken in any + multiple-table update to explicitly include the joining + condition between those tables, even in mappings where + this is normally automatic. E.g. if a class ``Engineer`` + subclasses ``Employee``, an UPDATE of the ``Engineer`` + local table using criteria against the ``Employee`` + local table might look like:: + + session.query(Engineer).\\ + filter(Engineer.id == Employee.id).\\ + filter(Employee.name == 'dilbert').\\ + update({"engineer_type": "programmer"}) + + * The :meth:`.MapperEvents.before_update` and + :meth:`.MapperEvents.after_update` + events **are not invoked from this method**. Instead, the + :meth:`.SessionEvents.after_bulk_update` method is provided to + act upon a mass UPDATE of entity rows. .. seealso:: -- cgit v1.2.1 From 08e02579e03bf37cfc742c549b837841ec8f7ffe Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 12 Dec 2014 15:55:34 -0500 Subject: - update zoomark --- test/profiles.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/profiles.txt b/test/profiles.txt index 97ef13873..c11000e29 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -351,12 +351,12 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite # TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5562,277,3697,11893,1106,1968,2433 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5606,277,3929,13595,1223,2011,2692 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5238,273,3577,11529,1077,1886,2439 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5260,273,3673,12701,1171,1893,2631 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 5221,273,3577,11529,1077,1883,2439 -test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5243,273,3697,12796,1187,1923,2653 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5562,292,3697,11893,1106,1968,2433 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5606,292,3929,13595,1223,2011,2692 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5497,274,3609,11647,1097,1921,2486 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5519,274,3705,12819,1191,1928,2678 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 5497,273,3577,11529,1077,1883,2439 +test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5519,273,3697,12796,1187,1923,2653 # TEST: 
test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation -- cgit v1.2.1 From cf7981f60d485f17465f44c6ff651ae283ade377 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 12 Dec 2014 19:59:11 -0500 Subject: - Added new method :meth:`.Session.invalidate`, functions similarly to :meth:`.Session.close`, except also calls :meth:`.Connection.invalidate` on all connections, guaranteeing that they will not be returned to the connection pool. This is useful in situations e.g. dealing with gevent timeouts when it is not safe to use the connection further, even for rollbacks. references #3258 --- doc/build/changelog/changelog_09.rst | 12 +++++++++++ lib/sqlalchemy/orm/session.py | 42 ++++++++++++++++++++++++++++++++++-- test/orm/test_session.py | 3 +++ test/orm/test_transaction.py | 17 +++++++++++++++ 4 files changed, 72 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index f10d48273..419827959 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -13,6 +13,18 @@ .. changelog:: :version: 0.9.9 + .. change:: + :tags: enhancement, orm + :versions: 1.0.0 + + Added new method :meth:`.Session.invalidate`, functions similarly + to :meth:`.Session.close`, except also calls + :meth:`.Connection.invalidate` + on all connections, guaranteeing that they will not be returned to + the connection pool. This is useful in situations e.g. dealing + with gevent timeouts when it is not safe to use the connection further, + even for rollbacks. + .. change:: :tags: bug, examples :versions: 1.0.0 diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index d40d28154..507e99b2e 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -435,11 +435,13 @@ class SessionTransaction(object): self.session.dispatch.after_rollback(self.session) - def close(self): + def close(self, invalidate=False): self.session.transaction = self._parent if self._parent is None: for connection, transaction, autoclose in \ set(self._connections.values()): + if invalidate: + connection.invalidate() if autoclose: connection.close() else: @@ -1000,10 +1002,46 @@ class Session(_SessionClassMethods): not use any connection resources until they are first needed. """ + self._close_impl(invalidate=False) + + def invalidate(self): + """Close this Session, using connection invalidation. + + This is a variant of :meth:`.Session.close` that will additionally + ensure that the :meth:`.Connection.invalidate` method will be called + on all :class:`.Connection` objects. This can be called when + the database is known to be in a state where the connections are + no longer safe to be used. + + E.g.:: + + try: + sess = Session() + sess.add(User()) + sess.commit() + except gevent.Timeout: + sess.invalidate() + raise + except: + sess.rollback() + raise + + This clears all items and ends any transaction in progress. + + If this session were created with ``autocommit=False``, a new + transaction is immediately begun. Note that this new transaction does + not use any connection resources until they are first needed. + + .. versionadded:: 0.9.9 + + """ + self._close_impl(invalidate=True) + + def _close_impl(self, invalidate): self.expunge_all() if self.transaction is not None: for transaction in self.transaction._iterate_parents(): - transaction.close() + transaction.close(invalidate) def expunge_all(self): """Remove all object instances from this ``Session``. 
diff --git a/test/orm/test_session.py b/test/orm/test_session.py index b81c03f88..2aa0cd3eb 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -1364,6 +1364,9 @@ class DisposedStates(fixtures.MappedTest): def test_close(self): self._test_session().close() + def test_invalidate(self): + self._test_session().invalidate() + def test_expunge_all(self): self._test_session().expunge_all() diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py index ba31e4c7d..1d7e8e693 100644 --- a/test/orm/test_transaction.py +++ b/test/orm/test_transaction.py @@ -184,6 +184,23 @@ class SessionTransactionTest(FixtureTest): assert users.count().scalar() == 1 assert addresses.count().scalar() == 1 + @testing.requires.independent_connections + def test_invalidate(self): + User, users = self.classes.User, self.tables.users + mapper(User, users) + sess = Session() + u = User(name='u1') + sess.add(u) + sess.flush() + c1 = sess.connection(User) + + sess.invalidate() + assert c1.invalidated + + eq_(sess.query(User).all(), []) + c2 = sess.connection(User) + assert not c2.invalidated + def test_subtransaction_on_noautocommit(self): User, users = self.classes.User, self.tables.users -- cgit v1.2.1 From 91af7337878612b2497269e600eef147a0f5bb30 Mon Sep 17 00:00:00 2001 From: Jon Nelson Date: Tue, 11 Nov 2014 22:46:07 -0600 Subject: - fix unique constraint parsing for sqlite -- may return '' for name, however --- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index ccd7f2539..30d8a6ea3 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1173,7 +1173,7 @@ class SQLiteDialect(default.DefaultDialect): return [] table_data = row[0] - UNIQUE_PATTERN = 'CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)' + UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) )?UNIQUE \(([^\)]+)\)' return [ {'name': name, 'column_names': [col.strip(' "') for col in cols.split(',')]} -- cgit v1.2.1 From 85c04dd0bb9d0f140dde25e3901b172ebb431f7e Mon Sep 17 00:00:00 2001 From: Jon Nelson Date: Fri, 14 Nov 2014 19:53:28 -0600 Subject: - add test_get_unnamed_unique_constraints to SQLite reflection tests --- test/dialect/test_sqlite.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 22772d2fb..b4524dc27 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -603,6 +603,24 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): eq_(inspector.get_unique_constraints('bar'), [{'column_names': [u'b'], 'name': u'bar_b'}]) + def test_get_unnamed_unique_constraints(self): + meta = MetaData(testing.db) + t1 = Table('foo', meta, Column('f', Integer), + UniqueConstraint('f')) + t2 = Table('bar', meta, Column('b', Integer), + UniqueConstraint('b'), + prefixes=['TEMPORARY']) + meta.create_all() + from sqlalchemy.engine.reflection import Inspector + try: + inspector = Inspector(testing.db) + eq_(inspector.get_unique_constraints('foo'), + [{'column_names': [u'f'], 'name': u''}]) + eq_(inspector.get_unique_constraints('bar'), + [{'column_names': [u'b'], 'name': u''}]) + finally: + meta.drop_all() + class AttachedMemoryDBTest(fixtures.TestBase): __only_on__ = 'sqlite' -- cgit v1.2.1 From 5b146e1bab7b440038c356f388e3362a669399c1 Mon Sep 17 00:00:00 2001 From: Jon Nelson Date: Fri, 14 Nov 2014 20:05:58 -0600 Subject: - add tentative 'changelog' documentation on 
#3244 --- doc/build/changelog/changelog_09.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 419827959..f83afd2da 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -59,6 +59,15 @@ replaced, however if the mapping were already used for querying, the old relationship would still be referenced within some registries. + .. change:: + :tags: bug, sqlite + :versions: 1.0.0 + :tickets: 3244 + + Fixed issue where un-named UNIQUE constraints were not being + reflected in SQLite. Now un-named UNIQUE constraints are returned + with a name of u''. + .. change:: :tags: bug, sql :versions: 1.0.0 -- cgit v1.2.1 From 468db416dbf284f0e7dddde90ec9641dc89428c6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 13 Dec 2014 18:04:11 -0500 Subject: - rework sqlite FK and unique constraint system to combine both PRAGMA and regexp parsing of SQL in order to form a complete picture of constraints + their names. fixes #3244 fixes #3261 - factor various PRAGMA work to be centralized into one call --- doc/build/changelog/changelog_09.rst | 9 - doc/build/changelog/changelog_10.rst | 9 + doc/build/changelog/migration_10.rst | 19 ++ lib/sqlalchemy/dialects/sqlite/base.py | 299 +++++++++++++++--------- test/dialect/test_sqlite.py | 414 +++++++++++++++++++++++---------- 5 files changed, 506 insertions(+), 244 deletions(-) diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index f83afd2da..419827959 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -59,15 +59,6 @@ replaced, however if the mapping were already used for querying, the old relationship would still be referenced within some registries. - .. change:: - :tags: bug, sqlite - :versions: 1.0.0 - :tickets: 3244 - - Fixed issue where un-named UNIQUE constraints were not being - reflected in SQLite. Now un-named UNIQUE constraints are returned - with a name of u''. - .. change:: :tags: bug, sql :versions: 1.0.0 diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst index d6f36e97e..4da7b9456 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -22,6 +22,15 @@ series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`. + .. change:: + :tags: bug, sqlite + :tickets: 3244, 3261 + + UNIQUE and FOREIGN KEY constraints are now fully reflected on + SQLite both with and without names. Previously, foreign key + names were ignored and unnamed unique constraints were skipped. + Thanks to Jon Nelson for assistance with this. + .. change:: :tags: feature, examples diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index cd5d420e5..e1fb13662 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -1680,6 +1680,25 @@ reflection from temp tables as well, which is :ticket:`3203`. :ticket:`3204` +SQLite named and unnamed UNIQUE and FOREIGN KEY constraints will inspect and reflect +------------------------------------------------------------------------------------- + +UNIQUE and FOREIGN KEY constraints are now fully reflected on +SQLite both with and without names. Previously, foreign key +names were ignored and unnamed unique constraints were skipped. In particular +this will help with Alembic's new SQLite migration features. 
+ +To achieve this, for both foreign keys and unique constraints, the result +of PRAGMA foreign_keys, index_list, and index_info is combined with regular +expression parsing of the CREATE TABLE statement overall to form a complete +picture of the names of constraints, as well as differentiating UNIQUE +constraints that were created as UNIQUE vs. unnamed INDEXes. + +:ticket:`3244` + +:ticket:`3261` + + .. _change_3220: Improved support for CTEs in Oracle diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 30d8a6ea3..e79299527 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -913,22 +913,9 @@ class SQLiteDialect(default.DefaultDialect): return [row[0] for row in rs] def has_table(self, connection, table_name, schema=None): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%stable_info(%s)" % (pragma, qtable) - cursor = _pragma_cursor(connection.execute(statement)) - row = cursor.fetchone() - - # consume remaining rows, to work around - # http://www.sqlite.org/cvstrac/tktview?tn=1884 - while not cursor.closed and cursor.fetchone() is not None: - pass - - return row is not None + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) + return bool(info) @reflection.cache def get_view_names(self, connection, schema=None, **kw): @@ -970,18 +957,11 @@ class SQLiteDialect(default.DefaultDialect): @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%stable_info(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) - rows = c.fetchall() columns = [] - for row in rows: + for row in info: (name, type_, nullable, default, primary_key) = ( row[1], row[2].upper(), not row[3], row[4], row[5]) @@ -1068,92 +1048,192 @@ class SQLiteDialect(default.DefaultDialect): @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%sforeign_key_list(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) - fkeys = [] + # sqlite makes this *extremely difficult*. + # First, use the pragma to get the actual FKs. 
+ pragma_fks = self._get_table_pragma( + connection, "foreign_key_list", + table_name, schema=schema + ) + fks = {} - while True: - row = c.fetchone() - if row is None: - break + + for row in pragma_fks: (numerical_id, rtbl, lcol, rcol) = ( row[0], row[2], row[3], row[4]) - self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol) - return fkeys + if rcol is None: + rcol = lcol - def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol): - # sqlite won't return rcol if the table was created with REFERENCES - # , no col - if rcol is None: - rcol = lcol + if self._broken_fk_pragma_quotes: + rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) - if self._broken_fk_pragma_quotes: - rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) + if numerical_id in fks: + fk = fks[numerical_id] + else: + fk = fks[numerical_id] = { + 'name': None, + 'constrained_columns': [], + 'referred_schema': None, + 'referred_table': rtbl, + 'referred_columns': [], + } + fks[numerical_id] = fk - try: - fk = fks[numerical_id] - except KeyError: - fk = { - 'name': None, - 'constrained_columns': [], - 'referred_schema': None, - 'referred_table': rtbl, - 'referred_columns': [], - } - fkeys.append(fk) - fks[numerical_id] = fk - - if lcol not in fk['constrained_columns']: fk['constrained_columns'].append(lcol) - if rcol not in fk['referred_columns']: fk['referred_columns'].append(rcol) - return fk + + def fk_sig(constrained_columns, referred_table, referred_columns): + return tuple(constrained_columns) + (referred_table,) + \ + tuple(referred_columns) + + # then, parse the actual SQL and attempt to find DDL that matches + # the names as well. SQLite saves the DDL in whatever format + # it was typed in as, so need to be liberal here. + + keys_by_signature = dict( + ( + fk_sig( + fk['constrained_columns'], + fk['referred_table'], fk['referred_columns']), + fk + ) for fk in fks.values() + ) + + table_data = self._get_table_sql(connection, table_name, schema=schema) + if table_data is None: + # system tables, etc. + return [] + + def parse_fks(): + FK_PATTERN = ( + '(?:CONSTRAINT (\w+) +)?' + 'FOREIGN KEY *\( *(.+?) 
*\) +' + 'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)' + ) + + for match in re.finditer(FK_PATTERN, table_data, re.I): + ( + constraint_name, constrained_columns, + referred_quoted_name, referred_name, + referred_columns) = match.group(1, 2, 3, 4, 5) + constrained_columns = list( + self._find_cols_in_sig(constrained_columns)) + if not referred_columns: + referred_columns = constrained_columns + else: + referred_columns = list( + self._find_cols_in_sig(referred_columns)) + referred_name = referred_quoted_name or referred_name + yield ( + constraint_name, constrained_columns, + referred_name, referred_columns) + fkeys = [] + + for ( + constraint_name, constrained_columns, + referred_name, referred_columns) in parse_fks(): + sig = fk_sig( + constrained_columns, referred_name, referred_columns) + if sig not in keys_by_signature: + util.warn( + "WARNING: SQL-parsed foreign key constraint " + "'%s' could not be located in PRAGMA " + "foreign_keys for table %s" % ( + sig, + table_name + )) + continue + key = keys_by_signature.pop(sig) + key['name'] = constraint_name + fkeys.append(key) + # assume the remainders are the unnamed, inline constraints, just + # use them as is as it's extremely difficult to parse inline + # constraints + fkeys.extend(keys_by_signature.values()) + return fkeys + + def _find_cols_in_sig(self, sig): + for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): + yield match.group(1) or match.group(2) + + @reflection.cache + def get_unique_constraints(self, connection, table_name, + schema=None, **kw): + + auto_index_by_sig = {} + for idx in self.get_indexes( + connection, table_name, schema=schema, + include_auto_indexes=True, **kw): + if not idx['name'].startswith("sqlite_autoindex"): + continue + sig = tuple(idx['column_names']) + auto_index_by_sig[sig] = idx + + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw) + if not table_data: + return [] + + unique_constraints = [] + + def parse_uqs(): + UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) +)?UNIQUE *\((.+?)\)' + INLINE_UNIQUE_PATTERN = ( + '(?:(".+?")|([a-z0-9]+)) ' + '+[a-z0-9_ ]+? +UNIQUE') + + for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): + name, cols = match.group(1, 2) + yield name, list(self._find_cols_in_sig(cols)) + + # we need to match inlines as well, as we seek to differentiate + # a UNIQUE constraint from a UNIQUE INDEX, even though these + # are kind of the same thing :) + for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): + cols = list( + self._find_cols_in_sig(match.group(1) or match.group(2))) + yield None, cols + + for name, cols in parse_uqs(): + sig = tuple(cols) + if sig in auto_index_by_sig: + auto_index_by_sig.pop(sig) + parsed_constraint = { + 'name': name, + 'column_names': cols + } + unique_constraints.append(parsed_constraint) + # NOTE: auto_index_by_sig might not be empty here, + # the PRIMARY KEY may have an entry. + return unique_constraints @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." 
% quote(schema) - else: - pragma = "PRAGMA " - include_auto_indexes = kw.pop('include_auto_indexes', False) - qtable = quote(table_name) - statement = "%sindex_list(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) + pragma_indexes = self._get_table_pragma( + connection, "index_list", table_name, schema=schema) indexes = [] - while True: - row = c.fetchone() - if row is None: - break + + include_auto_indexes = kw.pop('include_auto_indexes', False) + for row in pragma_indexes: # ignore implicit primary key index. # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html - elif (not include_auto_indexes and - row[1].startswith('sqlite_autoindex')): + if (not include_auto_indexes and + row[1].startswith('sqlite_autoindex')): continue indexes.append(dict(name=row[1], column_names=[], unique=row[2])) + # loop thru unique indexes to get the column names. for idx in indexes: - statement = "%sindex_info(%s)" % (pragma, quote(idx['name'])) - c = connection.execute(statement) - cols = idx['column_names'] - while True: - row = c.fetchone() - if row is None: - break - cols.append(row[2]) + pragma_index = self._get_table_pragma( + connection, "index_info", idx['name']) + + for row in pragma_index: + idx['column_names'].append(row[2]) return indexes @reflection.cache - def get_unique_constraints(self, connection, table_name, - schema=None, **kw): + def _get_table_sql(self, connection, table_name, schema=None, **kw): try: s = ("SELECT sql FROM " " (SELECT * FROM sqlite_master UNION ALL " @@ -1165,27 +1245,22 @@ class SQLiteDialect(default.DefaultDialect): s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " "AND type = 'table'") % table_name rs = connection.execute(s) - row = rs.fetchone() - if row is None: - # sqlite won't return the schema for the sqlite_master or - # sqlite_temp_master tables from this query. These tables - # don't have any unique constraints anyway. - return [] - table_data = row[0] - - UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) )?UNIQUE \(([^\)]+)\)' - return [ - {'name': name, - 'column_names': [col.strip(' "') for col in cols.split(',')]} - for name, cols in re.findall(UNIQUE_PATTERN, table_data) - ] + return rs.scalar() - -def _pragma_cursor(cursor): - """work around SQLite issue whereby cursor.description - is blank when PRAGMA returns no rows.""" - - if cursor.closed: - cursor.fetchone = lambda: None - cursor.fetchall = lambda: [] - return cursor + def _get_table_pragma(self, connection, pragma, table_name, schema=None): + quote = self.identifier_preparer.quote_identifier + if schema is not None: + statement = "PRAGMA %s." 
% quote(schema) + else: + statement = "PRAGMA " + qtable = quote(table_name) + statement = "%s%s(%s)" % (statement, pragma, qtable) + cursor = connection.execute(statement) + if not cursor.closed: + # work around SQLite issue whereby cursor.description + # is blank when PRAGMA returns no rows: + # http://www.sqlite.org/cvstrac/tktview?tn=1884 + result = cursor.fetchall() + else: + result = [] + return result diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index b4524dc27..44e4eda42 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -22,6 +22,7 @@ from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \ from sqlalchemy import testing from sqlalchemy.schema import CreateTable from sqlalchemy.engine.reflection import Inspector +from sqlalchemy.testing import mock class TestTypes(fixtures.TestBase, AssertsExecutionResults): @@ -500,30 +501,6 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): # assert j.onclause.compare(table1.c['"id"'] # == table2.c['"aid"']) - def test_legacy_quoted_identifiers_unit(self): - dialect = sqlite.dialect() - dialect._broken_fk_pragma_quotes = True - - for row in [ - (0, 'target', 'tid', 'id'), - (0, '"target"', 'tid', 'id'), - (0, '[target]', 'tid', 'id'), - (0, "'target'", 'tid', 'id'), - (0, '`target`', 'tid', 'id'), - ]: - fks = {} - fkeys = [] - dialect._parse_fk(fks, fkeys, *row) - eq_( - fkeys, - [{ - 'referred_table': 'target', - 'referred_columns': ['id'], - 'referred_schema': None, - 'name': None, - 'constrained_columns': ['tid'] - }]) - @testing.provide_metadata def test_description_encoding(self): # amazingly, pysqlite seems to still deliver cursor.description @@ -557,69 +534,6 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults): e = create_engine('sqlite+pysqlite:///foo.db') assert e.pool.__class__ is pool.NullPool - @testing.provide_metadata - def test_dont_reflect_autoindex(self): - meta = self.metadata - Table('foo', meta, Column('bar', String, primary_key=True)) - meta.create_all() - inspector = Inspector(testing.db) - eq_(inspector.get_indexes('foo'), []) - eq_( - inspector.get_indexes('foo', include_auto_indexes=True), - [{ - 'unique': 1, - 'name': 'sqlite_autoindex_foo_1', - 'column_names': ['bar']}]) - - @testing.provide_metadata - def test_create_index_with_schema(self): - """Test creation of index with explicit schema""" - - meta = self.metadata - Table( - 'foo', meta, Column('bar', String, index=True), - schema='main') - meta.create_all() - inspector = Inspector(testing.db) - eq_( - inspector.get_indexes('foo', schema='main'), - [{'unique': 0, 'name': u'ix_main_foo_bar', - 'column_names': [u'bar']}]) - - @testing.provide_metadata - def test_get_unique_constraints(self): - meta = self.metadata - Table( - 'foo', meta, Column('f', Integer), - UniqueConstraint('f', name='foo_f')) - Table( - 'bar', meta, Column('b', Integer), - UniqueConstraint('b', name='bar_b'), - prefixes=['TEMPORARY']) - meta.create_all() - inspector = Inspector(testing.db) - eq_(inspector.get_unique_constraints('foo'), - [{'column_names': [u'f'], 'name': u'foo_f'}]) - eq_(inspector.get_unique_constraints('bar'), - [{'column_names': [u'b'], 'name': u'bar_b'}]) - - def test_get_unnamed_unique_constraints(self): - meta = MetaData(testing.db) - t1 = Table('foo', meta, Column('f', Integer), - UniqueConstraint('f')) - t2 = Table('bar', meta, Column('b', Integer), - UniqueConstraint('b'), - prefixes=['TEMPORARY']) - meta.create_all() - from sqlalchemy.engine.reflection import Inspector - try: - 
inspector = Inspector(testing.db) - eq_(inspector.get_unique_constraints('foo'), - [{'column_names': [u'f'], 'name': u''}]) - eq_(inspector.get_unique_constraints('bar'), - [{'column_names': [u'b'], 'name': u''}]) - finally: - meta.drop_all() class AttachedMemoryDBTest(fixtures.TestBase): @@ -1072,52 +986,306 @@ class ReflectHeadlessFKsTest(fixtures.TestBase): assert b.c.id.references(a.c.id) -class ReflectFKConstraintTest(fixtures.TestBase): +class ConstraintReflectionTest(fixtures.TestBase): __only_on__ = 'sqlite' - def setup(self): - testing.db.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)") - testing.db.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)") - testing.db.execute( - "CREATE TABLE b (id INTEGER PRIMARY KEY, " - "FOREIGN KEY(id) REFERENCES a1(id)," - "FOREIGN KEY(id) REFERENCES a2(id)" - ")") - testing.db.execute( - "CREATE TABLE c (id INTEGER, " - "CONSTRAINT bar PRIMARY KEY(id)," - "CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id)," - "CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)" - ")") + @classmethod + def setup_class(cls): + with testing.db.begin() as conn: + + conn.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)") + conn.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)") + conn.execute( + "CREATE TABLE b (id INTEGER PRIMARY KEY, " + "FOREIGN KEY(id) REFERENCES a1(id)," + "FOREIGN KEY(id) REFERENCES a2(id)" + ")") + conn.execute( + "CREATE TABLE c (id INTEGER, " + "CONSTRAINT bar PRIMARY KEY(id)," + "CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id)," + "CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)" + ")") + conn.execute( + # the lower casing + inline is intentional here + "CREATE TABLE d (id INTEGER, x INTEGER unique)") + conn.execute( + # the lower casing + inline is intentional here + 'CREATE TABLE d1 ' + '(id INTEGER, "some ( STUPID n,ame" INTEGER unique)') + conn.execute( + # the lower casing + inline is intentional here + 'CREATE TABLE d2 ( "some STUPID n,ame" INTEGER unique)') + conn.execute( + # the lower casing + inline is intentional here + 'CREATE TABLE d3 ( "some STUPID n,ame" INTEGER NULL unique)') + + conn.execute( + # lower casing + inline is intentional + "CREATE TABLE e (id INTEGER, x INTEGER references a2(id))") + conn.execute( + 'CREATE TABLE e1 (id INTEGER, "some ( STUPID n,ame" INTEGER ' + 'references a2 ("some ( STUPID n,ame"))') + conn.execute( + 'CREATE TABLE e2 (id INTEGER, ' + '"some ( STUPID n,ame" INTEGER NOT NULL ' + 'references a2 ("some ( STUPID n,ame"))') + + conn.execute( + "CREATE TABLE f (x INTEGER, CONSTRAINT foo_fx UNIQUE(x))" + ) + conn.execute( + "CREATE TEMPORARY TABLE g " + "(x INTEGER, CONSTRAINT foo_gx UNIQUE(x))" + ) + conn.execute( + # intentional broken casing + "CREATE TABLE h (x INTEGER, COnstraINT foo_hx unIQUE(x))" + ) + conn.execute( + "CREATE TABLE i (x INTEGER, y INTEGER, PRIMARY KEY(x, y))" + ) + conn.execute( + "CREATE TABLE j (id INTEGER, q INTEGER, p INTEGER, " + "PRIMARY KEY(id), FOreiGN KEY(q,p) REFERENCes i(x,y))" + ) + conn.execute( + "CREATE TABLE k (id INTEGER, q INTEGER, p INTEGER, " + "PRIMARY KEY(id), " + "conSTRAINT my_fk FOreiGN KEY ( q , p ) " + "REFERENCes i ( x , y ))" + ) - def teardown(self): - testing.db.execute("drop table c") - testing.db.execute("drop table b") - testing.db.execute("drop table a1") - testing.db.execute("drop table a2") + meta = MetaData() + Table( + 'l', meta, Column('bar', String, index=True), + schema='main') + + Table( + 'm', meta, + Column('id', Integer, primary_key=True), + Column('x', String(30)), + UniqueConstraint('x') + ) - def test_name_is_none(self): 
+ Table( + 'n', meta, + Column('id', Integer, primary_key=True), + Column('x', String(30)), + UniqueConstraint('x'), + prefixes=['TEMPORARY'] + ) + + meta.create_all(conn) + + # will contain an "autoindex" + conn.execute("create table o (foo varchar(20) primary key)") + + @classmethod + def teardown_class(cls): + with testing.db.begin() as conn: + for name in [ + "m", "main.l", "k", "j", "i", "h", "g", "f", "e", "e1", + "d", "d1", "d2", "c", "b", "a1", "a2"]: + conn.execute("drop table %s" % name) + + def test_legacy_quoted_identifiers_unit(self): + dialect = sqlite.dialect() + dialect._broken_fk_pragma_quotes = True + + for row in [ + (0, None, 'target', 'tid', 'id', None), + (0, None, '"target"', 'tid', 'id', None), + (0, None, '[target]', 'tid', 'id', None), + (0, None, "'target'", 'tid', 'id', None), + (0, None, '`target`', 'tid', 'id', None), + ]: + def _get_table_pragma(*arg, **kw): + return [row] + + def _get_table_sql(*arg, **kw): + return "CREATE TABLE foo "\ + "(tid INTEGER, "\ + "FOREIGN KEY(tid) REFERENCES %s (id))" % row[2] + with mock.patch.object( + dialect, "_get_table_pragma", _get_table_pragma): + with mock.patch.object( + dialect, '_get_table_sql', _get_table_sql): + + fkeys = dialect.get_foreign_keys(None, 'foo') + eq_( + fkeys, + [{ + 'referred_table': 'target', + 'referred_columns': ['id'], + 'referred_schema': None, + 'name': None, + 'constrained_columns': ['tid'] + }]) + + def test_foreign_key_name_is_none(self): # and not "0" - meta = MetaData() - b = Table('b', meta, autoload=True, autoload_with=testing.db) + inspector = Inspector(testing.db) + fks = inspector.get_foreign_keys('b') eq_( - [con.name for con in b.constraints], - [None, None, None] + fks, + [ + {'referred_table': 'a1', 'referred_columns': ['id'], + 'referred_schema': None, 'name': None, + 'constrained_columns': ['id']}, + {'referred_table': 'a2', 'referred_columns': ['id'], + 'referred_schema': None, 'name': None, + 'constrained_columns': ['id']}, + ] ) - def test_name_not_none(self): - # we don't have names for PK constraints, - # it appears we get back None in the pragma for - # FKs also (also it doesn't even appear to be documented on - # sqlite's docs - # at http://www.sqlite.org/pragma.html#pragma_foreign_key_list - # how did we ever know that's the "name" field ??) 
+ def test_foreign_key_name_is_not_none(self): + inspector = Inspector(testing.db) + fks = inspector.get_foreign_keys('c') + eq_( + fks, + [ + { + 'referred_table': 'a1', 'referred_columns': ['id'], + 'referred_schema': None, 'name': 'foo1', + 'constrained_columns': ['id']}, + { + 'referred_table': 'a2', 'referred_columns': ['id'], + 'referred_schema': None, 'name': 'foo2', + 'constrained_columns': ['id']}, + ] + ) - meta = MetaData() - c = Table('c', meta, autoload=True, autoload_with=testing.db) + def test_unnamed_inline_foreign_key(self): + inspector = Inspector(testing.db) + fks = inspector.get_foreign_keys('e') + eq_( + fks, + [{ + 'referred_table': 'a2', 'referred_columns': ['id'], + 'referred_schema': None, + 'name': None, 'constrained_columns': ['x'] + }] + ) + + def test_unnamed_inline_foreign_key_quoted(self): + inspector = Inspector(testing.db) + + inspector = Inspector(testing.db) + fks = inspector.get_foreign_keys('e1') + eq_( + fks, + [{ + 'referred_table': 'a2', + 'referred_columns': ['some ( STUPID n,ame'], + 'referred_schema': None, + 'name': None, 'constrained_columns': ['some ( STUPID n,ame'] + }] + ) + fks = inspector.get_foreign_keys('e2') + eq_( + fks, + [{ + 'referred_table': 'a2', + 'referred_columns': ['some ( STUPID n,ame'], + 'referred_schema': None, + 'name': None, 'constrained_columns': ['some ( STUPID n,ame'] + }] + ) + + def test_foreign_key_composite_broken_casing(self): + inspector = Inspector(testing.db) + fks = inspector.get_foreign_keys('j') + eq_( + fks, + [{ + 'referred_table': 'i', + 'referred_columns': ['x', 'y'], + 'referred_schema': None, 'name': None, + 'constrained_columns': ['q', 'p']}] + ) + fks = inspector.get_foreign_keys('k') + eq_( + fks, + [{'referred_table': 'i', 'referred_columns': ['x', 'y'], + 'referred_schema': None, 'name': 'my_fk', + 'constrained_columns': ['q', 'p']}] + ) + + def test_dont_reflect_autoindex(self): + inspector = Inspector(testing.db) + eq_(inspector.get_indexes('o'), []) + eq_( + inspector.get_indexes('o', include_auto_indexes=True), + [{ + 'unique': 1, + 'name': 'sqlite_autoindex_o_1', + 'column_names': ['foo']}]) + + def test_create_index_with_schema(self): + """Test creation of index with explicit schema""" + + inspector = Inspector(testing.db) + eq_( + inspector.get_indexes('l', schema='main'), + [{'unique': 0, 'name': u'ix_main_l_bar', + 'column_names': [u'bar']}]) + + def test_unique_constraint_named(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("f"), + [{'column_names': ['x'], 'name': 'foo_fx'}] + ) + + def test_unique_constraint_named_broken_casing(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("h"), + [{'column_names': ['x'], 'name': 'foo_hx'}] + ) + + def test_unique_constraint_named_broken_temp(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("g"), + [{'column_names': ['x'], 'name': 'foo_gx'}] + ) + + def test_unique_constraint_unnamed_inline(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("d"), + [{'column_names': ['x'], 'name': None}] + ) + + def test_unique_constraint_unnamed_inline_quoted(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("d1"), + [{'column_names': ['some ( STUPID n,ame'], 'name': None}] + ) + eq_( + inspector.get_unique_constraints("d2"), + [{'column_names': ['some STUPID n,ame'], 'name': None}] + ) + eq_( + inspector.get_unique_constraints("d3"), + [{'column_names': ['some STUPID n,ame'], 'name': 
None}] + ) + + def test_unique_constraint_unnamed_normal(self): + inspector = Inspector(testing.db) + eq_( + inspector.get_unique_constraints("m"), + [{'column_names': ['x'], 'name': None}] + ) + + def test_unique_constraint_unnamed_normal_temporary(self): + inspector = Inspector(testing.db) eq_( - set([con.name for con in c.constraints]), - set([None, None]) + inspector.get_unique_constraints("n"), + [{'column_names': ['x'], 'name': None}] ) -- cgit v1.2.1 From 7cd4362924dd0133a604d4a0c52f1566acbd31ff Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 14 Dec 2014 16:21:40 -0500 Subject: - automap isn't new anymore --- doc/build/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/index.rst b/doc/build/index.rst index b65755d43..205a5c12b 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -39,7 +39,7 @@ of Python objects, proceed first to the tutorial. :doc:`Declarative Extension ` | :doc:`Association Proxy ` | :doc:`Hybrid Attributes ` | - :doc:`Automap ` (**new**) | + :doc:`Automap ` | :doc:`Mutable Scalars ` | :doc:`Ordered List ` -- cgit v1.2.1 From 8038cfa0771ff860f48967a6800477ce8a508d65 Mon Sep 17 00:00:00 2001 From: Tony Locke Date: Sun, 24 Aug 2014 16:33:29 +0100 Subject: pg8000 client_encoding in create_engine() The pg8000 dialect now supports the setting of the PostgreSQL parameter client_encoding from create_engine(). --- lib/sqlalchemy/dialects/postgresql/pg8000.py | 61 ++++++++++++++++++++++++---- test/dialect/postgresql/test_dialect.py | 9 ++-- 2 files changed, 60 insertions(+), 10 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index 4ccc90208..a76787016 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -13,17 +13,30 @@ postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] :url: https://pythonhosted.org/pg8000/ + +.. _pg8000_unicode: + Unicode ------- -When communicating with the server, pg8000 **always uses the server-side -character set**. SQLAlchemy has no ability to modify what character set -pg8000 chooses to use, and additionally SQLAlchemy does no unicode conversion -of any kind with the pg8000 backend. The origin of the client encoding setting -is ultimately the CLIENT_ENCODING setting in postgresql.conf. +pg8000 will encode / decode string values between it and the server using the +PostgreSQL ``client_encoding`` parameter; by default this is the value in +the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. +Typically, this can be changed to ``utf-8``, as a more useful default:: + + #client_encoding = sql_ascii # actually, defaults to database + # encoding + client_encoding = utf8 + +The ``client_encoding`` can be overriden for a session by executing the SQL: -It is not necessary, though is also harmless, to pass the "encoding" parameter -to :func:`.create_engine` when using pg8000. +SET CLIENT_ENCODING TO 'utf8'; + +SQLAlchemy will execute this SQL on all new connections based on the value +passed to :func:`.create_engine` using the ``client_encoding`` parameter:: + + engine = create_engine( + "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8') .. 
_pg8000_isolation_level: @@ -133,6 +146,10 @@ class PGDialect_pg8000(PGDialect): } ) + def __init__(self, client_encoding=None, **kwargs): + PGDialect.__init__(self, **kwargs) + self.client_encoding = client_encoding + def initialize(self, connection): if self.dbapi and hasattr(self.dbapi, '__version__'): self._dbapi_version = tuple([ @@ -181,6 +198,16 @@ class PGDialect_pg8000(PGDialect): (level, self.name, ", ".join(self._isolation_lookup)) ) + def set_client_encoding(self, connection, client_encoding): + # adjust for ConnectionFairy possibly being present + if hasattr(connection, 'connection'): + connection = connection.connection + + cursor = connection.cursor() + cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'") + cursor.execute("COMMIT") + cursor.close() + def do_begin_twophase(self, connection, xid): connection.connection.tpc_begin((0, xid, '')) @@ -198,4 +225,24 @@ class PGDialect_pg8000(PGDialect): def do_recover_twophase(self, connection): return [row[1] for row in connection.connection.tpc_recover()] + def on_connect(self): + fns = [] + if self.client_encoding is not None: + def on_connect(conn): + self.set_client_encoding(conn, self.client_encoding) + fns.append(on_connect) + + if self.isolation_level is not None: + def on_connect(conn): + self.set_isolation_level(conn, self.isolation_level) + fns.append(on_connect) + + if len(fns) > 0: + def on_connect(conn): + for fn in fns: + fn(conn) + return on_connect + else: + return None + dialect = PGDialect_pg8000 diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index b751bbcdd..cf470f055 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -99,11 +99,13 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): assert 'will create implicit sequence' in msgs assert 'will create implicit index' in msgs - @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature') + @testing.only_on( + ['postgresql+psycopg2', 'postgresql+pg8000'], + 'psycopg2/pg8000-specific feature') @engines.close_open_connections def test_client_encoding(self): c = testing.db.connect() - current_encoding = c.connection.connection.encoding + current_encoding = c.execute("show client_encoding").fetchone()[0] c.close() # attempt to use an encoding that's not @@ -115,7 +117,8 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): e = engines.testing_engine(options={'client_encoding': test_encoding}) c = e.connect() - eq_(c.connection.connection.encoding, test_encoding) + new_encoding = c.execute("show client_encoding").fetchone()[0] + eq_(new_encoding, test_encoding) @testing.only_on( ['postgresql+psycopg2', 'postgresql+pg8000'], -- cgit v1.2.1 From c93706fa3319663234e3ab886b65f055bf9ed5da Mon Sep 17 00:00:00 2001 From: Tony Locke Date: Sun, 24 Aug 2014 15:15:17 +0100 Subject: Make pg8000 version detection more robust pg8000 uses Versioneer, which means that development versions have version strings that don't fit into the dotted triple number format. Released versions will always fit the triple format though. --- lib/sqlalchemy/dialects/postgresql/pg8000.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index a76787016..17d83fa61 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -71,6 +71,7 @@ from ... 
import types as sqltypes from .base import ( PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext, _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES) +import re class _PGNumeric(sqltypes.Numeric): @@ -151,15 +152,19 @@ class PGDialect_pg8000(PGDialect): self.client_encoding = client_encoding def initialize(self, connection): - if self.dbapi and hasattr(self.dbapi, '__version__'): - self._dbapi_version = tuple([ - int(x) for x in - self.dbapi.__version__.split(".")]) - else: - self._dbapi_version = (99, 99, 99) self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14) super(PGDialect_pg8000, self).initialize(connection) + @util.memoized_property + def _dbapi_version(self): + if self.dbapi and hasattr(self.dbapi, '__version__'): + return tuple( + [ + int(x) for x in re.findall( + r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)]) + else: + return (99, 99, 99) + @classmethod def dbapi(cls): return __import__('pg8000') -- cgit v1.2.1 From 17e03a0ea86cd92816b4002a203b2b0b2c1a538a Mon Sep 17 00:00:00 2001 From: Tony Locke Date: Sat, 3 Jan 2015 16:59:17 +0000 Subject: Changed pg8000 dialect to cope with native JSON For versions > 1.10.1 pg8000 returns de-serialized JSON objects rather than a string. SQL parameters are still strings though. --- lib/sqlalchemy/dialects/postgresql/pg8000.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index 17d83fa61..4bb376a96 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -72,6 +72,7 @@ from .base import ( PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext, _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES) import re +from sqlalchemy.dialects.postgresql.json import JSON class _PGNumeric(sqltypes.Numeric): @@ -102,6 +103,15 @@ class _PGNumericNoBind(_PGNumeric): return None +class _PGJSON(JSON): + + def result_processor(self, dialect, coltype): + if dialect._dbapi_version > (1, 10, 1): + return None # Has native JSON + else: + return super(_PGJSON, self).result_processor(dialect, coltype) + + class PGExecutionContext_pg8000(PGExecutionContext): pass @@ -143,7 +153,8 @@ class PGDialect_pg8000(PGDialect): PGDialect.colspecs, { sqltypes.Numeric: _PGNumericNoBind, - sqltypes.Float: _PGNumeric + sqltypes.Float: _PGNumeric, + JSON: _PGJSON, } ) -- cgit v1.2.1
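
The patches above add two user-facing pieces: a ``client_encoding`` parameter for the pg8000 dialect and the new :meth:`.Session.invalidate` method. Below is a minimal usage sketch combining them; it is not part of any patch, the connection URL and credentials are placeholders, and a reachable PostgreSQL server plus pg8000 are assumed.

.. sourcecode:: python

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    # client_encoding (documented in the pg8000 patch above) causes
    # "SET CLIENT_ENCODING TO 'utf8'" to be emitted on each new connection.
    engine = create_engine(
        "postgresql+pg8000://user:pass@host/dbname",  # placeholder URL
        client_encoding="utf8",
    )
    Session = sessionmaker(bind=engine)

    sess = Session()
    try:
        sess.execute("SELECT 1")
        sess.commit()
    except BaseException:
        # Session.invalidate() (added in 0.9.9) closes the session and calls
        # Connection.invalidate() on its connections so they are discarded
        # rather than returned to the pool.  The patch's own example reserves
        # this for cases like gevent.Timeout where the connection state is no
        # longer trustworthy; ordinary errors would normally use rollback().
        sess.invalidate()
        raise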