author    Rodrigo Menezes <rodrigo.menezes@moat.com>  2014-08-26 12:57:00 -0400
committer Rodrigo Menezes <rodrigo.menezes@moat.com>  2014-08-26 12:57:00 -0400
commit    b3f7cd8bf497febb80e6cd7dc39effc75ff1a7e7 (patch)
tree      e3a022b20405768bb4e1912c9a2f1347b751d64c
parent    bcf7a55da01633c4890502463a08cb96af9fe5e9 (diff)
parent    8e84942aa6fa2644b3fe6407c79449715a7e2c8c (diff)
download  sqlalchemy-b3f7cd8bf497febb80e6cd7dc39effc75ff1a7e7.tar.gz

Merge branch 'master' of https://github.com/zzzeek/sqlalchemy into feature/postgres-relkind
 doc/build/changelog/changelog_09.rst           |   52
 doc/build/changelog/changelog_10.rst           |   86
 doc/build/changelog/migration_10.rst           |   58
 doc/build/orm/mapper_config.rst                |    6
 doc/build/orm/session.rst                      |    5
 lib/sqlalchemy/dialects/mysql/base.py          |   12
 lib/sqlalchemy/dialects/postgresql/base.py     |   72
 lib/sqlalchemy/dialects/postgresql/pg8000.py   |   14
 lib/sqlalchemy/engine/base.py                  |    4
 lib/sqlalchemy/event/api.py                    |   54
 lib/sqlalchemy/event/attr.py                   |   15
 lib/sqlalchemy/event/registry.py               |    2
 lib/sqlalchemy/ext/mutable.py                  |   10
 lib/sqlalchemy/orm/events.py                   |   12
 lib/sqlalchemy/orm/identity.py                 |   29
 lib/sqlalchemy/orm/mapper.py                   |   91
 lib/sqlalchemy/orm/persistence.py              |  547
 lib/sqlalchemy/pool.py                         |   24
 lib/sqlalchemy/sql/compiler.py                 |    4
 lib/sqlalchemy/sql/dml.py                      |   38
 lib/sqlalchemy/sql/schema.py                   |    3
 lib/sqlalchemy/testing/engines.py              |  112
 lib/sqlalchemy/testing/plugin/provision.py     |   17
 lib/sqlalchemy/testing/plugin/pytestplugin.py  |   14
 lib/sqlalchemy/testing/profiling.py            |  216
 lib/sqlalchemy/testing/replay_fixture.py       |  167
 lib/sqlalchemy/util/_collections.py            |   50
 setup.cfg                                      |    2
 test/aaa_profiling/test_compiler.py            |    2
 test/aaa_profiling/test_zoomark.py             |  155
 test/aaa_profiling/test_zoomark_orm.py         |  233
 test/base/test_events.py                       |  111
 test/dialect/postgresql/test_compiler.py       |   84
 test/engine/test_execute.py                    |   49
 test/engine/test_logging.py                    |  122
 test/engine/test_reconnect.py                  |  110
 test/engine/test_transaction.py                |    7
 test/ext/test_mutable.py                       |   65
 test/orm/test_dynamic.py                       |   10
 test/orm/test_naturalpks.py                    |   16
 test/orm/test_options.py                       |    2
 test/orm/test_unitofwork.py                    |    9
 test/orm/test_unitofworkv2.py                  | 1057
 test/profiles.txt                              |  617
 test/requirements.py                           |   11
 test/sql/test_insert.py                        |  109
 test/sql/test_metadata.py                      |   14
 test/sql/test_query.py                         |    7
 test/sql/test_returning.py                     |   45
 49 files changed, 2538 insertions(+), 2013 deletions(-)
diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst
index a797bfa29..2931916e3 100644
--- a/doc/build/changelog/changelog_09.rst
+++ b/doc/build/changelog/changelog_09.rst
@@ -14,6 +14,58 @@
:version: 0.9.8
.. change::
+ :tags: bug, ext
+ :versions: 1.0.0
+ :pullrequest: bitbucket:28
+
+ Fixed bug where :class:`.MutableDict`
+ failed to implement the ``update()`` dictionary method, thus
+ not catching changes. Pull request courtesy Matt Chisholm.
+
+ .. change::
+ :tags: bug, ext
+ :versions: 1.0.0
+ :pullrequest: bitbucket:27
+
+ Fixed bug where a custom subclass of :class:`.MutableDict`
+ would not show up in a "coerce" operation, and would instead
+ return a plain :class:`.MutableDict`. Pull request
+ courtesy Matt Chisholm.
+
+ .. change::
+ :tags: bug, pool
+ :versions: 1.0.0
+ :tickets: 3168
+
+ Fixed bug in connection pool logging where the "connection checked out"
+ debug logging message would not emit if the log level were set
+ directly on the logger via the ``logging`` module (e.g.
+ ``logger.setLevel(logging.DEBUG)``), rather than using the
+ ``echo_pool`` flag.
+ Tests to assert this logging have been added. This is a
+ regression that was introduced in 0.9.0.
+
+ .. change::
+ :tags: feature, postgresql, pg8000
+ :versions: 1.0.0
+ :pullrequest: github:125
+
+ Support is added for "sane multi row count" with the pg8000 driver,
+ which mostly applies when using versioning with the ORM.
+ The feature is enabled when pg8000 version 1.9.14 or greater
+ is detected in use. Pull request courtesy Tony Locke.
+
+ .. change::
+ :tags: bug, engine
+ :versions: 1.0.0
+ :tickets: 3165
+
+ The string keys that are used to determine the columns impacted
+ for an INSERT or UPDATE are now sorted when they contribute towards
+ the "compiled cache" cache key. These keys were previously not
+ deterministically ordered, meaning the same statement could be
+ cached multiple times on equivalent keys, costing both in terms of
+ memory as well as performance.
+
+ .. change::
:tags: bug, postgresql
:versions: 1.0.0
:tickets: 3159
diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst
index 815de72c7..bff3652c5 100644
--- a/doc/build/changelog/changelog_10.rst
+++ b/doc/build/changelog/changelog_10.rst
@@ -17,6 +17,92 @@
:version: 1.0.0
.. change::
+ :tags: bug, orm
+ :tickets: 3171
+
+ The "resurrect" ORM event has been removed. This event hook had
+ no purpose since the old "mutable attribute" system was removed
+ in 0.8.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3169
+
+ The INSERT...FROM SELECT construct now implies ``inline=True``
+ on :class:`.Insert`. This helps to fix a bug where an
+ INSERT...FROM SELECT construct would inadvertently be compiled
+ as "implicit returning" on supporting backends, which would
+ cause breakage in the case of an INSERT that inserts zero rows
+ (as implicit returning expects a row), as well as arbitrary
+ return data in the case of an INSERT that inserts multiple
+ rows (e.g. only the first row of many).
+ A similar change is also applied to an INSERT..VALUES
+ with multiple parameter sets; implicit RETURNING will no longer emit
+ for this statement either. As both of these constructs deal
+ with variable numbers of rows, the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not
+ apply. Previously, there was a documentation note that one
+ may prefer ``inline=True`` with INSERT..FROM SELECT as some databases
+ don't support returning and therefore can't do "implicit" returning,
+ but there's no reason an INSERT...FROM SELECT needs implicit returning
+ in any case. Regular explicit :meth:`.Insert.returning` should
+ be used to return variable numbers of result rows if inserted
+ data is needed.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3167
+
+ Fixed bug where attribute "set" events or columns with
+ ``@validates`` would have events triggered within the flush process,
+ when those columns were the targets of a "fetch and populate"
+ operation, such as an autoincremented primary key, a Python side
+ default, or a server-side default "eagerly" fetched via RETURNING.
+
+ .. change::
+ :tags: feature, postgresql
+ :tickets: 2051
+
+ Added support for PG table options TABLESPACE, ON COMMIT,
+ WITH(OUT) OIDS, and INHERITS, when rendering DDL via
+ the :class:`.Table` construct. Pull request courtesy
+ malikdiarra.
+
+ .. seealso::
+
+ :ref:`postgresql_table_options`
+
+ .. change::
+ :tags: bug, orm, py3k
+
+ The :class:`.IdentityMap` exposed from :attr:`.Session.identity_map`
+ now returns lists for ``items()`` and ``values()`` in Py3K.
+ Early porting to Py3K here had these returning iterators, when
+ they technically should be "iterable views"; for now, lists are OK.
+
+ .. change::
+ :tags: orm, feature
+
+ UPDATE statements can now be batched within an ORM flush
+ into a more performant executemany() call, similarly to how INSERT
+ statements can be batched; this will be invoked within flush
+ to the degree that subsequent UPDATE statements for the
+ same mapping and table involve the identical columns within the
+ VALUES clause, and that no VALUES-level SQL expressions
+ are embedded.
+
+ .. change::
+ :tags: engine, bug
+ :tickets: 3163
+
+ Removing (or adding) an event listener at the same time that the event
+ is being run itself, either from inside the listener or from a
+ concurrent thread, now raises a RuntimeError, as the collection used is
+ now an instance of ``collections.deque()`` and does not support changes
+ while being iterated. Previously, a plain Python list was used where
+ removal from inside the event itself would produce silent failures.
+
+ .. change::
:tags: orm, feature
:tickets: 2963
diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst
index 06fccd1dd..cf81d41fd 100644
--- a/doc/build/changelog/migration_10.rst
+++ b/doc/build/changelog/migration_10.rst
@@ -102,6 +102,18 @@ symbol, and no change to the object's state occurs.
Behavioral Changes - Core
=========================
+.. _change_3027:
+
+``autoload_with`` now implies ``autoload=True``
+-----------------------------------------------
+
+A :class:`.Table` can be set up for reflection by passing ``autoload_with``
+alone::
+
+ my_table = Table('my_table', metadata, autoload_with=some_engine)
+
+:ticket:`3027`
+
New Features
============
@@ -131,11 +143,55 @@ wishes to support the new feature should now call upon the ``._limit_clause``
and ``._offset_clause`` attributes to receive the full SQL expression, rather
than the integer value.
-.. _feature_3076:
+.. _feature_get_enums:
+
+New get_enums() method with Postgresql Dialect
+----------------------------------------------
+
+The :func:`.inspect` method returns a :class:`.PGInspector` object in the
+case of Postgresql, which includes a new :meth:`.PGInspector.get_enums`
+method that returns information on all available ``ENUM`` types::
+
+ from sqlalchemy import inspect, create_engine
+
+ engine = create_engine("postgresql+psycopg2://host/dbname")
+ insp = inspect(engine)
+ print(insp.get_enums())
+
+.. seealso::
+
+ :meth:`.PGInspector.get_enums`
Behavioral Improvements
=======================
+.. _feature_2963:
+
+.info dictionary improvements
+-----------------------------
+
+The :attr:`.InspectionAttr.info` collection is now available on every kind
+of object that one would retrieve from the :attr:`.Mapper.all_orm_descriptors`
+collection::
+
+ class SomeObject(Base):
+ # ...
+
+ @hybrid_property
+ def some_prop(self):
+ return self.value + 5
+
+
+ inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar'
+
+It is also available as a constructor argument for all :class:`.SchemaItem`
+objects (e.g. :class:`.ForeignKey`, :class:`.UniqueConstraint` etc.) as well
+as remaining ORM constructs such as :func:`.orm.synonym`.
+
+:ticket:`2971`
+
+:ticket:`2963`
+
Dialect Changes
===============
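As a rough sketch of the shape of data the new ``get_enums()`` method
documented above returns, for a hypothetical database that defines a
single ENUM type (values here are illustrative only)::

    [{'name': 'mood',
      'schema': 'public',
      'visible': True,
      'labels': ['sad', 'ok', 'happy']}]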
diff --git a/doc/build/orm/mapper_config.rst b/doc/build/orm/mapper_config.rst
index 9139b53f0..d0679c721 100644
--- a/doc/build/orm/mapper_config.rst
+++ b/doc/build/orm/mapper_config.rst
@@ -667,6 +667,12 @@ issued when the ORM is populating the object::
assert '@' in address
return address
+.. versionchanged:: 1.0.0 - validators are no longer triggered within
+ the flush process when newly fetched values for primary key
+ columns, as well as some Python- or server-side defaults, are
+ populated.
+ Prior to 1.0, validators may be triggered in those cases as well.
+
+
Validators also receive collection append events, when items are added to a
collection::
diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst
index b47e70d53..78ae1ba81 100644
--- a/doc/build/orm/session.rst
+++ b/doc/build/orm/session.rst
@@ -1773,7 +1773,10 @@ method::
of times, which will issue a new SAVEPOINT with a unique identifier for each
call. For each :meth:`~.Session.begin_nested` call, a
corresponding :meth:`~.Session.rollback` or
-:meth:`~.Session.commit` must be issued.
+:meth:`~.Session.commit` must be issued. (But note that if the return value is
+used as a context manager, i.e. in a with-statement, then this rollback/commit
+is issued by the context manager upon exiting the context, and so should not be
+added explicitly.)
When :meth:`~.Session.begin_nested` is called, a
:meth:`~.Session.flush` is unconditionally issued
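A minimal sketch contrasting the two styles described above; ``session``
and ``User`` are assumed to be configured elsewhere::

    # explicit style: each begin_nested() is paired with a
    # commit() or rollback() of the SAVEPOINT
    session.begin_nested()
    session.add(User(name='u1'))
    session.rollback()

    # context-manager style: the commit/rollback is issued by the
    # context manager upon exiting the block, so none is added here
    with session.begin_nested():
        session.add(User(name='u2'))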
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 374960765..012d178e7 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -190,15 +190,13 @@ SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
-SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag
-value is overridden using DBAPI-specific options
-(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
-OurSQL driver).
+SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
+flag, or whatever is equivalent for the target dialect, upon connection.
+This setting is currently hardcoded.
-See also:
+.. seealso::
-:attr:`.ResultProxy.rowcount`
+ :attr:`.ResultProxy.rowcount`
CAST Support
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 75d0696ad..206a25d28 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -417,6 +417,42 @@ of :class:`.PGInspector`, which offers additional methods::
.. autoclass:: PGInspector
:members:
+.. _postgresql_table_options:
+
+PostgreSQL Table Options
+-------------------------
+
+Several options for CREATE TABLE are supported directly by the PostgreSQL
+dialect in conjunction with the :class:`.Table` construct:
+
+* ``TABLESPACE``::
+
+ Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
+
+* ``ON COMMIT``::
+
+ Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
+
+* ``WITH OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=True)
+
+* ``WITHOUT OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=False)
+
+* ``INHERITS``::
+
+ Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
+
+ Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+ `Postgresql CREATE TABLE options
+ <http://www.postgresql.org/docs/9.3/static/sql-createtable.html>`_
"""
from collections import defaultdict
@@ -1448,6 +1484,36 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += self.define_constraint_deferrability(constraint)
return text
+ def post_create_table(self, table):
+ table_opts = []
+ pg_opts = table.dialect_options['postgresql']
+
+ inherits = pg_opts.get('inherits')
+ if inherits is not None:
+ if not isinstance(inherits, (list, tuple)):
+ inherits = (inherits, )
+ table_opts.append(
+ '\n INHERITS ( ' +
+ ', '.join(self.preparer.quote(name) for name in inherits) +
+ ' )')
+
+ if pg_opts['with_oids'] is True:
+ table_opts.append('\n WITH OIDS')
+ elif pg_opts['with_oids'] is False:
+ table_opts.append('\n WITHOUT OIDS')
+
+ if pg_opts['on_commit']:
+ on_commit_options = pg_opts['on_commit'].replace("_", " ").upper()
+ table_opts.append('\n ON COMMIT %s' % on_commit_options)
+
+ if pg_opts['tablespace']:
+ tablespace_name = pg_opts['tablespace']
+ table_opts.append(
+ '\n TABLESPACE %s' % self.preparer.quote(tablespace_name)
+ )
+
+ return ''.join(table_opts)
+
class PGTypeCompiler(compiler.GenericTypeCompiler):
@@ -1707,7 +1773,11 @@ class PGDialect(default.DefaultDialect):
"ops": {}
}),
(schema.Table, {
- "ignore_search_path": False
+ "ignore_search_path": False,
+ "tablespace": None,
+ "with_oids": None,
+ "on_commit": None,
+ "inherits": None
})
]
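As a rough sketch of the DDL the new table options are intended to render,
per the ``post_create_table()`` hook above; the names are illustrative and
the output shown is approximate::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import postgresql

    t = Table(
        'some_table', MetaData(),
        Column('id', Integer, primary_key=True),
        postgresql_tablespace='some_tablespace',
        # underscores are replaced and upper-cased by the compiler,
        # so 'preserve_rows' renders as ON COMMIT PRESERVE ROWS
        postgresql_on_commit='preserve_rows'
    )
    print(CreateTable(t).compile(dialect=postgresql.dialect()))

    # roughly:
    # CREATE TABLE some_table (
    #     id SERIAL NOT NULL,
    #     PRIMARY KEY (id)
    # )
    # ON COMMIT PRESERVE ROWS
    # TABLESPACE some_tablespace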
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index 68da5b6d7..4ccc90208 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -119,7 +119,7 @@ class PGDialect_pg8000(PGDialect):
supports_unicode_binds = True
default_paramstyle = 'format'
- supports_sane_multi_rowcount = False
+ supports_sane_multi_rowcount = True
execution_ctx_cls = PGExecutionContext_pg8000
statement_compiler = PGCompiler_pg8000
preparer = PGIdentifierPreparer_pg8000
@@ -133,6 +133,16 @@ class PGDialect_pg8000(PGDialect):
}
)
+ def initialize(self, connection):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ self._dbapi_version = tuple([
+ int(x) for x in
+ self.dbapi.__version__.split(".")])
+ else:
+ self._dbapi_version = (99, 99, 99)
+ self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
+ super(PGDialect_pg8000, self).initialize(connection)
+
@classmethod
def dbapi(cls):
return __import__('pg8000')
@@ -172,11 +182,9 @@ class PGDialect_pg8000(PGDialect):
)
def do_begin_twophase(self, connection, xid):
- print("begin twophase", xid)
connection.connection.tpc_begin((0, xid, ''))
def do_prepare_twophase(self, connection, xid):
- print("prepare twophase", xid)
connection.connection.tpc_prepare()
def do_rollback_twophase(
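The integer-tuple conversion in ``initialize()`` above matters because
comparing version strings lexicographically gives wrong answers once a
component reaches two digits; a quick illustration::

    >>> "1.10.1" >= "1.9.14"     # string comparison: wrong
    False
    >>> tuple(int(x) for x in "1.10.1".split(".")) >= (1, 9, 14)
    True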
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 2dc4d43f2..d2cc8890f 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -798,14 +798,14 @@ class Connection(Connectable):
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# note this is usually dict but we support RowProxy
- # as well; but dict.keys() as an iterator is OK
+ # as well; but dict.keys() as an iterable is OK
keys = distilled_params[0].keys()
else:
keys = []
dialect = self.dialect
if 'compiled_cache' in self._execution_options:
- key = dialect, elem, tuple(keys), len(distilled_params) > 1
+ key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1
if key in self._execution_options['compiled_cache']:
compiled_sql = self._execution_options['compiled_cache'][key]
else:
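To see why the ``sorted()`` call helps here: dictionary key order is
arbitrary, so two executions of the same statement with the same
parameter names could previously hash to distinct cache keys. A hedged
illustration of the effect::

    # the same bound-parameter dict can report its keys in either order
    keys_run_1 = ('y', 'x')
    keys_run_2 = ('x', 'y')
    assert tuple(keys_run_1) != tuple(keys_run_2)   # two cache entries before
    assert tuple(sorted(keys_run_1)) == tuple(sorted(keys_run_2))   # one now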
diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py
index 270e95c9c..b3d79bcf4 100644
--- a/lib/sqlalchemy/event/api.py
+++ b/lib/sqlalchemy/event/api.py
@@ -58,6 +58,32 @@ def listen(target, identifier, fn, *args, **kw):
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
+ .. note::
+
+ The :func:`.listen` function cannot be called at the same time
+ that the target event is being run. This has implications
+ for thread safety, and also means an event cannot be added
+ from inside the listener function for itself. The list of
+ events to be run is held inside of a mutable collection
+ that can't be changed during iteration.
+
+ Event registration and removal is not intended to be a "high
+ velocity" operation; it is a configurational operation. For
+ systems that need to quickly associate and deassociate with
+ events at high scale, use a mutable structure that is handled
+ from inside of a single listener.
+
+ .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
+ used as the container for the list of events, which explicitly
+ disallows collection mutation while the collection is being
+ iterated.
+
+ .. seealso::
+
+ :func:`.listens_for`
+
+ :func:`.remove`
+
"""
_event_key(target, identifier, fn).listen(*args, **kw)
@@ -89,6 +115,10 @@ def listens_for(target, identifier, *args, **kw):
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
+ .. seealso::
+
+ :func:`.listen` - general description of event listening
+
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
@@ -120,6 +150,30 @@ def remove(target, identifier, fn):
.. versionadded:: 0.9.0
+ .. note::
+
+ The :func:`.remove` function cannot be called at the same time
+ that the target event is being run. This has implications
+ for thread safety, and also means an event cannot be removed
+ from inside the listener function for itself. The list of
+ events to be run is held inside of a mutable collection
+ that can't be changed during iteration.
+
+ Event registration and removal is not intended to be a "high
+ velocity" operation; it is a configurational operation. For
+ systems that need to quickly associate and deassociate with
+ events at high scale, use a mutable structure that is handled
+ from inside of a single listener.
+
+ .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
+ used as the container for the list of events, which explicitly
+ disallows collection mutation while the collection is being
+ iterated.
+
+ .. seealso::
+
+ :func:`.listen`
+
"""
_event_key(target, identifier, fn).remove()
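The "mutable structure handled from inside of a single listener"
suggestion in the notes above might look like the following sketch;
``some_engine`` and ``my_handler`` are assumed to exist elsewhere::

    from sqlalchemy import event

    handlers = set()

    def dispatcher(dbapi_connection, connection_record):
        # the set may be mutated freely at runtime, unlike the
        # listener collection itself
        for handler in list(handlers):
            handler(dbapi_connection, connection_record)

    # one stable listener registered up front
    event.listen(some_engine, "connect", dispatcher)

    handlers.add(my_handler)       # fast associate
    handlers.discard(my_handler)   # fast deassociate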
diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py
index 7641b595a..dba1063cf 100644
--- a/lib/sqlalchemy/event/attr.py
+++ b/lib/sqlalchemy/event/attr.py
@@ -37,6 +37,7 @@ from . import registry
from . import legacy
from itertools import chain
import weakref
+import collections
class RefCollection(object):
@@ -96,8 +97,8 @@ class _DispatchDescriptor(RefCollection):
self.update_subclass(cls)
else:
if cls not in self._clslevel:
- self._clslevel[cls] = []
- self._clslevel[cls].insert(0, event_key._listen_fn)
+ self._clslevel[cls] = collections.deque()
+ self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def append(self, event_key, propagate):
@@ -113,13 +114,13 @@ class _DispatchDescriptor(RefCollection):
self.update_subclass(cls)
else:
if cls not in self._clslevel:
- self._clslevel[cls] = []
+ self._clslevel[cls] = collections.deque()
self._clslevel[cls].append(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def update_subclass(self, target):
if target not in self._clslevel:
- self._clslevel[target] = []
+ self._clslevel[target] = collections.deque()
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
@@ -145,7 +146,7 @@ class _DispatchDescriptor(RefCollection):
to_clear = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
- dispatcher[:] = []
+ dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj):
@@ -287,7 +288,7 @@ class _ListenerCollection(RefCollection, _CompoundListener):
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.__name__
- self.listeners = []
+ self.listeners = collections.deque()
self.propagate = set()
def for_modify(self, obj):
@@ -337,7 +338,7 @@ class _ListenerCollection(RefCollection, _CompoundListener):
def clear(self):
registry._clear(self, self.listeners)
self.propagate.clear()
- self.listeners[:] = []
+ self.listeners.clear()
class _JoinedDispatchDescriptor(object):
diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py
index a34de3cd7..ba2f671a3 100644
--- a/lib/sqlalchemy/event/registry.py
+++ b/lib/sqlalchemy/event/registry.py
@@ -243,4 +243,4 @@ class _EventKey(object):
def prepend_to_list(self, owner, list_):
_stored_in_collection(self, owner)
- list_.insert(0, self._listen_fn)
+ list_.appendleft(self._listen_fn)
diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py
index 7469bcbda..e49e9ea8b 100644
--- a/lib/sqlalchemy/ext/mutable.py
+++ b/lib/sqlalchemy/ext/mutable.py
@@ -621,16 +621,20 @@ class MutableDict(Mutable, dict):
dict.__delitem__(self, key)
self.changed()
+ def update(self, *a, **kw):
+ dict.update(self, *a, **kw)
+ self.changed()
+
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
- """Convert plain dictionary to MutableDict."""
- if not isinstance(value, MutableDict):
+ """Convert plain dictionary to instance of this class."""
+ if not isinstance(value, cls):
if isinstance(value, dict):
- return MutableDict(value)
+ return cls(value)
return Mutable.coerce(key, value)
else:
return value
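A brief sketch of the two ``MutableDict`` fixes together; ``MyDict`` is a
hypothetical subclass used only for illustration::

    from sqlalchemy.ext.mutable import MutableDict

    class MyDict(MutableDict):
        pass

    # coerce() now returns the subclass, not a plain MutableDict
    d = MyDict.coerce('data', {'a': 1})
    assert type(d) is MyDict

    # update() now calls changed(), flagging the parent object as
    # dirty in the same way __setitem__() does
    d.update(b=2)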
diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py
index aa99673ba..8edaa2744 100644
--- a/lib/sqlalchemy/orm/events.py
+++ b/lib/sqlalchemy/orm/events.py
@@ -293,18 +293,6 @@ class InstanceEvents(event.Events):
"""
- def resurrect(self, target):
- """Receive an object instance as it is 'resurrected' from
- garbage collection, which occurs when a "dirty" state falls
- out of scope.
-
- :param target: the mapped instance. If
- the event is configured with ``raw=True``, this will
- instead be the :class:`.InstanceState` state-management
- object associated with the instance.
-
- """
-
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
index d9cdd791f..0fa541194 100644
--- a/lib/sqlalchemy/orm/identity.py
+++ b/lib/sqlalchemy/orm/identity.py
@@ -150,7 +150,7 @@ class WeakInstanceDict(IdentityMap):
return default
return o
- def _items(self):
+ def items(self):
values = self.all_states()
result = []
for state in values:
@@ -159,7 +159,7 @@ class WeakInstanceDict(IdentityMap):
result.append((state.key, value))
return result
- def _values(self):
+ def values(self):
values = self.all_states()
result = []
for state in values:
@@ -169,9 +169,10 @@ class WeakInstanceDict(IdentityMap):
return result
+ def __iter__(self):
+ return iter(self.keys())
+
if util.py2k:
- items = _items
- values = _values
def iteritems(self):
return iter(self.items())
@@ -179,19 +180,6 @@ class WeakInstanceDict(IdentityMap):
def itervalues(self):
return iter(self.values())
- def __iter__(self):
- return iter(self.keys())
-
- else:
- def items(self):
- return iter(self._items())
-
- def values(self):
- return iter(self._values())
-
- def __iter__(self):
- return self.keys()
-
def all_states(self):
if util.py2k:
return self._dict.values()
@@ -217,11 +205,8 @@ class StrongInstanceDict(IdentityMap):
def iteritems(self):
return self._dict.iteritems()
- def __iter__(self):
- return iter(self.keys())
- else:
- def __iter__(self):
- return self.keys()
+ def __iter__(self):
+ return iter(self._dict)
def __getitem__(self, key):
return self._dict[key]
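A sketch of the Py3K behavioral change noted in the changelog above;
``session`` is assumed to be an active :class:`.Session`::

    # under Python 3, items() and values() now return lists rather
    # than one-shot iterators, so they may be traversed repeatedly
    items = session.identity_map.items()
    for key, obj in items:
        print(key)
    for key, obj in items:   # previously exhausted; now works
        print(obj)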
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 06ec2bf14..aab28ee0c 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -1127,7 +1127,6 @@ class Mapper(InspectionAttr):
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
- event.listen(manager, 'resurrect', _event_on_resurrect, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
@@ -1189,14 +1188,6 @@ class Mapper(InspectionAttr):
util.ordered_column_set(t.c).\
intersection(all_cols)
- # determine cols that aren't expressed within our tables; mark these
- # as "read only" properties which are refreshed upon INSERT/UPDATE
- self._readonly_props = set(
- self._columntoproperty[col]
- for col in self._columntoproperty
- if not hasattr(col, 'table') or
- col.table not in self._cols_by_table)
-
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
@@ -1247,6 +1238,15 @@ class Mapper(InspectionAttr):
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
+ # determine cols that aren't expressed within our tables; mark these
+ # as "read only" properties which are refreshed upon INSERT/UPDATE
+ self._readonly_props = set(
+ self._columntoproperty[col]
+ for col in self._columntoproperty
+ if self._columntoproperty[col] not in self._primary_key_props and
+ (not hasattr(col, 'table') or
+ col.table not in self._cols_by_table))
+
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
@@ -1892,6 +1892,54 @@ class Mapper(InspectionAttr):
"""
+ @_memoized_configured_property
+ def _insert_cols_as_none(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ col.key for col in columns
+ if not col.primary_key and
+ not col.server_default and not col.default)
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _propkey_to_col(self):
+ return dict(
+ (
+ table,
+ dict(
+ (self._columntoproperty[col].key, col)
+ for col in columns
+ )
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _pk_keys_by_table(self):
+ return dict(
+ (
+ table,
+ frozenset([col.key for col in pks])
+ )
+ for table, pks in self._pks_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _server_default_cols(self):
+ return dict(
+ (
+ table,
+ frozenset([
+ col for col in columns
+ if col.server_default is not None])
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
@@ -2307,18 +2355,29 @@ class Mapper(InspectionAttr):
dict_ = state.dict
manager = state.manager
return [
- manager[self._columntoproperty[col].key].
+ manager[prop.key].
impl.get(state, dict_,
attributes.PASSIVE_RETURN_NEVER_SET)
- for col in self.primary_key
+ for prop in self._primary_key_props
]
+ @_memoized_configured_property
+ def _primary_key_props(self):
+ # TODO: this should really be called "identity key props",
+ # as it does not necessarily include primary key columns within
+ # individual tables
+ return [self._columntoproperty[col] for col in self.primary_key]
+
def _get_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
+ def _set_committed_state_attr_by_column(self, state, dict_, column, value):
+ prop = self._columntoproperty[column]
+ state.manager[prop.key].impl.set_committed_value(state, dict_, value)
+
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
@@ -2702,16 +2761,6 @@ def _event_on_init(state, args, kwargs):
instrumenting_mapper._set_polymorphic_identity(state)
-def _event_on_resurrect(state):
- # re-populate the primary key elements
- # of the dict based on the mapping.
- instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
- if instrumenting_mapper:
- for col, val in zip(instrumenting_mapper.primary_key, state.key[1]):
- instrumenting_mapper._set_state_attr_by_column(
- state, state.dict, col, val)
-
-
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index 295d4a3d0..511a9cef0 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -18,7 +18,7 @@ import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
-from .base import _state_mapper, state_str, _attr_as_key
+from .base import state_str, _attr_as_key
from ..sql import expression
from . import loading
@@ -40,32 +40,55 @@ def save_obj(base_mapper, states, uowtransaction, single=False):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
- states_to_insert, states_to_update = _organize_states_for_save(
- base_mapper,
- states,
- uowtransaction)
-
+ states_to_update = []
+ states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
- for table, mapper in base_mapper._sorted_tables.items():
- insert = _collect_insert_commands(base_mapper, uowtransaction,
- table, states_to_insert)
-
- update = _collect_update_commands(base_mapper, uowtransaction,
- table, states_to_update)
-
- if update:
- _emit_update_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, update)
-
- if insert:
- _emit_insert_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, insert)
+ for (state, dict_, mapper, connection,
+ has_identity,
+ row_switch, update_version_id) in _organize_states_for_save(
+ base_mapper, states, uowtransaction
+ ):
+ if has_identity or row_switch:
+ states_to_update.append(
+ (state, dict_, mapper, connection, update_version_id)
+ )
+ else:
+ states_to_insert.append(
+ (state, dict_, mapper, connection)
+ )
- _finalize_insert_update_commands(base_mapper, uowtransaction,
- states_to_insert, states_to_update)
+ for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+ insert = _collect_insert_commands(table, states_to_insert)
+
+ update = _collect_update_commands(
+ uowtransaction, table, states_to_update)
+
+ _emit_update_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, update)
+
+ _emit_insert_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, insert)
+
+ _finalize_insert_update_commands(
+ base_mapper, uowtransaction,
+ (
+ (state, state_dict, mapper, connection, False)
+ for state, state_dict, mapper, connection in states_to_insert
+ )
+ )
+ _finalize_insert_update_commands(
+ base_mapper, uowtransaction,
+ (
+ (state, state_dict, mapper, connection, True)
+ for state, state_dict, mapper, connection,
+ update_version_id in states_to_update
+ )
+ )
def post_update(base_mapper, states, uowtransaction, post_update_cols):
@@ -75,19 +98,28 @@ def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""
cached_connections = _cached_connection_dict(base_mapper)
- states_to_update = _organize_states_for_post_update(
+ states_to_update = list(_organize_states_for_post_update(
base_mapper,
- states, uowtransaction)
+ states, uowtransaction))
for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+
+ update = (
+ (state, state_dict, sub_mapper, connection)
+ for
+ state, state_dict, sub_mapper, connection in states_to_update
+ if table in sub_mapper._pks_by_table
+ )
+
update = _collect_post_update_commands(base_mapper, uowtransaction,
- table, states_to_update,
+ table, update,
post_update_cols)
- if update:
- _emit_post_update_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, update)
+ _emit_post_update_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
@@ -100,24 +132,26 @@ def delete_obj(base_mapper, states, uowtransaction):
cached_connections = _cached_connection_dict(base_mapper)
- states_to_delete = _organize_states_for_delete(
+ states_to_delete = list(_organize_states_for_delete(
base_mapper,
states,
- uowtransaction)
+ uowtransaction))
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
+ mapper = table_to_mapper[table]
+ if table not in mapper._pks_by_table:
+ continue
+
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
- mapper = table_to_mapper[table]
-
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
- for state, state_dict, mapper, has_identity, connection \
- in states_to_delete:
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
@@ -133,17 +167,15 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
"""
- states_to_insert = []
- states_to_update = []
-
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
+
instance_key = state.key or mapper._identity_key_from_state(state)
- row_switch = None
+ row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
@@ -180,18 +212,14 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
uowtransaction.remove_state_actions(existing)
row_switch = existing
- if not has_identity and not row_switch:
- states_to_insert.append(
- (state, dict_, mapper, connection,
- has_identity, instance_key, row_switch)
- )
- else:
- states_to_update.append(
- (state, dict_, mapper, connection,
- has_identity, instance_key, row_switch)
- )
+ if (has_identity or row_switch) and mapper.version_id_col is not None:
+ update_version_id = mapper._get_committed_state_attr_by_column(
+ row_switch if row_switch else state,
+ row_switch.dict if row_switch else dict_,
+ mapper.version_id_col)
- return states_to_insert, states_to_update
+ yield (state, dict_, mapper, connection,
+ has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
@@ -204,8 +232,7 @@ def _organize_states_for_post_update(base_mapper, states,
the execution per state.
"""
- return list(_connections_for_states(base_mapper, uowtransaction,
- states))
+ return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
@@ -216,72 +243,73 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction):
mapper, the connection to use for the execution per state.
"""
- states_to_delete = []
-
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
- states_to_delete.append((state, dict_, mapper,
- bool(state.key), connection))
- return states_to_delete
+ if mapper.version_id_col is not None:
+ update_version_id = \
+ mapper._get_committed_state_attr_by_column(
+ state, dict_,
+ mapper.version_id_col)
+ else:
+ update_version_id = None
+
+ yield (
+ state, dict_, mapper, connection, update_version_id)
-def _collect_insert_commands(base_mapper, uowtransaction, table,
- states_to_insert):
+def _collect_insert_commands(table, states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
- insert = []
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_insert:
+ for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
- pks = mapper._pks_by_table[table]
-
params = {}
value_params = {}
- has_all_pks = True
- has_all_defaults = True
- for col in mapper._cols_by_table[table]:
- if col is mapper.version_id_col and \
- mapper.version_id_generator is not False:
- val = mapper.version_id_generator(None)
- params[col.key] = val
+ propkey_to_col = mapper._propkey_to_col[table]
+
+ for propkey in set(propkey_to_col).intersection(state_dict):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+ if value is None:
+ continue
+ elif isinstance(value, sql.ClauseElement):
+ value_params[col.key] = value
else:
- # pull straight from the dict for
- # pending objects
- prop = mapper._columntoproperty[col]
- value = state_dict.get(prop.key, None)
-
- if value is None:
- if col in pks:
- has_all_pks = False
- elif col.default is None and \
- col.server_default is None:
- params[col.key] = value
- elif col.server_default is not None and \
- mapper.base_mapper.eager_defaults:
- has_all_defaults = False
-
- elif isinstance(value, sql.ClauseElement):
- value_params[col] = value
- else:
- params[col.key] = value
+ params[col.key] = value
+
+ for colkey in mapper._insert_cols_as_none[table].\
+ difference(params).difference(value_params):
+ params[colkey] = None
+
+ has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
+
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = mapper._server_default_cols[table].\
+ issubset(params)
+ else:
+ has_all_defaults = True
+
+ if mapper.version_id_generator is not False \
+ and mapper.version_id_col is not None and \
+ mapper.version_id_col in mapper._cols_by_table[table]:
+ params[mapper.version_id_col.key] = \
+ mapper.version_id_generator(None)
- insert.append((state, state_dict, params, mapper,
- connection, value_params, has_all_pks,
- has_all_defaults))
- return insert
+ yield (
+ state, state_dict, params, mapper,
+ connection, value_params, has_all_pks,
+ has_all_defaults)
-def _collect_update_commands(base_mapper, uowtransaction,
- table, states_to_update):
+def _collect_update_commands(uowtransaction, table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
@@ -293,9 +321,9 @@ def _collect_update_commands(base_mapper, uowtransaction,
"""
- update = []
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_update:
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_update:
+
if table not in mapper._pks_by_table:
continue
@@ -304,98 +332,59 @@ def _collect_update_commands(base_mapper, uowtransaction,
params = {}
value_params = {}
- hasdata = hasnull = False
- for col in mapper._cols_by_table[table]:
- if col is mapper.version_id_col:
- params[col._label] = \
- mapper._get_committed_state_attr_by_column(
- row_switch or state,
- row_switch and row_switch.dict
- or state_dict,
- col)
+ propkey_to_col = mapper._propkey_to_col[table]
- prop = mapper._columntoproperty[col]
- history = state.manager[prop.key].impl.get_history(
- state, state_dict, attributes.PASSIVE_NO_INITIALIZE
- )
- if history.added:
- params[col.key] = history.added[0]
- hasdata = True
+ for propkey in set(propkey_to_col).intersection(state.committed_state):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+
+ if not state.manager[propkey].impl.is_equal(
+ value, state.committed_state[propkey]):
+ if isinstance(value, sql.ClauseElement):
+ value_params[col] = value
+ else:
+ params[col.key] = value
+
+ if update_version_id is not None:
+ col = mapper.version_id_col
+ params[col._label] = update_version_id
+
+ if col.key not in params and \
+ mapper.version_id_generator is not False:
+ val = mapper.version_id_generator(update_version_id)
+ params[col.key] = val
+
+ if not (params or value_params):
+ continue
+
+ pk_params = {}
+ for col in pks:
+ propkey = mapper._columntoproperty[col].key
+ history = state.manager[propkey].impl.get_history(
+ state, state_dict, attributes.PASSIVE_OFF)
+
+ if history.added:
+ if not history.deleted or \
+ ("pk_cascaded", state, col) in \
+ uowtransaction.attributes:
+ pk_params[col._label] = history.added[0]
+ params.pop(col.key, None)
else:
- if mapper.version_id_generator is not False:
- val = mapper.version_id_generator(params[col._label])
- params[col.key] = val
-
- # HACK: check for history, in case the
- # history is only
- # in a different table than the one
- # where the version_id_col is.
- for prop in mapper._columntoproperty.values():
- history = (
- state.manager[prop.key].impl.get_history(
- state, state_dict,
- attributes.PASSIVE_NO_INITIALIZE))
- if history.added:
- hasdata = True
+ # else, use the old value to locate the row
+ pk_params[col._label] = history.deleted[0]
+ params[col.key] = history.added[0]
else:
- prop = mapper._columntoproperty[col]
- history = state.manager[prop.key].impl.get_history(
- state, state_dict,
- attributes.PASSIVE_NO_INITIALIZE)
- if history.added:
- if isinstance(history.added[0],
- sql.ClauseElement):
- value_params[col] = history.added[0]
- else:
- value = history.added[0]
- params[col.key] = value
-
- if col in pks:
- if history.deleted and \
- not row_switch:
- # if passive_updates and sync detected
- # this was a pk->pk sync, use the new
- # value to locate the row, since the
- # DB would already have set this
- if ("pk_cascaded", state, col) in \
- uowtransaction.attributes:
- value = history.added[0]
- params[col._label] = value
- else:
- # use the old value to
- # locate the row
- value = history.deleted[0]
- params[col._label] = value
- hasdata = True
- else:
- # row switch logic can reach us here
- # remove the pk from the update params
- # so the update doesn't
- # attempt to include the pk in the
- # update statement
- del params[col.key]
- value = history.added[0]
- params[col._label] = value
- if value is None:
- hasnull = True
- else:
- hasdata = True
- elif col in pks:
- value = state.manager[prop.key].impl.get(
- state, state_dict)
- if value is None:
- hasnull = True
- params[col._label] = value
+ pk_params[col._label] = history.unchanged[0]
- if hasdata:
- if hasnull:
+ if params or value_params:
+ if None in pk_params.values():
raise orm_exc.FlushError(
- "Can't update table "
- "using NULL for primary "
+ "Can't update table using NULL for primary "
"key value")
- update.append((state, state_dict, params, mapper,
- connection, value_params))
- return update
+ params.update(pk_params)
+ yield (
+ state, state_dict, params, mapper,
+ connection, value_params)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
@@ -405,10 +394,10 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
"""
- update = []
for state, state_dict, mapper, connection in states_to_update:
- if table not in mapper._pks_by_table:
- continue
+
+ # assert table in mapper._pks_by_table
+
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
@@ -417,8 +406,8 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
- state,
- state_dict, col)
+ state,
+ state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
@@ -430,9 +419,7 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
params[col.key] = value
hasdata = True
if hasdata:
- update.append((state, state_dict, params, mapper,
- connection))
- return update
+ yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
@@ -440,33 +427,28 @@ def _collect_delete_commands(base_mapper, uowtransaction, table,
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
- delete = util.defaultdict(list)
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_delete:
- for state, state_dict, mapper, has_identity, connection \
- in states_to_delete:
- if not has_identity or table not in mapper._pks_by_table:
+ if table not in mapper._pks_by_table:
continue
params = {}
- delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
- state, state_dict, col)
+ state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
- if mapper.version_id_col is not None and \
+ if update_version_id is not None and \
table.c.contains_column(mapper.version_id_col):
- params[mapper.version_id_col.key] = \
- mapper._get_committed_state_attr_by_column(
- state, state_dict,
- mapper.version_id_col)
- return delete
+ params[mapper.version_id_col.key] = update_version_id
+ yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
@@ -500,41 +482,80 @@ def _emit_update_statements(base_mapper, uowtransaction,
statement = base_mapper._memo(('update', table), update_stmt)
- rows = 0
- for state, state_dict, params, mapper, \
- connection, value_params in update:
-
- if value_params:
- c = connection.execute(
- statement.values(value_params),
- params)
+ for (connection, paramkeys, hasvalue), \
+ records in groupby(
+ update,
+ lambda rec: (
+ rec[4],
+ tuple(sorted(rec[2])),
+ bool(rec[5]))):
+
+ rows = 0
+ records = list(records)
+ if hasvalue:
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ c = connection.execute(
+ statement.values(value_params),
+ params)
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+ rows += c.rowcount
else:
- c = cached_connections[connection].\
- execute(statement, params)
-
- _postfetch(
- mapper,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- c.context.compiled_parameters[0],
- value_params)
- rows += c.rowcount
-
- if connection.dialect.supports_sane_rowcount:
- if rows != len(update):
- raise orm_exc.StaleDataError(
- "UPDATE statement on table '%s' expected to "
- "update %d row(s); %d were matched." %
- (table.description, len(update), rows))
-
- elif needs_version_id:
- util.warn("Dialect %s does not support updated rowcount "
- "- versioning cannot be verified." %
- c.dialect.dialect_description,
- stacklevel=12)
+ if needs_version_id and \
+ not connection.dialect.supports_sane_multi_rowcount and \
+ connection.dialect.supports_sane_rowcount:
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ c = cached_connections[connection].\
+ execute(statement, params)
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+ rows += c.rowcount
+ else:
+ multiparams = [rec[2] for rec in records]
+ c = cached_connections[connection].\
+ execute(statement, multiparams)
+
+ rows += c.rowcount
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+
+ if connection.dialect.supports_sane_rowcount:
+ if rows != len(records):
+ raise orm_exc.StaleDataError(
+ "UPDATE statement on table '%s' expected to "
+ "update %d row(s); %d were matched." %
+ (table.description, len(records), rows))
+
+ elif needs_version_id:
+ util.warn("Dialect %s does not support updated rowcount "
+ "- versioning cannot be verified." %
+ c.dialect.dialect_description,
+ stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
@@ -547,7 +568,7 @@ def _emit_insert_statements(base_mapper, uowtransaction,
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
- list(rec[2].keys()),
+ tuple(sorted(rec[2].keys())),
bool(rec[5]),
rec[6], rec[7])
):
@@ -604,13 +625,7 @@ def _emit_insert_statements(base_mapper, uowtransaction,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
- # TODO: would rather say:
- # state_dict[prop.key] = pk
- mapper_rec._set_state_attr_by_column(
- state,
- state_dict,
- col, pk)
-
+ state_dict[prop.key] = pk
_postfetch(
mapper_rec,
uowtransaction,
@@ -643,11 +658,10 @@ def _emit_post_update_statements(base_mapper, uowtransaction,
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
- update, lambda rec: (rec[4], list(rec[2].keys()))
+ update, lambda rec: (rec[1], sorted(rec[0]))
):
connection = key[0]
- multiparams = [params for state, state_dict,
- params, mapper, conn in grouper]
+ multiparams = [params for params, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
@@ -677,8 +691,15 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
return table.delete(clause)
- for connection, del_objects in delete.items():
- statement = base_mapper._memo(('delete', table), delete_stmt)
+ statement = base_mapper._memo(('delete', table), delete_stmt)
+ for connection, recs in groupby(
+ delete,
+ lambda rec: rec[1]
+ ):
+ del_objects = [
+ params
+ for params, connection in recs
+ ]
connection = cached_connections[connection]
@@ -731,15 +752,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
)
-def _finalize_insert_update_commands(base_mapper, uowtransaction,
- states_to_insert, states_to_update):
+def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_insert + \
- states_to_update:
+ for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
@@ -795,11 +813,11 @@ def _postfetch(mapper, uowtransaction, table,
for col in returning_cols:
if col.primary_key:
continue
- mapper._set_state_attr_by_column(state, dict_, col, row[col])
+ dict_[mapper._columntoproperty[col].key] = row[col]
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
- mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
+ dict_[mapper._columntoproperty[c].key] = params[c.key]
if postfetch_cols:
state._expire_attributes(state.dict,
@@ -833,17 +851,14 @@ def _connections_for_states(base_mapper, uowtransaction, states):
connection_callable = \
uowtransaction.session.connection_callable
else:
- connection = None
+ connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
- elif not connection:
- connection = uowtransaction.transaction.connection(
- base_mapper)
- mapper = _state_mapper(state)
+ mapper = state.manager.mapper
yield state, state.dict, mapper, connection
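A sketch of the flush pattern the new UPDATE batching above targets;
``session`` and ``users`` are assumed. Because each UPDATE touches the
identical column set on the same table and embeds no SQL expressions,
the flush may group them into a single executemany() call::

    for user, name in zip(users, ['a', 'b', 'c']):
        user.name = name

    # previously three separate execute() calls; now potentially one
    # cursor.executemany() with three parameter sets
    session.flush()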
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index d26bbf32c..bc9affe4a 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -305,7 +305,7 @@ class Pool(log.Identified):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
- This method is used in conjunection with :meth:`dispose`
+ This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
@@ -443,16 +443,17 @@ class _ConnectionRecord(object):
except:
rec.checkin()
raise
- fairy = _ConnectionFairy(dbapi_connection, rec)
+ echo = pool._should_log_debug()
+ fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
- rec, pool, ref, pool._echo)
+ rec, pool, ref, echo)
)
_refs.add(rec)
- if pool._echo:
+ if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
@@ -560,9 +561,10 @@ def _finalize_fairy(connection, connection_record,
connection)
try:
- fairy = fairy or _ConnectionFairy(connection, connection_record)
+ fairy = fairy or _ConnectionFairy(
+ connection, connection_record, echo)
assert fairy.connection is connection
- fairy._reset(pool, echo)
+ fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
@@ -603,9 +605,10 @@ class _ConnectionFairy(object):
"""
- def __init__(self, dbapi_connection, connection_record):
+ def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
+ self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
@@ -642,7 +645,6 @@ class _ConnectionFairy(object):
fairy._pool = pool
fairy._counter = 0
- fairy._echo = pool._should_log_debug()
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
@@ -684,11 +686,11 @@ class _ConnectionFairy(object):
_close = _checkin
- def _reset(self, pool, echo):
+ def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
- if echo:
+ if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
@@ -698,7 +700,7 @@ class _ConnectionFairy(object):
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
- if echo:
+ if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index e45510aa4..fac4980b0 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -1981,11 +1981,13 @@ class SQLCompiler(Compiled):
need_pks = self.isinsert and \
not self.inline and \
- not stmt._returning
+ not stmt._returning and \
+ not stmt._has_multi_parameters
implicit_returning = need_pks and \
self.dialect.implicit_returning and \
stmt.table.implicit_returning
+
if self.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py
index f7e033d85..1934d0776 100644
--- a/lib/sqlalchemy/sql/dml.py
+++ b/lib/sqlalchemy/sql/dml.py
@@ -269,6 +269,13 @@ class ValuesBase(UpdateBase):
.. versionadded:: 0.8
Support for multiple-VALUES INSERT statements.
+ .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
+ clause, even a list of length one,
+ implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The statement
+ deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
.. seealso::
@@ -434,8 +441,13 @@ class Insert(ValuesBase):
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`.Connection.execute`.
- :param inline: if True, SQL defaults will be compiled 'inline' into
- the statement and not pre-executed.
+ :param inline: if True, no attempt will be made to retrieve the
+ SQL-generated default values to be provided within the statement;
+ in particular,
+ this allows SQL expressions to be rendered 'inline' within the
+          statement without the need to pre-execute them; for
+ backends that support "returning", this turns off the "implicit
+ returning" feature for the statement.
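+
+          E.g., a sketch with a hypothetical ``mytable``, rendering a
+          SQL function directly in the statement rather than
+          pre-executing it::
+
+              mytable.insert(inline=True).values(created_at=func.now())
+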
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
@@ -495,17 +507,12 @@ class Insert(ValuesBase):
would normally raise an exception if these column lists don't
correspond.
- .. note::
-
- Depending on backend, it may be necessary for the :class:`.Insert`
- statement to be constructed using the ``inline=True`` flag; this
- flag will prevent the implicit usage of ``RETURNING`` when the
- ``INSERT`` statement is rendered, which isn't supported on a
- backend such as Oracle in conjunction with an ``INSERT..SELECT``
- combination::
-
- sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
- ins = table2.insert(inline=True).from_select(['a', 'b'], sel)
+ .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT
+           implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The statement
+ deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
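+
+           For example, with hypothetical tables ``table1`` and
+           ``table2``, an ``INSERT..SELECT`` no longer needs the flag::
+
+               sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
+               ins = table2.insert().from_select(['a', 'b'], sel)
+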
.. note::
@@ -525,6 +532,7 @@ class Insert(ValuesBase):
self._process_colparams(dict((n, Null()) for n in names))
self.select_names = names
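+        # an INSERT..FROM SELECT addresses an arbitrary number of rows
+        # and has no single "last inserted primary key" to fetch, so
+        # the statement is compiled inline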
+ self.inline = True
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
@@ -728,10 +736,10 @@ class Delete(UpdateBase):
:meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
- :param table: The table to be updated.
+ :param table: The table to delete rows from.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
- condition of the ``UPDATE`` statement. Note that the
+ condition of the ``DELETE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
.. seealso::
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index 8099dca75..c8e815d24 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -1269,7 +1269,8 @@ class Column(SchemaItem, ColumnClause):
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
- event.listen(self, 'after_parent_attach', fn)
+ else:
+ event.listen(self, 'after_parent_attach', fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, unitialized.
diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py
index 9052df570..67c13231e 100644
--- a/lib/sqlalchemy/testing/engines.py
+++ b/lib/sqlalchemy/testing/engines.py
@@ -7,15 +7,12 @@
from __future__ import absolute_import
-import types
import weakref
-from collections import deque
from . import config
from .util import decorator
from .. import event, pool
import re
import warnings
-from .. import util
class ConnectionKiller(object):
@@ -339,112 +336,3 @@ def proxying_engine(conn_cls=DBAPIProxyConnection,
return testing_engine(options={'creator': mock_conn})
-class ReplayableSession(object):
- """A simple record/playback tool.
-
- This is *not* a mock testing class. It only records a session for later
- playback and makes no assertions on call consistency whatsoever. It's
- unlikely to be suitable for anything other than DB-API recording.
-
- """
-
- Callable = object()
- NoAttribute = object()
-
- if util.py2k:
- Natives = set([getattr(types, t)
- for t in dir(types) if not t.startswith('_')]).\
- difference([getattr(types, t)
- for t in ('FunctionType', 'BuiltinFunctionType',
- 'MethodType', 'BuiltinMethodType',
- 'LambdaType', 'UnboundMethodType',)])
- else:
- Natives = set([getattr(types, t)
- for t in dir(types) if not t.startswith('_')]).\
- union([type(t) if not isinstance(t, type)
- else t for t in __builtins__.values()]).\
- difference([getattr(types, t)
- for t in ('FunctionType', 'BuiltinFunctionType',
- 'MethodType', 'BuiltinMethodType',
- 'LambdaType', )])
-
- def __init__(self):
- self.buffer = deque()
-
- def recorder(self, base):
- return self.Recorder(self.buffer, base)
-
- def player(self):
- return self.Player(self.buffer)
-
- class Recorder(object):
- def __init__(self, buffer, subject):
- self._buffer = buffer
- self._subject = subject
-
- def __call__(self, *args, **kw):
- subject, buffer = [object.__getattribute__(self, x)
- for x in ('_subject', '_buffer')]
-
- result = subject(*args, **kw)
- if type(result) not in ReplayableSession.Natives:
- buffer.append(ReplayableSession.Callable)
- return type(self)(buffer, result)
- else:
- buffer.append(result)
- return result
-
- @property
- def _sqla_unwrap(self):
- return self._subject
-
- def __getattribute__(self, key):
- try:
- return object.__getattribute__(self, key)
- except AttributeError:
- pass
-
- subject, buffer = [object.__getattribute__(self, x)
- for x in ('_subject', '_buffer')]
- try:
- result = type(subject).__getattribute__(subject, key)
- except AttributeError:
- buffer.append(ReplayableSession.NoAttribute)
- raise
- else:
- if type(result) not in ReplayableSession.Natives:
- buffer.append(ReplayableSession.Callable)
- return type(self)(buffer, result)
- else:
- buffer.append(result)
- return result
-
- class Player(object):
- def __init__(self, buffer):
- self._buffer = buffer
-
- def __call__(self, *args, **kw):
- buffer = object.__getattribute__(self, '_buffer')
- result = buffer.popleft()
- if result is ReplayableSession.Callable:
- return self
- else:
- return result
-
- @property
- def _sqla_unwrap(self):
- return None
-
- def __getattribute__(self, key):
- try:
- return object.__getattribute__(self, key)
- except AttributeError:
- pass
- buffer = object.__getattribute__(self, '_buffer')
- result = buffer.popleft()
- if result is ReplayableSession.Callable:
- return self
- elif result is ReplayableSession.NoAttribute:
- raise AttributeError(key)
- else:
- return result
diff --git a/lib/sqlalchemy/testing/plugin/provision.py b/lib/sqlalchemy/testing/plugin/provision.py
index baec8a299..c6b9030f5 100644
--- a/lib/sqlalchemy/testing/plugin/provision.py
+++ b/lib/sqlalchemy/testing/plugin/provision.py
@@ -36,14 +36,8 @@ class register(object):
def create_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
- url = cfg.db.url
- backend = url.get_backend_name()
_create_db(cfg, cfg.db, follower_ident)
- new_url = sa_url.make_url(str(url))
-
- new_url.database = follower_ident
-
def configure_follower(follower_ident):
for cfg in config.Config.all_configs():
@@ -63,7 +57,6 @@ def setup_config(db_url, db_opts, options, file_config, follower_ident):
def drop_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
- url = cfg.db.url
_drop_db(cfg, cfg.db, follower_ident)
@@ -110,9 +103,13 @@ def _follower_url_from_main(url, ident):
return url
-#@_follower_url_from_main.for_db("sqlite")
-#def _sqlite_follower_url_from_main(url, ident):
-# return sa_url.make_url("sqlite:///%s.db" % ident)
+@_follower_url_from_main.for_db("sqlite")
+def _sqlite_follower_url_from_main(url, ident):
+ url = sa_url.make_url(url)
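+    # an in-memory (or unspecified) database needs no per-follower file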
+ if not url.database or url.database == ':memory:':
+ return url
+ else:
+ return sa_url.make_url("sqlite:///%s.db" % ident)
@_create_db.for_db("postgresql")
diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py
index fd0616327..005942913 100644
--- a/lib/sqlalchemy/testing/plugin/pytestplugin.py
+++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py
@@ -74,6 +74,9 @@ def pytest_collection_modifyitems(session, config, items):
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(list)
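+    # keep only items bound to a test class instance; the per-class
+    # sub-test generation below relies on that structure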
+ items[:] = [
+ item for item in
+ items if isinstance(item.parent, pytest.Instance)]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
@@ -115,7 +118,6 @@ def pytest_pycollect_makeitem(collector, name, obj):
_current_class = None
-
def pytest_runtest_setup(item):
# here we seem to get called only based on what we collected
# in pytest_collection_modifyitems. So to do class-based stuff
@@ -126,16 +128,18 @@ def pytest_runtest_setup(item):
return
# ... so we're doing a little dance here to figure it out...
- if item.parent.parent is not _current_class:
-
+ if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
- item.parent.parent.addfinalizer(
- lambda: class_teardown(item.parent.parent))
+ def finalize():
+ global _current_class
+ class_teardown(item.parent.parent)
+ _current_class = None
+ item.parent.parent.addfinalizer(finalize)
test_setup(item)
diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py
index 75baec987..fcb888f86 100644
--- a/lib/sqlalchemy/testing/profiling.py
+++ b/lib/sqlalchemy/testing/profiling.py
@@ -14,13 +14,12 @@ in a more fine-grained way than nose's profiling plugin.
import os
import sys
-from .util import gc_collect, decorator
+from .util import gc_collect
from . import config
from .plugin.plugin_base import SkipTest
import pstats
-import time
import collections
-from .. import util
+import contextlib
try:
import cProfile
@@ -30,64 +29,8 @@ from ..util import jython, pypy, win32, update_wrapper
_current_test = None
-
-def profiled(target=None, **target_opts):
- """Function profiling.
-
- @profiled()
- or
- @profiled(report=True, sort=('calls',), limit=20)
-
- Outputs profiling info for a decorated function.
-
- """
-
- profile_config = {'targets': set(),
- 'report': True,
- 'print_callers': False,
- 'print_callees': False,
- 'graphic': False,
- 'sort': ('time', 'calls'),
- 'limit': None}
- if target is None:
- target = 'anonymous_target'
-
- @decorator
- def decorate(fn, *args, **kw):
- elapsed, load_stats, result = _profile(
- fn, *args, **kw)
-
- graphic = target_opts.get('graphic', profile_config['graphic'])
- if graphic:
- os.system("runsnake %s" % filename)
- else:
- report = target_opts.get('report', profile_config['report'])
- if report:
- sort_ = target_opts.get('sort', profile_config['sort'])
- limit = target_opts.get('limit', profile_config['limit'])
- print(("Profile report for target '%s'" % (
- target, )
- ))
-
- stats = load_stats()
- stats.sort_stats(*sort_)
- if limit:
- stats.print_stats(limit)
- else:
- stats.print_stats()
-
- print_callers = target_opts.get(
- 'print_callers', profile_config['print_callers'])
- if print_callers:
- stats.print_callers()
-
- print_callees = target_opts.get(
- 'print_callees', profile_config['print_callees'])
- if print_callees:
- stats.print_callees()
-
- return result
- return decorate
+# ProfileStatsFile instance, set up in plugin_base
+_profile_stats = None
class ProfileStatsFile(object):
@@ -177,20 +120,23 @@ class ProfileStatsFile(object):
self._write()
def _header(self):
- return \
- "# %s\n"\
- "# This file is written out on a per-environment basis.\n"\
- "# For each test in aaa_profiling, the corresponding function and \n"\
- "# environment is located within this file. If it doesn't exist,\n"\
- "# the test is skipped.\n"\
- "# If a callcount does exist, it is compared to what we received. \n"\
- "# assertions are raised if the counts do not match.\n"\
- "# \n"\
- "# To add a new callcount test, apply the function_call_count \n"\
- "# decorator and re-run the tests using the --write-profiles \n"\
- "# option - this file will be rewritten including the new count.\n"\
- "# \n"\
- "" % (self.fname)
+ return (
+ "# %s\n"
+ "# This file is written out on a per-environment basis.\n"
+ "# For each test in aaa_profiling, the corresponding "
+ "function and \n"
+ "# environment is located within this file. "
+ "If it doesn't exist,\n"
+ "# the test is skipped.\n"
+ "# If a callcount does exist, it is compared "
+ "to what we received. \n"
+            "# Assertions are raised if the counts do not match.\n"
+ "# \n"
+ "# To add a new callcount test, apply the function_call_count \n"
+ "# decorator and re-run the tests using the --write-profiles \n"
+ "# option - this file will be rewritten including the new count.\n"
+ "# \n"
+ ) % (self.fname)
def _read(self):
try:
@@ -239,72 +185,66 @@ def function_call_count(variance=0.05):
def decorate(fn):
def wrap(*args, **kw):
-
- if cProfile is None:
- raise SkipTest("cProfile is not installed")
-
- if not _profile_stats.has_stats() and not _profile_stats.write:
- # run the function anyway, to support dependent tests
- # (not a great idea but we have these in test_zoomark)
- fn(*args, **kw)
- raise SkipTest("No profiling stats available on this "
- "platform for this function. Run tests with "
- "--write-profiles to add statistics to %s for "
- "this platform." % _profile_stats.short_fname)
-
- gc_collect()
-
- timespent, load_stats, fn_result = _profile(
- fn, *args, **kw
- )
- stats = load_stats()
- callcount = stats.total_calls
-
- expected = _profile_stats.result(callcount)
- if expected is None:
- expected_count = None
- else:
- line_no, expected_count = expected
-
- print(("Pstats calls: %d Expected %s" % (
- callcount,
- expected_count
- )
- ))
- stats.print_stats()
- # stats.print_callers()
-
- if expected_count:
- deviance = int(callcount * variance)
- failed = abs(callcount - expected_count) > deviance
-
- if failed:
- if _profile_stats.write:
- _profile_stats.replace(callcount)
- else:
- raise AssertionError(
- "Adjusted function call count %s not within %s%% "
- "of expected %s. Rerun with --write-profiles to "
- "regenerate this callcount."
- % (
- callcount, (variance * 100),
- expected_count))
- return fn_result
+ with count_functions(variance=variance):
+ return fn(*args, **kw)
return update_wrapper(wrap, fn)
return decorate
-def _profile(fn, *args, **kw):
- filename = "%s.prof" % fn.__name__
-
- def load_stats():
- st = pstats.Stats(filename)
- os.unlink(filename)
- return st
+@contextlib.contextmanager
+def count_functions(variance=0.05):
+ if cProfile is None:
+ raise SkipTest("cProfile is not installed")
+
+ if not _profile_stats.has_stats() and not _profile_stats.write:
+ raise SkipTest("No profiling stats available on this "
+ "platform for this function. Run tests with "
+ "--write-profiles to add statistics to %s for "
+ "this platform." % _profile_stats.short_fname)
+
+ gc_collect()
+
+ pr = cProfile.Profile()
+ pr.enable()
+    yield
+ pr.disable()
+
+    stats = pstats.Stats(pr, stream=sys.stdout)
+
+ callcount = stats.total_calls
+
+ expected = _profile_stats.result(callcount)
+ if expected is None:
+ expected_count = None
+ else:
+ line_no, expected_count = expected
+
+ print(("Pstats calls: %d Expected %s" % (
+ callcount,
+ expected_count
+ )
+ ))
+ stats.sort_stats("cumulative")
+ stats.print_stats()
+
+ if expected_count:
+ deviance = int(callcount * variance)
+ failed = abs(callcount - expected_count) > deviance
+
+ if failed:
+ if _profile_stats.write:
+ _profile_stats.replace(callcount)
+ else:
+ raise AssertionError(
+ "Adjusted function call count %s not within %s%% "
+ "of expected %s. Rerun with --write-profiles to "
+ "regenerate this callcount."
+ % (
+ callcount, (variance * 100),
+ expected_count))
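+
+
+# A sketch of direct use (``function_call_count`` wraps this same
+# context manager); ``run_profiled_code`` stands in for any measured
+# test body:
+#
+#     with count_functions(variance=0.10):
+#         run_profiled_code()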
- began = time.time()
- cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
- filename=filename)
- ended = time.time()
- return ended - began, load_stats, locals()['result']
diff --git a/lib/sqlalchemy/testing/replay_fixture.py b/lib/sqlalchemy/testing/replay_fixture.py
new file mode 100644
index 000000000..b8a0f6df1
--- /dev/null
+++ b/lib/sqlalchemy/testing/replay_fixture.py
@@ -0,0 +1,167 @@
+from . import fixtures
+from . import profiling
+from .. import util
+import types
+from collections import deque
+import contextlib
+from . import config
+from sqlalchemy import MetaData
+from sqlalchemy import create_engine
+from sqlalchemy.orm import Session
+
+
+class ReplayFixtureTest(fixtures.TestBase):
+
+ @contextlib.contextmanager
+ def _dummy_ctx(self, *arg, **kw):
+ yield
+
+ def test_invocation(self):
+
+ dbapi_session = ReplayableSession()
+ creator = config.db.pool._creator
+ recorder = lambda: dbapi_session.recorder(creator())
+ engine = create_engine(
+ config.db.url, creator=recorder,
+ use_native_hstore=False)
+ self.metadata = MetaData(engine)
+ self.engine = engine
+ self.session = Session(engine)
+
+ self.setup_engine()
+ self._run_steps(ctx=self._dummy_ctx)
+ self.teardown_engine()
+ engine.dispose()
+
+ player = lambda: dbapi_session.player()
+ engine = create_engine(
+ config.db.url, creator=player,
+ use_native_hstore=False)
+
+ self.metadata = MetaData(engine)
+ self.engine = engine
+ self.session = Session(engine)
+
+ self.setup_engine()
+ self._run_steps(ctx=profiling.count_functions)
+ self.teardown_engine()
+
+ def setup_engine(self):
+ pass
+
+ def teardown_engine(self):
+ pass
+
+ def _run_steps(self, ctx):
+ raise NotImplementedError()
+
+
+class ReplayableSession(object):
+ """A simple record/playback tool.
+
+ This is *not* a mock testing class. It only records a session for later
+ playback and makes no assertions on call consistency whatsoever. It's
+ unlikely to be suitable for anything other than DB-API recording.
+
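+    A rough usage sketch, where ``real_dbapi_connect`` stands in for
+    any DBAPI connect function::
+
+        replayable = ReplayableSession()
+        conn = replayable.recorder(real_dbapi_connect())
+        # ... drive ``conn`` as the DBAPI connection, recording calls ...
+        conn = replayable.player()
+        # ... the same calls now replay without a real database ...
+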
+ """
+
+ Callable = object()
+ NoAttribute = object()
+
+ if util.py2k:
+ Natives = set([getattr(types, t)
+ for t in dir(types) if not t.startswith('_')]).\
+ difference([getattr(types, t)
+ for t in ('FunctionType', 'BuiltinFunctionType',
+ 'MethodType', 'BuiltinMethodType',
+ 'LambdaType', 'UnboundMethodType',)])
+ else:
+ Natives = set([getattr(types, t)
+ for t in dir(types) if not t.startswith('_')]).\
+ union([type(t) if not isinstance(t, type)
+ else t for t in __builtins__.values()]).\
+ difference([getattr(types, t)
+ for t in ('FunctionType', 'BuiltinFunctionType',
+ 'MethodType', 'BuiltinMethodType',
+ 'LambdaType', )])
+
+ def __init__(self):
+ self.buffer = deque()
+
+ def recorder(self, base):
+ return self.Recorder(self.buffer, base)
+
+ def player(self):
+ return self.Player(self.buffer)
+
+ class Recorder(object):
+ def __init__(self, buffer, subject):
+ self._buffer = buffer
+ self._subject = subject
+
+ def __call__(self, *args, **kw):
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+
+ result = subject(*args, **kw)
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ @property
+ def _sqla_unwrap(self):
+ return self._subject
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+ try:
+ result = type(subject).__getattribute__(subject, key)
+ except AttributeError:
+ buffer.append(ReplayableSession.NoAttribute)
+ raise
+ else:
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ class Player(object):
+ def __init__(self, buffer):
+ self._buffer = buffer
+
+ def __call__(self, *args, **kw):
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ else:
+ return result
+
+ @property
+ def _sqla_unwrap(self):
+ return None
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ elif result is ReplayableSession.NoAttribute:
+ raise AttributeError(key)
+ else:
+ return result
diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py
index 5236d0120..0904d454e 100644
--- a/lib/sqlalchemy/util/_collections.py
+++ b/lib/sqlalchemy/util/_collections.py
@@ -264,15 +264,18 @@ class OrderedDict(dict):
def __iter__(self):
return iter(self._list)
- if py2k:
- def values(self):
- return [self[key] for key in self._list]
+ def keys(self):
+ return list(self)
- def keys(self):
- return self._list
+ def values(self):
+ return [self[key] for key in self._list]
+
+ def items(self):
+ return [(key, self[key]) for key in self._list]
+ if py2k:
def itervalues(self):
- return iter([self[key] for key in self._list])
+ return iter(self.values())
def iterkeys(self):
return iter(self)
@@ -280,41 +283,6 @@ class OrderedDict(dict):
def iteritems(self):
return iter(self.items())
- def items(self):
- return [(key, self[key]) for key in self._list]
- else:
- def values(self):
- # return (self[key] for key in self)
- return (self[key] for key in self._list)
-
- def keys(self):
- # return iter(self)
- return iter(self._list)
-
- def items(self):
- # return ((key, self[key]) for key in self)
- return ((key, self[key]) for key in self._list)
-
- _debug_iter = False
- if _debug_iter:
- # normally disabled to reduce function call
- # overhead
- def __iter__(self):
- len_ = len(self._list)
- for item in self._list:
- yield item
- assert len_ == len(self._list), \
- "Dictionary changed size during iteration"
-
- def values(self):
- return (self[key] for key in self)
-
- def keys(self):
- return iter(self)
-
- def items(self):
- return ((key, self[key]) for key in self)
-
def __setitem__(self, key, object):
if key not in self:
try:
diff --git a/setup.cfg b/setup.cfg
index 4ec4b0837..b70086605 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,7 @@ first-package-wins = true
where = test
[pytest]
-addopts= --tb native -v -r fxX
+addopts= --tb native -v -r fxX --maxfail=25
python_files=test/*test_*.py
[upload]
diff --git a/test/aaa_profiling/test_compiler.py b/test/aaa_profiling/test_compiler.py
index 47a412e73..5eece4602 100644
--- a/test/aaa_profiling/test_compiler.py
+++ b/test/aaa_profiling/test_compiler.py
@@ -42,7 +42,7 @@ class CompileTest(fixtures.TestBase, AssertsExecutionResults):
def test_insert(self):
t1.insert().compile(dialect=self.dialect)
- @profiling.function_call_count()
+ @profiling.function_call_count(variance=.15)
def test_update(self):
t1.update().compile(dialect=self.dialect)
diff --git a/test/aaa_profiling/test_zoomark.py b/test/aaa_profiling/test_zoomark.py
index 4c4708503..5b8a0f785 100644
--- a/test/aaa_profiling/test_zoomark.py
+++ b/test/aaa_profiling/test_zoomark.py
@@ -7,43 +7,42 @@ An adaptation of Robert Brewer's ZooMark speed tests. """
import datetime
from sqlalchemy import Table, Column, Integer, Unicode, Date, \
- DateTime, Time, Float, MetaData, Sequence, ForeignKey, create_engine, \
+ DateTime, Time, Float, Sequence, ForeignKey, \
select, join, and_, outerjoin, func
-from sqlalchemy.testing import fixtures, engines, profiling
-from sqlalchemy import testing
-ITERATIONS = 1
-dbapi_session = engines.ReplayableSession()
-metadata = None
-
+from sqlalchemy.testing import replay_fixture
-class ZooMarkTest(fixtures.TestBase):
+ITERATIONS = 1
- """Runs the ZooMark and squawks if method counts vary from the norm.
- Each test has an associated `call_range`, the total number of
- accepted function calls made during the test. The count can vary
- between Python 2.4 and 2.5.
+class ZooMarkTest(replay_fixture.ReplayFixtureTest):
- Unlike a unit test, this is a ordered collection of steps. Running
- components individually will fail.
+ """Runs the ZooMark and squawks if method counts vary from the norm."""
- """
__requires__ = 'cpython',
__only_on__ = 'postgresql+psycopg2'
- def test_baseline_0_setup(self):
- global metadata
- creator = testing.db.pool._creator
- recorder = lambda: dbapi_session.recorder(creator())
- engine = engines.testing_engine(options={'creator': recorder,
- 'use_reaper': False})
- metadata = MetaData(engine)
- engine.connect()
-
- def test_baseline_1_create_tables(self):
+ def _run_steps(self, ctx):
+ self._baseline_1_create_tables()
+ with ctx():
+ self._baseline_1a_populate()
+ with ctx():
+ self._baseline_2_insert()
+ with ctx():
+ self._baseline_3_properties()
+ with ctx():
+ self._baseline_4_expressions()
+ with ctx():
+ self._baseline_5_aggregates()
+ with ctx():
+ self._baseline_6_editing()
+ with ctx():
+ self._baseline_7_multiview()
+ self._baseline_8_drop()
+
+ def _baseline_1_create_tables(self):
Table(
'Zoo',
- metadata,
+ self.metadata,
Column('ID', Integer, Sequence('zoo_id_seq'),
primary_key=True, index=True),
Column('Name', Unicode(255)),
@@ -54,7 +53,7 @@ class ZooMarkTest(fixtures.TestBase):
)
Table(
'Animal',
- metadata,
+ self.metadata,
Column('ID', Integer, Sequence('animal_id_seq'),
primary_key=True),
Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
@@ -67,12 +66,12 @@ class ZooMarkTest(fixtures.TestBase):
Column('PreferredFoodID', Integer),
Column('AlternateFoodID', Integer),
)
- metadata.create_all()
+ self.metadata.create_all()
- def test_baseline_1a_populate(self):
- Zoo = metadata.tables['Zoo']
- Animal = metadata.tables['Animal']
- engine = metadata.bind
+ def _baseline_1a_populate(self):
+ Zoo = self.metadata.tables['Zoo']
+ Animal = self.metadata.tables['Animal']
+ engine = self.metadata.bind
wap = engine.execute(Zoo.insert(), Name='Wild Animal Park',
Founded=datetime.date(2000, 1, 1),
Opens=datetime.time(8, 15, 59),
@@ -137,16 +136,16 @@ class ZooMarkTest(fixtures.TestBase):
engine.execute(Animal.insert(inline=True), Species='Ape',
Name='Hua Mei', Legs=2, MotherID=bai_yun)
- def test_baseline_2_insert(self):
- Animal = metadata.tables['Animal']
+ def _baseline_2_insert(self):
+ Animal = self.metadata.tables['Animal']
i = Animal.insert(inline=True)
for x in range(ITERATIONS):
i.execute(Species='Tick', Name='Tick %d' % x, Legs=8)
- def test_baseline_3_properties(self):
- Zoo = metadata.tables['Zoo']
- Animal = metadata.tables['Animal']
- engine = metadata.bind
+ def _baseline_3_properties(self):
+ Zoo = self.metadata.tables['Zoo']
+ Animal = self.metadata.tables['Animal']
+ engine = self.metadata.bind
def fullobject(select):
"""Iterate over the full result row."""
@@ -171,10 +170,10 @@ class ZooMarkTest(fixtures.TestBase):
fullobject(Animal.select(Animal.c.Legs == 1000000))
fullobject(Animal.select(Animal.c.Species == 'Tick'))
- def test_baseline_4_expressions(self):
- Zoo = metadata.tables['Zoo']
- Animal = metadata.tables['Animal']
- engine = metadata.bind
+ def _baseline_4_expressions(self):
+ Zoo = self.metadata.tables['Zoo']
+ Animal = self.metadata.tables['Animal']
+ engine = self.metadata.bind
def fulltable(select):
"""Iterate over the full result table."""
@@ -280,10 +279,10 @@ class ZooMarkTest(fixtures.TestBase):
'day',
Animal.c.LastEscape) == 21))) == 1
- def test_baseline_5_aggregates(self):
- Animal = metadata.tables['Animal']
- Zoo = metadata.tables['Zoo']
- engine = metadata.bind
+ def _baseline_5_aggregates(self):
+ Animal = self.metadata.tables['Animal']
+ Zoo = self.metadata.tables['Zoo']
+ engine = self.metadata.bind
for x in range(ITERATIONS):
@@ -327,9 +326,9 @@ class ZooMarkTest(fixtures.TestBase):
distinct=True)).fetchall()]
legs.sort()
- def test_baseline_6_editing(self):
- Zoo = metadata.tables['Zoo']
- engine = metadata.bind
+ def _baseline_6_editing(self):
+ Zoo = self.metadata.tables['Zoo']
+ engine = self.metadata.bind
for x in range(ITERATIONS):
# Edit
@@ -364,10 +363,10 @@ class ZooMarkTest(fixtures.TestBase):
)).first()
assert SDZ['Founded'] == datetime.date(1935, 9, 13)
- def test_baseline_7_multiview(self):
- Zoo = metadata.tables['Zoo']
- Animal = metadata.tables['Animal']
- engine = metadata.bind
+ def _baseline_7_multiview(self):
+ Zoo = self.metadata.tables['Zoo']
+ Animal = self.metadata.tables['Animal']
+ engine = self.metadata.bind
def fulltable(select):
"""Iterate over the full result table."""
@@ -403,52 +402,6 @@ class ZooMarkTest(fixtures.TestBase):
Zoo.c.Name, Animal.c.Species],
from_obj=[outerjoin(Animal, Zoo)]))
- def test_baseline_8_drop(self):
- metadata.drop_all()
-
- # Now, run all of these tests again with the DB-API driver factored
- # out: the ReplayableSession playback stands in for the database.
- #
- # How awkward is this in a unittest framework? Very.
-
- def test_profile_0(self):
- global metadata
- player = lambda: dbapi_session.player()
- engine = create_engine('postgresql:///', creator=player,
- use_native_hstore=False)
- metadata = MetaData(engine)
- engine.connect()
-
- def test_profile_1_create_tables(self):
- self.test_baseline_1_create_tables()
-
- @profiling.function_call_count()
- def test_profile_1a_populate(self):
- self.test_baseline_1a_populate()
-
- @profiling.function_call_count()
- def test_profile_2_insert(self):
- self.test_baseline_2_insert()
-
- @profiling.function_call_count()
- def test_profile_3_properties(self):
- self.test_baseline_3_properties()
-
- @profiling.function_call_count()
- def test_profile_4_expressions(self):
- self.test_baseline_4_expressions()
-
- @profiling.function_call_count()
- def test_profile_5_aggregates(self):
- self.test_baseline_5_aggregates()
-
- @profiling.function_call_count()
- def test_profile_6_editing(self):
- self.test_baseline_6_editing()
-
- @profiling.function_call_count()
- def test_profile_7_multiview(self):
- self.test_baseline_7_multiview()
+ def _baseline_8_drop(self):
+ self.metadata.drop_all()
- def test_profile_8_drop(self):
- self.test_baseline_8_drop()
diff --git a/test/aaa_profiling/test_zoomark_orm.py b/test/aaa_profiling/test_zoomark_orm.py
index 6b781af9b..500d7c2cb 100644
--- a/test/aaa_profiling/test_zoomark_orm.py
+++ b/test/aaa_profiling/test_zoomark_orm.py
@@ -7,48 +7,52 @@ An adaptation of Robert Brewer's ZooMark speed tests. """
import datetime
from sqlalchemy import Table, Column, Integer, Unicode, Date, \
- DateTime, Time, Float, MetaData, Sequence, ForeignKey, create_engine, \
+ DateTime, Time, Float, Sequence, ForeignKey, \
select, and_, func
-from sqlalchemy.orm import sessionmaker, mapper
-from sqlalchemy.testing import fixtures, engines, profiling
-from sqlalchemy import testing
+from sqlalchemy.orm import mapper
+from sqlalchemy.testing import replay_fixture
+
ITERATIONS = 1
-dbapi_session = engines.ReplayableSession()
-metadata = None
Zoo = Animal = session = None
-class ZooMarkTest(fixtures.TestBase):
+class ZooMarkTest(replay_fixture.ReplayFixtureTest):
"""Runs the ZooMark and squawks if method counts vary from the norm.
- Each test has an associated `call_range`, the total number of
- accepted function calls made during the test. The count can vary
- between Python 2.4 and 2.5.
-
- Unlike a unit test, this is a ordered collection of steps. Running
- components individually will fail.
"""
__requires__ = 'cpython',
__only_on__ = 'postgresql+psycopg2'
- def test_baseline_0_setup(self):
- global metadata, session
- creator = testing.db.pool._creator
- recorder = lambda: dbapi_session.recorder(creator())
- engine = engines.testing_engine(
- options={'creator': recorder, 'use_reaper': False})
- metadata = MetaData(engine)
- session = sessionmaker(engine)()
- engine.connect()
-
- def test_baseline_1_create_tables(self):
+ def _run_steps(self, ctx):
+        # tables are created in setup_engine()
+ with ctx():
+ self._baseline_1a_populate()
+ with ctx():
+ self._baseline_2_insert()
+ with ctx():
+ self._baseline_3_properties()
+ with ctx():
+ self._baseline_4_expressions()
+ with ctx():
+ self._baseline_5_aggregates()
+ with ctx():
+ self._baseline_6_editing()
+        # tables are dropped in teardown_engine()
+
+ def setup_engine(self):
+ self._baseline_1_create_tables()
+
+ def teardown_engine(self):
+ self._baseline_7_drop()
+
+ def _baseline_1_create_tables(self):
zoo = Table(
'Zoo',
- metadata,
+ self.metadata,
Column('ID', Integer, Sequence('zoo_id_seq'),
primary_key=True, index=True),
Column('Name', Unicode(255)),
@@ -59,7 +63,7 @@ class ZooMarkTest(fixtures.TestBase):
)
animal = Table(
'Animal',
- metadata,
+ self.metadata,
Column('ID', Integer, Sequence('animal_id_seq'),
primary_key=True),
Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
@@ -72,7 +76,7 @@ class ZooMarkTest(fixtures.TestBase):
Column('PreferredFoodID', Integer),
Column('AlternateFoodID', Integer),
)
- metadata.create_all()
+ self.metadata.create_all()
global Zoo, Animal
class Zoo(object):
@@ -90,131 +94,129 @@ class ZooMarkTest(fixtures.TestBase):
mapper(Zoo, zoo)
mapper(Animal, animal)
- def test_baseline_1a_populate(self):
+ def _baseline_1a_populate(self):
wap = Zoo(
Name='Wild Animal Park', Founded=datetime.date(
2000, 1, 1), Opens=datetime.time(
8, 15, 59), LastEscape=datetime.datetime(
2004, 7, 29, 5, 6, 7, ), Admission=4.95)
- session.add(wap)
+ self.session.add(wap)
sdz = Zoo(
Name='San Diego Zoo', Founded=datetime.date(
1835, 9, 13), Opens=datetime.time(
9, 0, 0), Admission=0)
- session.add(sdz)
+ self.session.add(sdz)
bio = Zoo(Name='Montr\xe9al Biod\xf4me',
Founded=datetime.date(1992, 6, 19),
Opens=datetime.time(9, 0, 0), Admission=11.75)
- session.add(bio)
+ self.session.add(bio)
seaworld = Zoo(Name='Sea_World', Admission=60)
- session.add(seaworld)
+ self.session.add(seaworld)
# Let's add a crazy futuristic Zoo to test large date values.
lp = Zoo(Name='Luna Park', Founded=datetime.date(2072, 7, 17),
Opens=datetime.time(0, 0, 0), Admission=134.95)
- session.add(lp)
- session.flush()
+ self.session.add(lp)
# Animals
leopard = Animal(Species='Leopard', Lifespan=73.5)
- session.add(leopard)
+ self.session.add(leopard)
leopard.ZooID = wap.ID
leopard.LastEscape = \
datetime.datetime(2004, 12, 21, 8, 15, 0, 999907, )
- session.add(Animal(Species='Lion', ZooID=wap.ID))
- session.add(Animal(Species='Slug', Legs=1, Lifespan=.75))
- session.add(Animal(Species='Tiger', ZooID=sdz.ID))
+ self.session.add(Animal(Species='Lion', ZooID=wap.ID))
+ self.session.add(Animal(Species='Slug', Legs=1, Lifespan=.75))
+ self.session.add(Animal(Species='Tiger', ZooID=sdz.ID))
# Override Legs.default with itself just to make sure it works.
- session.add(Animal(Species='Bear', Legs=4))
- session.add(Animal(Species='Ostrich', Legs=2, Lifespan=103.2))
- session.add(Animal(Species='Centipede', Legs=100))
- session.add(Animal(Species='Emperor Penguin', Legs=2,
+ self.session.add(Animal(Species='Bear', Legs=4))
+ self.session.add(Animal(Species='Ostrich', Legs=2, Lifespan=103.2))
+ self.session.add(Animal(Species='Centipede', Legs=100))
+ self.session.add(Animal(Species='Emperor Penguin', Legs=2,
ZooID=seaworld.ID))
- session.add(Animal(Species='Adelie Penguin', Legs=2,
+ self.session.add(Animal(Species='Adelie Penguin', Legs=2,
ZooID=seaworld.ID))
- session.add(Animal(Species='Millipede', Legs=1000000,
+ self.session.add(Animal(Species='Millipede', Legs=1000000,
ZooID=sdz.ID))
# Add a mother and child to test relationships
bai_yun = Animal(Species='Ape', Name='Bai Yun', Legs=2)
- session.add(bai_yun)
- session.add(Animal(Species='Ape', Name='Hua Mei', Legs=2,
+ self.session.add(bai_yun)
+ self.session.add(Animal(Species='Ape', Name='Hua Mei', Legs=2,
MotherID=bai_yun.ID))
- session.flush()
- session.commit()
+ self.session.commit()
- def test_baseline_2_insert(self):
+ def _baseline_2_insert(self):
for x in range(ITERATIONS):
- session.add(Animal(Species='Tick', Name='Tick %d' % x,
+ self.session.add(Animal(Species='Tick', Name='Tick %d' % x,
Legs=8))
- session.flush()
+ self.session.flush()
- def test_baseline_3_properties(self):
+ def _baseline_3_properties(self):
for x in range(ITERATIONS):
# Zoos
- list(session.query(Zoo).filter(
+ list(self.session.query(Zoo).filter(
Zoo.Name == 'Wild Animal Park'))
list(
- session.query(Zoo).filter(
+ self.session.query(Zoo).filter(
Zoo.Founded == datetime.date(
1835,
9,
13)))
list(
- session.query(Zoo).filter(
+ self.session.query(Zoo).filter(
Zoo.Name == 'Montr\xe9al Biod\xf4me'))
- list(session.query(Zoo).filter(Zoo.Admission == float(60)))
+ list(self.session.query(Zoo).filter(Zoo.Admission == float(60)))
# Animals
- list(session.query(Animal).filter(Animal.Species == 'Leopard'))
- list(session.query(Animal).filter(Animal.Species == 'Ostrich'))
- list(session.query(Animal).filter(Animal.Legs == 1000000))
- list(session.query(Animal).filter(Animal.Species == 'Tick'))
+ list(self.session.query(Animal).filter(Animal.Species == 'Leopard'))
+ list(self.session.query(Animal).filter(Animal.Species == 'Ostrich'))
+ list(self.session.query(Animal).filter(Animal.Legs == 1000000))
+ list(self.session.query(Animal).filter(Animal.Species == 'Tick'))
- def test_baseline_4_expressions(self):
+ def _baseline_4_expressions(self):
for x in range(ITERATIONS):
- assert len(list(session.query(Zoo))) == 5
- assert len(list(session.query(Animal))) == ITERATIONS + 12
- assert len(list(session.query(Animal).filter(Animal.Legs
+ assert len(list(self.session.query(Zoo))) == 5
+ assert len(list(self.session.query(Animal))) == ITERATIONS + 12
+ assert len(list(self.session.query(Animal).filter(Animal.Legs
== 4))) == 4
- assert len(list(session.query(Animal).filter(Animal.Legs
+ assert len(list(self.session.query(Animal).filter(Animal.Legs
== 2))) == 5
assert len(
list(
- session.query(Animal).filter(
+ self.session.query(Animal).filter(
and_(
Animal.Legs >= 2,
Animal.Legs < 20)))) == ITERATIONS + 9
- assert len(list(session.query(Animal).filter(Animal.Legs
+ assert len(list(self.session.query(Animal).filter(Animal.Legs
> 10))) == 2
- assert len(list(session.query(Animal).filter(Animal.Lifespan
+ assert len(list(self.session.query(Animal).filter(Animal.Lifespan
> 70))) == 2
- assert len(list(session.query(Animal).
+ assert len(list(self.session.query(Animal).
filter(Animal.Species.like('L%')))) == 2
- assert len(list(session.query(Animal).
+ assert len(list(self.session.query(Animal).
filter(Animal.Species.like('%pede')))) == 2
- assert len(list(session.query(Animal).filter(Animal.LastEscape
+ assert len(list(self.session.query(Animal).filter(Animal.LastEscape
!= None))) == 1
assert len(
list(
- session.query(Animal).filter(
+ self.session.query(Animal).filter(
Animal.LastEscape == None))) == ITERATIONS + 11
# In operator (containedby)
- assert len(list(session.query(Animal).filter(
+ assert len(list(self.session.query(Animal).filter(
Animal.Species.like('%pede%')))) == 2
assert len(
list(
- session.query(Animal). filter(
+ self.session.query(Animal). filter(
Animal.Species.in_(
('Lion', 'Tiger', 'Bear'))))) == 3
@@ -224,17 +226,17 @@ class ZooMarkTest(fixtures.TestBase):
pet, pet2 = thing(), thing()
pet.Name, pet2.Name = 'Slug', 'Ostrich'
- assert len(list(session.query(Animal).
+ assert len(list(self.session.query(Animal).
filter(Animal.Species.in_((pet.Name,
pet2.Name))))) == 2
# logic and other functions
name = 'Lion'
- assert len(list(session.query(Animal).
+ assert len(list(self.session.query(Animal).
filter(func.length(Animal.Species)
== len(name)))) == ITERATIONS + 3
- assert len(list(session.query(Animal).
+ assert len(list(self.session.query(Animal).
filter(Animal.Species.like('%i%'
)))) == ITERATIONS + 7
@@ -242,29 +244,29 @@ class ZooMarkTest(fixtures.TestBase):
assert len(
list(
- session.query(Zoo).filter(
+ self.session.query(Zoo).filter(
and_(
Zoo.Founded != None,
Zoo.Founded < func.now())))) == 3
- assert len(list(session.query(Animal).filter(Animal.LastEscape
+ assert len(list(self.session.query(Animal).filter(Animal.LastEscape
== func.now()))) == 0
- assert len(list(session.query(Animal).filter(
+ assert len(list(self.session.query(Animal).filter(
func.date_part('year', Animal.LastEscape) == 2004))) == 1
assert len(
list(
- session.query(Animal). filter(
+ self.session.query(Animal). filter(
func.date_part(
'month',
Animal.LastEscape) == 12))) == 1
- assert len(list(session.query(Animal).filter(
+ assert len(list(self.session.query(Animal).filter(
func.date_part('day', Animal.LastEscape) == 21))) == 1
- def test_baseline_5_aggregates(self):
- Animal = metadata.tables['Animal']
- Zoo = metadata.tables['Zoo']
+ def _baseline_5_aggregates(self):
+ Animal = self.metadata.tables['Animal']
+ Zoo = self.metadata.tables['Zoo']
# TODO: convert to ORM
- engine = metadata.bind
+ engine = self.metadata.bind
for x in range(ITERATIONS):
# views
@@ -307,12 +309,12 @@ class ZooMarkTest(fixtures.TestBase):
distinct=True)).fetchall()]
legs.sort()
- def test_baseline_6_editing(self):
+ def _baseline_6_editing(self):
for x in range(ITERATIONS):
# Edit
- SDZ = session.query(Zoo).filter(Zoo.Name == 'San Diego Zoo'
+ SDZ = self.session.query(Zoo).filter(Zoo.Name == 'San Diego Zoo'
).one()
SDZ.Name = 'The San Diego Zoo'
SDZ.Founded = datetime.date(1900, 1, 1)
@@ -321,7 +323,7 @@ class ZooMarkTest(fixtures.TestBase):
# Test edits
- SDZ = session.query(Zoo).filter(Zoo.Name
+ SDZ = self.session.query(Zoo).filter(Zoo.Name
== 'The San Diego Zoo').one()
assert SDZ.Founded == datetime.date(1900, 1, 1), SDZ.Founded
@@ -334,55 +336,12 @@ class ZooMarkTest(fixtures.TestBase):
# Test re-edits
- SDZ = session.query(Zoo).filter(Zoo.Name == 'San Diego Zoo'
+ SDZ = self.session.query(Zoo).filter(Zoo.Name == 'San Diego Zoo'
).one()
assert SDZ.Founded == datetime.date(1835, 9, 13), \
SDZ.Founded
- def test_baseline_7_drop(self):
- session.rollback()
- metadata.drop_all()
-
- # Now, run all of these tests again with the DB-API driver factored
- # out: the ReplayableSession playback stands in for the database.
- #
- # How awkward is this in a unittest framework? Very.
-
- def test_profile_0(self):
- global metadata, session
- player = lambda: dbapi_session.player()
- engine = create_engine('postgresql:///', creator=player,
- use_native_hstore=False)
- metadata = MetaData(engine)
- session = sessionmaker(engine)()
- engine.connect()
-
- def test_profile_1_create_tables(self):
- self.test_baseline_1_create_tables()
-
- @profiling.function_call_count()
- def test_profile_1a_populate(self):
- self.test_baseline_1a_populate()
-
- @profiling.function_call_count()
- def test_profile_2_insert(self):
- self.test_baseline_2_insert()
-
- @profiling.function_call_count()
- def test_profile_3_properties(self):
- self.test_baseline_3_properties()
-
- @profiling.function_call_count()
- def test_profile_4_expressions(self):
- self.test_baseline_4_expressions()
-
- @profiling.function_call_count()
- def test_profile_5_aggregates(self):
- self.test_baseline_5_aggregates()
-
- @profiling.function_call_count()
- def test_profile_6_editing(self):
- self.test_baseline_6_editing()
-
- def test_profile_7_drop(self):
- self.test_baseline_7_drop()
+ def _baseline_7_drop(self):
+ self.session.rollback()
+ self.metadata.drop_all()
+
diff --git a/test/base/test_events.py b/test/base/test_events.py
index 4ae89fe17..30b728cd3 100644
--- a/test/base/test_events.py
+++ b/test/base/test_events.py
@@ -8,6 +8,7 @@ from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.mock import Mock, call
from sqlalchemy import testing
+
class EventsTest(fixtures.TestBase):
"""Test class- and instance-level event registration."""
@@ -155,8 +156,8 @@ class EventsTest(fixtures.TestBase):
t2.dispatch.event_one(5, 6)
is_(
t1.dispatch.__dict__['event_one'],
- self.Target.dispatch.event_one.\
- _empty_listeners[self.Target]
+ self.Target.dispatch.event_one.
+ _empty_listeners[self.Target]
)
@event.listens_for(t1, "event_one")
@@ -164,13 +165,13 @@ class EventsTest(fixtures.TestBase):
pass
is_not_(
t1.dispatch.__dict__['event_one'],
- self.Target.dispatch.event_one.\
- _empty_listeners[self.Target]
+ self.Target.dispatch.event_one.
+ _empty_listeners[self.Target]
)
is_(
t2.dispatch.__dict__['event_one'],
- self.Target.dispatch.event_one.\
- _empty_listeners[self.Target]
+ self.Target.dispatch.event_one.
+ _empty_listeners[self.Target]
)
def test_immutable_methods(self):
@@ -188,6 +189,7 @@ class EventsTest(fixtures.TestBase):
meth
)
+
class NamedCallTest(fixtures.TestBase):
def setUp(self):
@@ -206,8 +208,8 @@ class NamedCallTest(fixtures.TestBase):
self.TargetOne = TargetOne
def tearDown(self):
- event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
-
+ event.base._remove_dispatcher(
+ self.TargetOne.__dict__['dispatch'].events)
def test_kw_accept(self):
canary = Mock()
@@ -255,7 +257,6 @@ class NamedCallTest(fixtures.TestBase):
class LegacySignatureTest(fixtures.TestBase):
"""test adaption of legacy args"""
-
def setUp(self):
class TargetEventsOne(event.Events):
@@ -267,18 +268,19 @@ class LegacySignatureTest(fixtures.TestBase):
def event_four(self, x, y, z, q, **kw):
pass
- @event._legacy_signature("0.9", ["x", "y", "z", "q"],
- lambda x, y: (x, y, x + y, x * y))
+ @event._legacy_signature(
+ "0.9", ["x", "y", "z", "q"],
+ lambda x, y: (x, y, x + y, x * y))
def event_six(self, x, y):
pass
-
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
self.TargetOne = TargetOne
def tearDown(self):
- event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
+ event.base._remove_dispatcher(
+ self.TargetOne.__dict__['dispatch'].events)
def test_legacy_accept(self):
canary = Mock()
@@ -306,6 +308,7 @@ class LegacySignatureTest(fixtures.TestBase):
canary = Mock()
inst = self.TargetOne()
+
@event.listens_for(inst, "event_four")
def handler1(x, y, **kw):
canary(x, y, kw)
@@ -313,6 +316,7 @@ class LegacySignatureTest(fixtures.TestBase):
def test_legacy_accept_partial(self):
canary = Mock()
+
def evt(a, x, y, **kw):
canary(a, x, y, **kw)
from functools import partial
@@ -330,7 +334,6 @@ class LegacySignatureTest(fixtures.TestBase):
[call(5, 4, 5, foo="bar")]
)
-
def _test_legacy_accept_kw(self, target, canary):
target.dispatch.event_four(4, 5, 6, 7, foo="bar")
@@ -410,22 +413,19 @@ class LegacySignatureTest(fixtures.TestBase):
class ClsLevelListenTest(fixtures.TestBase):
-
def tearDown(self):
- event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
+ event.base._remove_dispatcher(
+ self.TargetOne.__dict__['dispatch'].events)
def setUp(self):
class TargetEventsOne(event.Events):
def event_one(self, x, y):
pass
+
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
self.TargetOne = TargetOne
- def tearDown(self):
- event.base._remove_dispatcher(
- self.TargetOne.__dict__['dispatch'].events)
-
def test_lis_subclass_lis(self):
@event.listens_for(self.TargetOne, "event_one")
def handler1(x, y):
@@ -470,12 +470,14 @@ class ClsLevelListenTest(fixtures.TestBase):
def test_two_sub_lis(self):
class SubTarget1(self.TargetOne):
pass
+
class SubTarget2(self.TargetOne):
pass
@event.listens_for(self.TargetOne, "event_one")
def handler1(x, y):
pass
+
@event.listens_for(SubTarget1, "event_one")
def handler2(x, y):
pass
@@ -510,8 +512,10 @@ class AcceptTargetsTest(fixtures.TestBase):
self.TargetTwo = TargetTwo
def tearDown(self):
- event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
- event.base._remove_dispatcher(self.TargetTwo.__dict__['dispatch'].events)
+ event.base._remove_dispatcher(
+ self.TargetOne.__dict__['dispatch'].events)
+ event.base._remove_dispatcher(
+ self.TargetTwo.__dict__['dispatch'].events)
def test_target_accept(self):
"""Test that events of the same name are routed to the correct
@@ -560,6 +564,7 @@ class AcceptTargetsTest(fixtures.TestBase):
[listen_two, listen_four]
)
+
class CustomTargetsTest(fixtures.TestBase):
"""Test custom target acceptance."""
@@ -599,6 +604,7 @@ class CustomTargetsTest(fixtures.TestBase):
listen, "event_one", self.Target
)
+
class SubclassGrowthTest(fixtures.TestBase):
"""test that ad-hoc subclasses are garbage collected."""
@@ -625,7 +631,8 @@ class SubclassGrowthTest(fixtures.TestBase):
class ListenOverrideTest(fixtures.TestBase):
- """Test custom listen functions which change the listener function signature."""
+ """Test custom listen functions which change the listener function
+ signature."""
def setUp(self):
class TargetEvents(event.Events):
@@ -715,7 +722,6 @@ class PropagateTest(fixtures.TestBase):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
-
def test_propagate(self):
listen_one = Mock()
listen_two = Mock()
@@ -741,6 +747,7 @@ class PropagateTest(fixtures.TestBase):
[]
)
+
class JoinTest(fixtures.TestBase):
def setUp(self):
class TargetEvents(event.Events):
@@ -767,7 +774,8 @@ class JoinTest(fixtures.TestBase):
self.TargetElement = TargetElement
def tearDown(self):
- for cls in (self.TargetElement,
+ for cls in (
+ self.TargetElement,
self.TargetFactory, self.BaseTarget):
if 'dispatch' in cls.__dict__:
event.base._remove_dispatcher(cls.__dict__['dispatch'].events)
@@ -780,6 +788,7 @@ class JoinTest(fixtures.TestBase):
def test_kw_ok(self):
l1 = Mock()
+
def listen(**kw):
l1(kw)
@@ -789,8 +798,10 @@ class JoinTest(fixtures.TestBase):
element.run_event(2)
eq_(
l1.mock_calls,
- [call({"target": element, "arg": 1}),
- call({"target": element, "arg": 2}),]
+ [
+ call({"target": element, "arg": 1}),
+ call({"target": element, "arg": 2}),
+ ]
)
def test_parent_class_only(self):
@@ -895,7 +906,6 @@ class JoinTest(fixtures.TestBase):
[call(element, 1), call(element, 2), call(element, 3)]
)
-
def test_parent_instance_child_class_apply_after(self):
l1 = Mock()
l2 = Mock()
@@ -969,6 +979,7 @@ class JoinTest(fixtures.TestBase):
[call(element, 1), call(element, 2), call(element, 3)]
)
+
class RemovalTest(fixtures.TestBase):
def _fixture(self):
class TargetEvents(event.Events):
@@ -1003,6 +1014,7 @@ class RemovalTest(fixtures.TestBase):
def test_clslevel_subclass(self):
Target = self._fixture()
+
class SubTarget(Target):
pass
@@ -1097,8 +1109,10 @@ class RemovalTest(fixtures.TestBase):
t2.dispatch.event_two("t2e2y")
eq_(m1.mock_calls,
- [call('t1e1x'), call('t1e2x'),
- call('t2e1x')])
+ [
+ call('t1e1x'), call('t1e2x'),
+ call('t2e1x')
+ ])
@testing.requires.predictable_gc
def test_listener_collection_removed_cleanup(self):
@@ -1140,8 +1154,43 @@ class RemovalTest(fixtures.TestBase):
event.remove(t1, "event_one", m1)
assert_raises_message(
exc.InvalidRequestError,
- r"No listeners found for event <.*Target.*> / 'event_two' / <Mock.*> ",
+ r"No listeners found for event <.*Target.*> / "
+ r"'event_two' / <Mock.*> ",
event.remove, t1, "event_two", m1
)
event.remove(t1, "event_three", m1)
+
+ def test_no_remove_in_event(self):
+ Target = self._fixture()
+
+ t1 = Target()
+
+ def evt():
+ event.remove(t1, "event_one", evt)
+
+ event.listen(t1, "event_one", evt)
+
+ assert_raises_message(
+ Exception,
+ "deque mutated during iteration",
+ t1.dispatch.event_one
+ )
+
+ def test_no_add_in_event(self):
+ Target = self._fixture()
+
+ t1 = Target()
+
+ m1 = Mock()
+
+ def evt():
+ event.listen(t1, "event_one", m1)
+
+ event.listen(t1, "event_one", evt)
+
+ assert_raises_message(
+ Exception,
+ "deque mutated during iteration",
+ t1.dispatch.event_one
+ )
diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py
index b08fb0160..6c4f3c8cc 100644
--- a/test/dialect/postgresql/test_compiler.py
+++ b/test/dialect/postgresql/test_compiler.py
@@ -166,6 +166,90 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
"VARCHAR(1), CHECK (somecolumn IN ('x', "
"'y', 'z')))")
+ def test_create_table_with_tablespace(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_tablespace='sometablespace')
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) TABLESPACE sometablespace")
+
+ def test_create_table_with_tablespace_quoted(self):
+ # testing quoting of tablespace name
+ m = MetaData()
+ tbl = Table(
+ 'anothertable', m, Column("id", Integer),
+ postgresql_tablespace='table')
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ 'CREATE TABLE anothertable (id INTEGER) TABLESPACE "table"')
+
+ def test_create_table_inherits(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_inherits='i1')
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) INHERITS ( i1 )")
+
+ def test_create_table_inherits_tuple(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_inherits=('i1', 'i2'))
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) INHERITS ( i1, i2 )")
+
+ def test_create_table_inherits_quoting(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_inherits=('Quote Me', 'quote Me Too'))
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ 'CREATE TABLE atable (id INTEGER) INHERITS '
+ '( "Quote Me", "quote Me Too" )')
+
+ def test_create_table_with_oids(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+        postgresql_with_oids=True)
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) WITH OIDS")
+
+ tbl2 = Table(
+ 'anothertable', m, Column("id", Integer),
+ postgresql_with_oids=False)
+ self.assert_compile(
+ schema.CreateTable(tbl2),
+ "CREATE TABLE anothertable (id INTEGER) WITHOUT OIDS")
+
+ def test_create_table_with_oncommit_option(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_on_commit="drop")
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) ON COMMIT DROP")
+
+ def test_create_table_with_multiple_options(self):
+ m = MetaData()
+ tbl = Table(
+ 'atable', m, Column("id", Integer),
+ postgresql_tablespace='sometablespace',
+ postgresql_with_oids=False,
+ postgresql_on_commit="preserve_rows")
+ self.assert_compile(
+ schema.CreateTable(tbl),
+ "CREATE TABLE atable (id INTEGER) WITHOUT OIDS "
+ "ON COMMIT PRESERVE ROWS TABLESPACE sometablespace")
+
def test_create_partial_index(self):
m = MetaData()
tbl = Table('testtbl', m, Column('data', Integer))
diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py
index 291aee2f3..d8e1c655e 100644
--- a/test/engine/test_execute.py
+++ b/test/engine/test_execute.py
@@ -21,7 +21,6 @@ from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock, call, patch
from contextlib import contextmanager
from sqlalchemy.util import nested
-import logging.handlers # needed for logging tests to work correctly
users, metadata, users_autoinc = None, None, None
@@ -688,6 +687,7 @@ class CompiledCacheTest(fixtures.TestBase):
Column('user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
+ Column("extra_data", VARCHAR(20))
)
metadata.create_all()
@@ -705,12 +705,53 @@ class CompiledCacheTest(fixtures.TestBase):
cached_conn = conn.execution_options(compiled_cache=cache)
ins = users.insert()
- cached_conn.execute(ins, {'user_name': 'u1'})
- cached_conn.execute(ins, {'user_name': 'u2'})
- cached_conn.execute(ins, {'user_name': 'u3'})
+ with patch.object(
+ ins, "compile",
+ Mock(side_effect=ins.compile)) as compile_mock:
+ cached_conn.execute(ins, {'user_name': 'u1'})
+ cached_conn.execute(ins, {'user_name': 'u2'})
+ cached_conn.execute(ins, {'user_name': 'u3'})
+ eq_(compile_mock.call_count, 1)
assert len(cache) == 1
eq_(conn.execute("select count(*) from users").scalar(), 3)
+ def test_keys_independent_of_ordering(self):
+ conn = testing.db.connect()
+ conn.execute(
+ users.insert(),
+ {"user_id": 1, "user_name": "u1", "extra_data": "e1"})
+ cache = {}
+ cached_conn = conn.execution_options(compiled_cache=cache)
+
+ upd = users.update().where(users.c.user_id == bindparam("b_user_id"))
+
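+        # identically-keyed parameter dicts, in any order, should reuse
+        # a single cache entry, so the statement compiles only once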
+ with patch.object(
+ upd, "compile",
+ Mock(side_effect=upd.compile)) as compile_mock:
+ cached_conn.execute(
+ upd, util.OrderedDict([
+ ("b_user_id", 1),
+ ("user_name", "u2"),
+ ("extra_data", "e2")
+ ])
+ )
+ cached_conn.execute(
+ upd, util.OrderedDict([
+ ("b_user_id", 1),
+ ("extra_data", "e3"),
+ ("user_name", "u3"),
+ ])
+ )
+ cached_conn.execute(
+ upd, util.OrderedDict([
+ ("extra_data", "e4"),
+ ("user_name", "u4"),
+ ("b_user_id", 1),
+ ])
+ )
+ eq_(compile_mock.call_count, 1)
+ eq_(len(cache), 1)
+
class MockStrategyTest(fixtures.TestBase):
diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py
index ea2ad3964..1432a0f7b 100644
--- a/test/engine/test_logging.py
+++ b/test/engine/test_logging.py
@@ -1,35 +1,23 @@
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \
- config, is_
-import re
-from sqlalchemy.testing.util import picklers
-from sqlalchemy.interfaces import ConnectionProxy
-from sqlalchemy import MetaData, Integer, String, INT, VARCHAR, func, \
- bindparam, select, event, TypeDecorator, create_engine, Sequence
-from sqlalchemy.sql import column, literal
-from sqlalchemy.testing.schema import Table, Column
+from sqlalchemy.testing import eq_, assert_raises_message
+from sqlalchemy import select
import sqlalchemy as tsa
-from sqlalchemy import testing
from sqlalchemy.testing import engines
-from sqlalchemy import util
-from sqlalchemy.testing.engines import testing_engine
import logging.handlers
-from sqlalchemy.dialects.oracle.zxjdbc import ReturningParam
-from sqlalchemy.engine import result as _result, default
-from sqlalchemy.engine.base import Engine
from sqlalchemy.testing import fixtures
-from sqlalchemy.testing.mock import Mock, call, patch
+from sqlalchemy.testing import mock
+from sqlalchemy.testing.util import lazy_gc
+
class LogParamsTest(fixtures.TestBase):
__only_on__ = 'sqlite'
__requires__ = 'ad_hoc_engines',
def setup(self):
- self.eng = engines.testing_engine(options={'echo':True})
+ self.eng = engines.testing_engine(options={'echo': True})
self.eng.execute("create table foo (data string)")
self.buf = logging.handlers.BufferingHandler(100)
for log in [
logging.getLogger('sqlalchemy.engine'),
- logging.getLogger('sqlalchemy.pool')
]:
log.addHandler(self.buf)
@@ -37,14 +25,13 @@ class LogParamsTest(fixtures.TestBase):
self.eng.execute("drop table foo")
for log in [
logging.getLogger('sqlalchemy.engine'),
- logging.getLogger('sqlalchemy.pool')
]:
log.removeHandler(self.buf)
def test_log_large_dict(self):
self.eng.execute(
"INSERT INTO foo (data) values (:data)",
- [{"data":str(i)} for i in range(100)]
+ [{"data": str(i)} for i in range(100)]
)
eq_(
self.buf.buffer[1].message,
@@ -76,7 +63,7 @@ class LogParamsTest(fixtures.TestBase):
"100 total bound parameter sets ... {'data': '98'}, {'data': '99'}\]",
lambda: self.eng.execute(
"INSERT INTO nonexistent (data) values (:data)",
- [{"data":str(i)} for i in range(100)]
+ [{"data": str(i)} for i in range(100)]
)
)
@@ -94,6 +81,88 @@ class LogParamsTest(fixtures.TestBase):
)
)
+
+class PoolLoggingTest(fixtures.TestBase):
+ def setup(self):
+ self.existing_level = logging.getLogger("sqlalchemy.pool").level
+
+ self.buf = logging.handlers.BufferingHandler(100)
+ for log in [
+ logging.getLogger('sqlalchemy.pool')
+ ]:
+ log.addHandler(self.buf)
+
+ def teardown(self):
+ for log in [
+ logging.getLogger('sqlalchemy.pool')
+ ]:
+ log.removeHandler(self.buf)
+ logging.getLogger("sqlalchemy.pool").setLevel(self.existing_level)
+
+ def _queuepool_echo_fixture(self):
+ return tsa.pool.QueuePool(creator=mock.Mock(), echo='debug')
+
+ def _queuepool_logging_fixture(self):
+ logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
+ return tsa.pool.QueuePool(creator=mock.Mock())
+
+ def _stpool_echo_fixture(self):
+ return tsa.pool.SingletonThreadPool(creator=mock.Mock(), echo='debug')
+
+ def _stpool_logging_fixture(self):
+ logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
+ return tsa.pool.SingletonThreadPool(creator=mock.Mock())
+
+ def _test_queuepool(self, q, dispose=True):
+ conn = q.connect()
+ conn.close()
+ conn = None
+
+ conn = q.connect()
+ conn.close()
+ conn = None
+
+ conn = q.connect()
+ conn = None
+ del conn
+ lazy_gc()
+ q.dispose()
+
+ eq_(
+ [buf.msg for buf in self.buf.buffer],
+ [
+ 'Created new connection %r',
+ 'Connection %r checked out from pool',
+ 'Connection %r being returned to pool',
+ 'Connection %s rollback-on-return%s',
+ 'Connection %r checked out from pool',
+ 'Connection %r being returned to pool',
+ 'Connection %s rollback-on-return%s',
+ 'Connection %r checked out from pool',
+ 'Connection %r being returned to pool',
+ 'Connection %s rollback-on-return%s',
+ 'Closing connection %r',
+
+ ] + (['Pool disposed. %s'] if dispose else [])
+ )
+
+ def test_stpool_echo(self):
+ q = self._stpool_echo_fixture()
+ self._test_queuepool(q, False)
+
+ def test_stpool_logging(self):
+ q = self._stpool_logging_fixture()
+ self._test_queuepool(q, False)
+
+ def test_queuepool_echo(self):
+ q = self._queuepool_echo_fixture()
+ self._test_queuepool(q)
+
+ def test_queuepool_logging(self):
+ q = self._queuepool_logging_fixture()
+ self._test_queuepool(q)
+
+
class LoggingNameTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
@@ -104,7 +173,7 @@ class LoggingNameTest(fixtures.TestBase):
assert name in (
'sqlalchemy.engine.base.Engine.%s' % eng_name,
'sqlalchemy.pool.%s.%s' %
- (eng.pool.__class__.__name__, pool_name)
+ (eng.pool.__class__.__name__, pool_name)
)
def _assert_no_name_in_execute(self, eng):
@@ -118,15 +187,15 @@ class LoggingNameTest(fixtures.TestBase):
def _named_engine(self, **kw):
options = {
- 'logging_name':'myenginename',
- 'pool_logging_name':'mypoolname',
- 'echo':True
+ 'logging_name': 'myenginename',
+ 'pool_logging_name': 'mypoolname',
+ 'echo': True
}
options.update(kw)
return engines.testing_engine(options=options)
def _unnamed_engine(self, **kw):
- kw.update({'echo':True})
+ kw.update({'echo': True})
return engines.testing_engine(options=kw)
def setup(self):
@@ -183,6 +252,7 @@ class LoggingNameTest(fixtures.TestBase):
eng = self._unnamed_engine(echo='debug', echo_pool='debug')
self._assert_no_name_in_execute(eng)
+
class EchoTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
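The new PoolLoggingTest drives pool logging through the two routes its fixtures set up. A minimal sketch of both (the sqlite3 creator is illustrative):

    import logging
    import sqlite3
    from sqlalchemy.pool import QueuePool

    def creator():
        return sqlite3.connect(':memory:')

    # route 1: per-pool echo flag
    p1 = QueuePool(creator=creator, echo='debug')

    # route 2: plain stdlib logging configuration
    logging.basicConfig()
    logging.getLogger('sqlalchemy.pool').setLevel(logging.DEBUG)
    p2 = QueuePool(creator=creator)

    conn = p2.connect()  # logs "Created new connection", "checked out"
    conn.close()         # logs "being returned to pool", rollback-on-return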
diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py
index f92b874da..c82cca5a1 100644
--- a/test/engine/test_reconnect.py
+++ b/test/engine/test_reconnect.py
@@ -1,23 +1,24 @@
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import time
-from sqlalchemy import select, MetaData, Integer, String, create_engine, pool
+from sqlalchemy import (
+ select, MetaData, Integer, String, create_engine, pool, exc, util)
from sqlalchemy.testing.schema import Table, Column
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing import engines
-from sqlalchemy.testing.util import gc_collect
-from sqlalchemy import exc, util
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.engines import testing_engine
-from sqlalchemy.testing import is_not_
from sqlalchemy.testing.mock import Mock, call
+
class MockError(Exception):
pass
+
class MockDisconnect(MockError):
pass
+
def mock_connection():
def mock_cursor():
def execute(*args, **kwargs):
@@ -25,10 +26,12 @@ def mock_connection():
raise MockDisconnect("Lost the DB connection on execute")
elif conn.explode in ('execute_no_disconnect', ):
raise MockError(
- "something broke on execute but we didn't lose the connection")
+ "something broke on execute but we didn't lose the "
+ "connection")
elif conn.explode in ('rollback', 'rollback_no_disconnect'):
raise MockError(
- "something broke on execute but we didn't lose the connection")
+ "something broke on execute but we didn't lose the "
+ "connection")
elif args and "SELECT" in args[0]:
cursor.description = [('foo', None, None, None, None, None)]
else:
@@ -38,9 +41,8 @@ def mock_connection():
cursor.fetchall = cursor.fetchone = \
Mock(side_effect=MockError("cursor closed"))
cursor = Mock(
- execute=Mock(side_effect=execute),
- close=Mock(side_effect=close)
- )
+ execute=Mock(side_effect=execute),
+ close=Mock(side_effect=close))
return cursor
def cursor():
@@ -52,18 +54,20 @@ def mock_connection():
raise MockDisconnect("Lost the DB connection on rollback")
if conn.explode == 'rollback_no_disconnect':
raise MockError(
- "something broke on rollback but we didn't lose the connection")
+ "something broke on rollback but we didn't lose the "
+ "connection")
else:
return
conn = Mock(
- rollback=Mock(side_effect=rollback),
- cursor=Mock(side_effect=cursor())
- )
+ rollback=Mock(side_effect=rollback),
+ cursor=Mock(side_effect=cursor()))
return conn
+
def MockDBAPI():
connections = []
+
def connect():
while True:
conn = mock_connection()
@@ -80,13 +84,12 @@ def MockDBAPI():
connections[:] = []
return Mock(
- connect=Mock(side_effect=connect()),
- shutdown=Mock(side_effect=shutdown),
- dispose=Mock(side_effect=dispose),
- paramstyle='named',
- connections=connections,
- Error=MockError
- )
+ connect=Mock(side_effect=connect()),
+ shutdown=Mock(side_effect=shutdown),
+ dispose=Mock(side_effect=dispose),
+ paramstyle='named',
+ connections=connections,
+ Error=MockError)
class MockReconnectTest(fixtures.TestBase):
@@ -94,13 +97,14 @@ class MockReconnectTest(fixtures.TestBase):
self.dbapi = MockDBAPI()
self.db = testing_engine(
- 'postgresql://foo:bar@localhost/test',
- options=dict(module=self.dbapi, _initialize=False))
+ 'postgresql://foo:bar@localhost/test',
+ options=dict(module=self.dbapi, _initialize=False))
- self.mock_connect = call(host='localhost', password='bar',
- user='foo', database='test')
+ self.mock_connect = call(
+ host='localhost', password='bar', user='foo', database='test')
# monkeypatch disconnect checker
- self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(e, MockDisconnect)
+ self.db.dialect.is_disconnect = \
+ lambda e, conn, cursor: isinstance(e, MockDisconnect)
def teardown(self):
self.dbapi.dispose()
@@ -194,10 +198,8 @@ class MockReconnectTest(fixtures.TestBase):
assert_raises_message(
tsa.exc.InvalidRequestError,
- "Can't reconnect until invalid transaction is "
- "rolled back",
- trans.commit
- )
+ "Can't reconnect until invalid transaction is rolled back",
+ trans.commit)
assert trans.is_active
trans.rollback()
@@ -351,16 +353,16 @@ class MockReconnectTest(fixtures.TestBase):
)
def test_dialect_initialize_once(self):
- from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.url import URL
from sqlalchemy.engine.default import DefaultDialect
- from sqlalchemy.pool import QueuePool
dbapi = self.dbapi
mock_dialect = Mock()
+
class MyURL(URL):
def get_dialect(self):
return Dialect
+
class Dialect(DefaultDialect):
initialize = Mock()
@@ -371,7 +373,6 @@ class MockReconnectTest(fixtures.TestBase):
eq_(Dialect.initialize.call_count, 1)
-
class CursorErrTest(fixtures.TestBase):
# this isn't really a "reconnect" test, it's more of
# a generic "recovery". maybe this test suite should have been
@@ -394,29 +395,24 @@ class CursorErrTest(fixtures.TestBase):
description=[],
close=Mock(side_effect=Exception("explode")),
)
+
def connect():
while True:
yield Mock(
- spec=['cursor', 'commit', 'rollback', 'close'],
- cursor=Mock(side_effect=cursor()),
- )
+ spec=['cursor', 'commit', 'rollback', 'close'],
+ cursor=Mock(side_effect=cursor()))
return Mock(
- Error = DBAPIError,
- paramstyle='qmark',
- connect=Mock(side_effect=connect())
- )
+ Error=DBAPIError, paramstyle='qmark',
+ connect=Mock(side_effect=connect()))
dbapi = MockDBAPI()
from sqlalchemy.engine import default
url = Mock(
- get_dialect=lambda: default.DefaultDialect,
- translate_connect_args=lambda: {},
- query={},
- )
+ get_dialect=lambda: default.DefaultDialect,
+ translate_connect_args=lambda: {}, query={})
eng = testing_engine(
- url,
- options=dict(module=dbapi, _initialize=initialize))
+ url, options=dict(module=dbapi, _initialize=initialize))
eng.pool.logger = Mock()
return eng
@@ -508,7 +504,6 @@ class RealReconnectTest(fixtures.TestBase):
# pool isn't replaced
assert self.engine.pool is p2
-
def test_ensure_is_disconnect_gets_connection(self):
def is_disconnect(e, conn, cursor):
# connection is still present
@@ -556,6 +551,7 @@ class RealReconnectTest(fixtures.TestBase):
"Crashes on py3k+cx_oracle")
def test_explode_in_initializer(self):
engine = engines.testing_engine()
+
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
@@ -569,6 +565,7 @@ class RealReconnectTest(fixtures.TestBase):
"Crashes on py3k+cx_oracle")
def test_explode_in_initializer_disconnect(self):
engine = engines.testing_engine()
+
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
@@ -584,7 +581,6 @@ class RealReconnectTest(fixtures.TestBase):
# invalidate() also doesn't screw up
assert_raises(exc.DBAPIError, engine.connect)
-
def test_null_pool(self):
engine = \
engines.reconnecting_engine(options=dict(poolclass=pool.NullPool))
@@ -623,10 +619,8 @@ class RealReconnectTest(fixtures.TestBase):
assert trans.is_active
assert_raises_message(
tsa.exc.StatementError,
- "Can't reconnect until invalid transaction is "\
- "rolled back",
- conn.execute, select([1])
- )
+ "Can't reconnect until invalid transaction is rolled back",
+ conn.execute, select([1]))
assert trans.is_active
assert_raises_message(
tsa.exc.InvalidRequestError,
@@ -640,13 +634,14 @@ class RealReconnectTest(fixtures.TestBase):
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
+
class RecycleTest(fixtures.TestBase):
__backend__ = True
def test_basic(self):
for threadlocal in False, True:
engine = engines.reconnecting_engine(
- options={'pool_threadlocal': threadlocal})
+ options={'pool_threadlocal': threadlocal})
conn = engine.contextual_connect()
eq_(conn.execute(select([1])).scalar(), 1)
@@ -671,13 +666,15 @@ class RecycleTest(fixtures.TestBase):
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
+
class InvalidateDuringResultTest(fixtures.TestBase):
__backend__ = True
def setup(self):
self.engine = engines.reconnecting_engine()
self.meta = MetaData(self.engine)
- table = Table('sometable', self.meta,
+ table = Table(
+ 'sometable', self.meta,
Column('id', Integer, primary_key=True),
Column('name', String(50)))
self.meta.create_all()
@@ -690,10 +687,8 @@ class InvalidateDuringResultTest(fixtures.TestBase):
self.engine.dispose()
@testing.fails_if([
- '+mysqlconnector', '+mysqldb',
- '+cymysql', '+pymysql', '+pg8000'
- ], "Buffers the result set and doesn't check for "
- "connection close")
+ '+mysqlconnector', '+mysqldb', '+cymysql', '+pymysql', '+pg8000'],
+ "Buffers the result set and doesn't check for connection close")
def test_invalidate_on_results(self):
conn = self.engine.connect()
result = conn.execute('select * from sometable')
@@ -702,4 +697,3 @@ class InvalidateDuringResultTest(fixtures.TestBase):
self.engine.test_shutdown()
_assert_invalidated(result.fetchone)
assert conn.invalidated
-
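The setup above monkeypatches the dialect's is_disconnect hook so errors raised by the mock DBAPI drive the reconnect machinery. A sketch of the hook's contract (the assignment at the end is how the fixture applies it):

    class MockDisconnect(Exception):
        pass

    def is_disconnect(e, connection, cursor):
        # receives the raised error plus the DBAPI connection and cursor;
        # returning True invalidates the connection and triggers reconnect
        return isinstance(e, MockDisconnect)

    # engine.dialect.is_disconnect = is_disconnect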
diff --git a/test/engine/test_transaction.py b/test/engine/test_transaction.py
index f9744444d..8a5303642 100644
--- a/test/engine/test_transaction.py
+++ b/test/engine/test_transaction.py
@@ -347,9 +347,10 @@ class TransactionTest(fixtures.TestBase):
connection.invalidate()
connection2 = testing.db.connect()
- eq_(connection2.execute(select([users.c.user_id]).
- order_by(users.c.user_id)).fetchall(),
- [])
+ eq_(
+ connection2.execution_options(autocommit=True).
+ execute(select([users.c.user_id]).
+ order_by(users.c.user_id)).fetchall(), [])
recoverables = connection2.recover_twophase()
assert transaction.xid in recoverables
connection2.commit_prepared(transaction.xid, recover=True)
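The amended assertion reads the table through an autocommit probe before recovering the prepared transaction. The flow it exercises, sketched assuming the test's engine and users fixtures and a two-phase-capable backend:

    from sqlalchemy import select

    conn = engine.connect()
    trans = conn.begin_twophase()
    conn.execute(users.insert(), {'user_id': 1, 'user_name': 'user1'})
    trans.prepare()
    conn.invalidate()  # simulate a client lost after PREPARE

    conn2 = engine.connect()
    # nothing is visible yet; autocommit keeps the probe from opening
    # its own lingering transaction
    rows = conn2.execution_options(autocommit=True).execute(
        select([users.c.user_id])).fetchall()
    assert rows == []
    assert trans.xid in conn2.recover_twophase()
    conn2.commit_prepared(trans.xid, recover=True)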
diff --git a/test/ext/test_mutable.py b/test/ext/test_mutable.py
index dc0b5ba1c..f2d0123bd 100644
--- a/test/ext/test_mutable.py
+++ b/test/ext/test_mutable.py
@@ -119,6 +119,18 @@ class _MutableDictTestBase(object):
eq_(f1.data, {})
+ def test_update(self):
+ sess = Session()
+
+ f1 = Foo(data={'a': 'b'})
+ sess.add(f1)
+ sess.commit()
+
+ f1.data.update({'a': 'z'})
+ sess.commit()
+
+ eq_(f1.data, {'a': 'z'})
+
def test_setdefault(self):
sess = Session()
@@ -332,6 +344,59 @@ class MutableAssociationScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest
)
+class CustomMutableAssociationScalarJSONTest(
+ _MutableDictTestBase, fixtures.MappedTest):
+
+ CustomMutableDict = None
+
+ @classmethod
+ def _type_fixture(cls):
+ if not getattr(cls, 'CustomMutableDict'):
+ MutableDict = super(
+ CustomMutableAssociationScalarJSONTest, cls)._type_fixture()
+ class CustomMutableDict(MutableDict):
+ pass
+ cls.CustomMutableDict = CustomMutableDict
+ return cls.CustomMutableDict
+
+ @classmethod
+ def define_tables(cls, metadata):
+ import json
+
+ class JSONEncodedDict(TypeDecorator):
+ impl = VARCHAR(50)
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = json.dumps(value)
+
+ return value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value = json.loads(value)
+ return value
+
+ CustomMutableDict = cls._type_fixture()
+ CustomMutableDict.associate_with(JSONEncodedDict)
+
+ Table('foo', metadata,
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('data', JSONEncodedDict),
+ Column('unrelated_data', String(50))
+ )
+
+ def test_pickle_parent(self):
+ # Picklers don't know how to pickle CustomMutableDict,
+ # but we aren't testing that here
+ pass
+
+ def test_coerce(self):
+ sess = Session()
+ f1 = Foo(data={'a': 'b'})
+ sess.add(f1)
+ sess.flush()
+ eq_(type(f1.data), self._type_fixture())
+
+
class _CompositeTestBase(object):
@classmethod
def define_tables(cls, metadata):
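The fixtures above attach a MutableDict subclass to a JSON-serializing TypeDecorator; with the update() fix in this changeset, in-place dict mutations mark the parent row dirty. A self-contained sketch (the declarative model and names are illustrative):

    import json
    from sqlalchemy import Column, Integer, VARCHAR, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableDict
    from sqlalchemy.orm import Session
    from sqlalchemy.types import TypeDecorator

    class JSONEncodedDict(TypeDecorator):
        impl = VARCHAR(50)

        def process_bind_param(self, value, dialect):
            return json.dumps(value) if value is not None else None

        def process_result_value(self, value, dialect):
            return json.loads(value) if value is not None else None

    MutableDict.associate_with(JSONEncodedDict)

    Base = declarative_base()

    class Foo(Base):
        __tablename__ = 'foo'
        id = Column(Integer, primary_key=True)
        data = Column(JSONEncodedDict)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    sess = Session(engine)
    f1 = Foo(data={'a': 'b'})
    sess.add(f1)
    sess.commit()
    f1.data.update({'a': 'z'})  # now caught; flushes an UPDATE
    sess.commit()
    assert f1.data == {'a': 'z'}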
diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py
index bc47ba3f3..950ff1953 100644
--- a/test/orm/test_dynamic.py
+++ b/test/orm/test_dynamic.py
@@ -510,10 +510,6 @@ class UOWTest(
testing.db,
sess.flush,
CompiledSQL(
- "SELECT users.id AS users_id, users.name AS users_name "
- "FROM users WHERE users.id = :param_1",
- lambda ctx: [{"param_1": u1_id}]),
- CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
@@ -523,7 +519,11 @@ class UOWTest(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
- )
+ ),
+ CompiledSQL(
+ "SELECT users.id AS users_id, users.name AS users_name "
+ "FROM users WHERE users.id = :param_1",
+ lambda ctx: [{"param_1": u1_id}]),
)
def test_rollback(self):
diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py
index 53b661a49..a4e982f84 100644
--- a/test/orm/test_naturalpks.py
+++ b/test/orm/test_naturalpks.py
@@ -184,7 +184,7 @@ class NaturalPKTest(fixtures.MappedTest):
if not passive_updates:
# test passive_updates=False;
# load addresses, update user, update 2 addresses
- self.assert_sql_count(testing.db, go, 4)
+ self.assert_sql_count(testing.db, go, 3)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
@@ -239,7 +239,7 @@ class NaturalPKTest(fixtures.MappedTest):
def go():
sess.flush()
- self.assert_sql_count(testing.db, go, 3)
+ self.assert_sql_count(testing.db, go, 2)
def _test_manytoone(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
@@ -270,7 +270,7 @@ class NaturalPKTest(fixtures.MappedTest):
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
- self.assert_sql_count(testing.db, go, 3)
+ self.assert_sql_count(testing.db, go, 2)
def go():
sess.flush()
@@ -366,7 +366,8 @@ class NaturalPKTest(fixtures.MappedTest):
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
- self.assert_sql_count(testing.db, go, 3)
+ # two updates bundled
+ self.assert_sql_count(testing.db, go, 2)
eq_([Address(username='ed'), Address(username='ed')], [ad1, ad2])
sess.expunge_all()
eq_(
@@ -383,7 +384,8 @@ class NaturalPKTest(fixtures.MappedTest):
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
- self.assert_sql_count(testing.db, go, 3)
+ # two updates bundled
+ self.assert_sql_count(testing.db, go, 2)
sess.expunge_all()
eq_(
[Address(username='fred'), Address(username='fred')],
@@ -789,8 +791,8 @@ class NonPKCascadeTest(fixtures.MappedTest):
sess.flush()
if not passive_updates:
# test passive_updates=False; load addresses,
- # update user, update 2 addresses
- self.assert_sql_count(testing.db, go, 4)
+ # update user, update 2 addresses (in one executemany)
+ self.assert_sql_count(testing.db, go, 3)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
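The reduced counts reflect the flush process now bundling identical UPDATE statements into one DBAPI executemany() call. The Core-level equivalent, sketched with an illustrative table:

    from sqlalchemy import (
        create_engine, MetaData, Table, Column, Integer, String, bindparam)

    engine = create_engine('sqlite://')
    m = MetaData()
    addresses = Table(
        'addresses', m,
        Column('id', Integer, primary_key=True),
        Column('username', String(50)))
    m.create_all(engine)

    conn = engine.connect()
    conn.execute(
        addresses.insert(),
        [{'id': 1, 'username': 'jack'}, {'id': 2, 'username': 'jack'}])

    # one UPDATE statement, two parameter sets -> a single executemany();
    # this is why the asserted statement counts drop by one
    conn.execute(
        addresses.update().where(addresses.c.id == bindparam('b_id')),
        [{'b_id': 1, 'username': 'ed'}, {'b_id': 2, 'username': 'ed'}])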
diff --git a/test/orm/test_options.py b/test/orm/test_options.py
index 6eba38d15..1c1a797a6 100644
--- a/test/orm/test_options.py
+++ b/test/orm/test_options.py
@@ -497,7 +497,7 @@ class OptionsTest(PathTest, QueryTest):
class OptionsNoPropTest(_fixtures.FixtureTest):
"""test the error messages emitted when using property
- options in conjunection with column-only entities, or
+ options in conjunction with column-only entities, or
for options that do not exist
"""
diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py
index 6eb763213..a54097b03 100644
--- a/test/orm/test_unitofwork.py
+++ b/test/orm/test_unitofwork.py
@@ -1126,11 +1126,12 @@ class OneToManyTest(_fixtures.FixtureTest):
("UPDATE addresses SET user_id=:user_id "
"WHERE addresses.id = :addresses_id",
- {'user_id': None, 'addresses_id': a1.id}),
+ [
+ {'user_id': None, 'addresses_id': a1.id},
+ {'user_id': u1.id, 'addresses_id': a3.id}
+ ]),
- ("UPDATE addresses SET user_id=:user_id "
- "WHERE addresses.id = :addresses_id",
- {'user_id': u1.id, 'addresses_id': a3.id})])
+ ])
def test_child_move(self):
"""Moving a child from one parent to another, with a delete.
diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py
index 9fedc9590..374a77237 100644
--- a/test/orm/test_unitofworkv2.py
+++ b/test/orm/test_unitofworkv2.py
@@ -1,4 +1,4 @@
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
+from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing.schema import Table, Column
@@ -7,12 +7,15 @@ from sqlalchemy import exc
from sqlalchemy.testing import fixtures
from sqlalchemy import Integer, String, ForeignKey, func
from sqlalchemy.orm import mapper, relationship, backref, \
- create_session, unitofwork, attributes,\
- Session, class_mapper, sync, exc as orm_exc
+ create_session, unitofwork, attributes,\
+ Session, exc as orm_exc
+from sqlalchemy.testing.mock import Mock
+from sqlalchemy.testing.assertsql import AllOf, CompiledSQL
+from sqlalchemy import event
-from sqlalchemy.testing.assertsql import AllOf, CompiledSQL, Or
class AssertsUOW(object):
+
def _get_test_uow(self, session):
uow = unitofwork.UOWTransaction(session)
deleted = set(session._deleted)
@@ -24,26 +27,29 @@ class AssertsUOW(object):
uow.register_object(d, isdelete=True)
return uow
- def _assert_uow_size(self, session, expected ):
+ def _assert_uow_size(self, session, expected):
uow = self._get_test_uow(session)
postsort_actions = uow._generate_actions()
print(postsort_actions)
eq_(len(postsort_actions), expected, postsort_actions)
-class UOWTest(_fixtures.FixtureTest,
- testing.AssertsExecutionResults, AssertsUOW):
+
+class UOWTest(
+ _fixtures.FixtureTest,
+ testing.AssertsExecutionResults, AssertsUOW):
run_inserts = None
+
class RudimentaryFlushTest(UOWTest):
def test_one_to_many_save(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users, properties={
- 'addresses':relationship(Address),
+ 'addresses': relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
@@ -53,32 +59,32 @@ class RudimentaryFlushTest(UOWTest):
sess.add(u1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "INSERT INTO users (name) VALUES (:name)",
- {'name': 'u1'}
- ),
- CompiledSQL(
- "INSERT INTO addresses (user_id, email_address) "
- "VALUES (:user_id, :email_address)",
- lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
- ),
- CompiledSQL(
- "INSERT INTO addresses (user_id, email_address) "
- "VALUES (:user_id, :email_address)",
- lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
- ),
- )
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "INSERT INTO users (name) VALUES (:name)",
+ {'name': 'u1'}
+ ),
+ CompiledSQL(
+ "INSERT INTO addresses (user_id, email_address) "
+ "VALUES (:user_id, :email_address)",
+ lambda ctx: {'email_address': 'a1', 'user_id': u1.id}
+ ),
+ CompiledSQL(
+ "INSERT INTO addresses (user_id, email_address) "
+ "VALUES (:user_id, :email_address)",
+ lambda ctx: {'email_address': 'a2', 'user_id': u1.id}
+ ),
+ )
def test_one_to_many_delete_all(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users, properties={
- 'addresses':relationship(Address),
+ 'addresses': relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
@@ -91,26 +97,26 @@ class RudimentaryFlushTest(UOWTest):
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "DELETE FROM addresses WHERE addresses.id = :id",
- [{'id':a1.id},{'id':a2.id}]
- ),
- CompiledSQL(
- "DELETE FROM users WHERE users.id = :id",
- {'id':u1.id}
- ),
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "DELETE FROM addresses WHERE addresses.id = :id",
+ [{'id': a1.id}, {'id': a2.id}]
+ ),
+ CompiledSQL(
+ "DELETE FROM users WHERE users.id = :id",
+ {'id': u1.id}
+ ),
)
def test_one_to_many_delete_parent(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users, properties={
- 'addresses':relationship(Address),
+ 'addresses': relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
@@ -121,76 +127,73 @@ class RudimentaryFlushTest(UOWTest):
sess.delete(u1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "UPDATE addresses SET user_id=:user_id WHERE "
- "addresses.id = :addresses_id",
- lambda ctx: [{'addresses_id': a1.id, 'user_id': None}]
- ),
- CompiledSQL(
- "UPDATE addresses SET user_id=:user_id WHERE "
- "addresses.id = :addresses_id",
- lambda ctx: [{'addresses_id': a2.id, 'user_id': None}]
- ),
- CompiledSQL(
- "DELETE FROM users WHERE users.id = :id",
- {'id':u1.id}
- ),
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "UPDATE addresses SET user_id=:user_id WHERE "
+ "addresses.id = :addresses_id",
+ lambda ctx: [
+ {'addresses_id': a1.id, 'user_id': None},
+ {'addresses_id': a2.id, 'user_id': None}
+ ]
+ ),
+ CompiledSQL(
+ "DELETE FROM users WHERE users.id = :id",
+ {'id': u1.id}
+ ),
)
def test_many_to_one_save(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
-
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'user':relationship(User)
+ 'user': relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
- Address(email_address='a2', user=u1)
+ Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "INSERT INTO users (name) VALUES (:name)",
- {'name': 'u1'}
- ),
- CompiledSQL(
- "INSERT INTO addresses (user_id, email_address) "
- "VALUES (:user_id, :email_address)",
- lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
- ),
- CompiledSQL(
- "INSERT INTO addresses (user_id, email_address) "
- "VALUES (:user_id, :email_address)",
- lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
- ),
- )
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "INSERT INTO users (name) VALUES (:name)",
+ {'name': 'u1'}
+ ),
+ CompiledSQL(
+ "INSERT INTO addresses (user_id, email_address) "
+ "VALUES (:user_id, :email_address)",
+ lambda ctx: {'email_address': 'a1', 'user_id': u1.id}
+ ),
+ CompiledSQL(
+ "INSERT INTO addresses (user_id, email_address) "
+ "VALUES (:user_id, :email_address)",
+ lambda ctx: {'email_address': 'a2', 'user_id': u1.id}
+ ),
+ )
def test_many_to_one_delete_all(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'user':relationship(User)
+ 'user': relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
- Address(email_address='a2', user=u1)
+ Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
sess.flush()
@@ -198,71 +201,69 @@ class RudimentaryFlushTest(UOWTest):
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "DELETE FROM addresses WHERE addresses.id = :id",
- [{'id':a1.id},{'id':a2.id}]
- ),
- CompiledSQL(
- "DELETE FROM users WHERE users.id = :id",
- {'id':u1.id}
- ),
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "DELETE FROM addresses WHERE addresses.id = :id",
+ [{'id': a1.id}, {'id': a2.id}]
+ ),
+ CompiledSQL(
+ "DELETE FROM users WHERE users.id = :id",
+ {'id': u1.id}
+ ),
)
def test_many_to_one_delete_target(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'user':relationship(User)
+ 'user': relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
- Address(email_address='a2', user=u1)
+ Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
sess.flush()
sess.delete(u1)
a1.user = a2.user = None
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL(
- "UPDATE addresses SET user_id=:user_id WHERE "
- "addresses.id = :addresses_id",
- lambda ctx: [{'addresses_id': a1.id, 'user_id': None}]
- ),
- CompiledSQL(
- "UPDATE addresses SET user_id=:user_id WHERE "
- "addresses.id = :addresses_id",
- lambda ctx: [{'addresses_id': a2.id, 'user_id': None}]
- ),
- CompiledSQL(
- "DELETE FROM users WHERE users.id = :id",
- {'id':u1.id}
- ),
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "UPDATE addresses SET user_id=:user_id WHERE "
+ "addresses.id = :addresses_id",
+ lambda ctx: [
+ {'addresses_id': a1.id, 'user_id': None},
+ {'addresses_id': a2.id, 'user_id': None}
+ ]
+ ),
+ CompiledSQL(
+ "DELETE FROM users WHERE users.id = :id",
+ {'id': u1.id}
+ ),
)
def test_many_to_one_delete_unloaded(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'parent':relationship(User)
+ 'parent': relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
- Address(email_address='c2', parent=parent)
+ Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
@@ -295,16 +296,20 @@ class RudimentaryFlushTest(UOWTest):
# the User row might be handled before or after the addresses
# are loaded, so need to use AllOf
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
@@ -326,18 +331,18 @@ class RudimentaryFlushTest(UOWTest):
def test_many_to_one_delete_childonly_unloaded(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'parent':relationship(User)
+ 'parent': relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
- Address(email_address='c2', parent=parent)
+ Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
@@ -345,7 +350,7 @@ class RudimentaryFlushTest(UOWTest):
session.flush()
- pid = parent.id
+ # pid = parent.id
c1id = c1.id
c2id = c2.id
@@ -360,18 +365,23 @@ class RudimentaryFlushTest(UOWTest):
session.flush,
AllOf(
# [ticket:2049] - we aren't deleting User,
- # relationship is simple m2o, no SELECT should be emitted for it.
+ # relationship is simple m2o, no SELECT should be emitted for
+ # it.
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
@@ -384,18 +394,18 @@ class RudimentaryFlushTest(UOWTest):
def test_many_to_one_delete_childonly_unloaded_expired(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'parent':relationship(User)
+ 'parent': relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
- Address(email_address='c2', parent=parent)
+ Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
@@ -403,7 +413,7 @@ class RudimentaryFlushTest(UOWTest):
session.flush()
- pid = parent.id
+ # pid = parent.id
c1id = c1.id
c2id = c2.id
@@ -420,16 +430,20 @@ class RudimentaryFlushTest(UOWTest):
AllOf(
# the parent User is expired, so it gets loaded here.
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
- "SELECT addresses.id AS addresses_id, addresses.user_id AS "
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
- "addresses_email_address FROM addresses WHERE addresses.id = "
+ "addresses_email_address FROM addresses "
+ "WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
@@ -441,17 +455,17 @@ class RudimentaryFlushTest(UOWTest):
)
def test_natural_ordering(self):
- """test that unconnected items take relationship() into account regardless."""
+ """test that unconnected items take relationship()
+ into account regardless."""
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
-
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'parent':relationship(User)
+ 'parent': relationship(User)
})
sess = create_session()
@@ -465,7 +479,7 @@ class RudimentaryFlushTest(UOWTest):
sess.flush,
CompiledSQL(
"INSERT INTO users (id, name) VALUES (:id, :name)",
- {'id':1, 'name':'u1'}),
+ {'id': 1, 'name': 'u1'}),
CompiledSQL(
"INSERT INTO addresses (id, user_id, email_address) "
"VALUES (:id, :user_id, :email_address)",
@@ -489,13 +503,13 @@ class RudimentaryFlushTest(UOWTest):
)
def test_natural_selfref(self):
- """test that unconnected items take relationship() into account regardless."""
+ """test that unconnected items take relationship()
+ into account regardless."""
Node, nodes = self.classes.Node, self.tables.nodes
-
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
@@ -515,20 +529,18 @@ class RudimentaryFlushTest(UOWTest):
"INSERT INTO nodes (id, parent_id, data) VALUES "
"(:id, :parent_id, :data)",
[{'parent_id': None, 'data': None, 'id': 1},
- {'parent_id': 1, 'data': None, 'id': 2},
- {'parent_id': 2, 'data': None, 'id': 3}]
- ),
+ {'parent_id': 1, 'data': None, 'id': 2},
+ {'parent_id': 2, 'data': None, 'id': 3}]
+ ),
)
def test_many_to_many(self):
- keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
- self.tables.items,
- self.tables.item_keywords,
- self.classes.Keyword,
- self.classes.Item)
+ keywords, items, item_keywords, Keyword, Item = (
+ self.tables.keywords, self.tables.items, self.tables.item_keywords,
+ self.classes.Keyword, self.classes.Item)
mapper(Item, items, properties={
- 'keywords':relationship(Keyword, secondary=item_keywords)
+ 'keywords': relationship(Keyword, secondary=item_keywords)
})
mapper(Keyword, keywords)
@@ -537,45 +549,45 @@ class RudimentaryFlushTest(UOWTest):
i1 = Item(description='i1', keywords=[k1])
sess.add(i1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- AllOf(
- CompiledSQL(
+ testing.db,
+ sess.flush,
+ AllOf(
+ CompiledSQL(
"INSERT INTO keywords (name) VALUES (:name)",
- {'name':'k1'}
- ),
- CompiledSQL(
- "INSERT INTO items (description) VALUES (:description)",
- {'description':'i1'}
- ),
+ {'name': 'k1'}
),
CompiledSQL(
- "INSERT INTO item_keywords (item_id, keyword_id) "
- "VALUES (:item_id, :keyword_id)",
- lambda ctx:{'item_id':i1.id, 'keyword_id':k1.id}
- )
+ "INSERT INTO items (description) VALUES (:description)",
+ {'description': 'i1'}
+ ),
+ ),
+ CompiledSQL(
+ "INSERT INTO item_keywords (item_id, keyword_id) "
+ "VALUES (:item_id, :keyword_id)",
+ lambda ctx: {'item_id': i1.id, 'keyword_id': k1.id}
+ )
)
# test that keywords collection isn't loaded
sess.expire(i1, ['keywords'])
i1.description = 'i2'
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL("UPDATE items SET description=:description "
- "WHERE items.id = :items_id",
- lambda ctx:{'description':'i2', 'items_id':i1.id})
+ testing.db,
+ sess.flush,
+ CompiledSQL("UPDATE items SET description=:description "
+ "WHERE items.id = :items_id",
+ lambda ctx: {'description': 'i2', 'items_id': i1.id})
)
def test_m2o_flush_size(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
- 'user':relationship(User, passive_updates=True)
+ 'user': relationship(User, passive_updates=True)
})
sess = create_session()
u1 = User(name='ed')
@@ -584,12 +596,12 @@ class RudimentaryFlushTest(UOWTest):
def test_o2m_flush_size(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users, properties={
- 'addresses':relationship(Address),
+ 'addresses': relationship(Address),
})
mapper(Address, addresses)
@@ -600,7 +612,7 @@ class RudimentaryFlushTest(UOWTest):
sess.flush()
- u1.name='jack'
+ u1.name = 'jack'
self._assert_uow_size(sess, 2)
sess.flush()
@@ -617,7 +629,7 @@ class RudimentaryFlushTest(UOWTest):
sess = create_session()
u1 = sess.query(User).first()
- u1.name='ed'
+ u1.name = 'ed'
self._assert_uow_size(sess, 2)
u1.addresses
@@ -625,6 +637,7 @@ class RudimentaryFlushTest(UOWTest):
class SingleCycleTest(UOWTest):
+
def teardown(self):
engines.testing_reaper.rollback_all()
# mysql can't handle delete from nodes
@@ -639,7 +652,7 @@ class SingleCycleTest(UOWTest):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
@@ -649,15 +662,15 @@ class SingleCycleTest(UOWTest):
sess.add(n1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
+ testing.db,
+ sess.flush,
- CompiledSQL(
- "INSERT INTO nodes (parent_id, data) VALUES "
- "(:parent_id, :data)",
- {'parent_id': None, 'data': 'n1'}
- ),
- AllOf(
+ CompiledSQL(
+ "INSERT INTO nodes (parent_id, data) VALUES "
+ "(:parent_id, :data)",
+ {'parent_id': None, 'data': 'n1'}
+ ),
+ AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
@@ -668,14 +681,14 @@ class SingleCycleTest(UOWTest):
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n3'}
),
- )
)
+ )
def test_one_to_many_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
@@ -689,19 +702,19 @@ class SingleCycleTest(UOWTest):
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:[{'id':n2.id}, {'id':n3.id}]),
- CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx: {'id':n1.id})
+ testing.db,
+ sess.flush,
+ CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: [{'id': n2.id}, {'id': n3.id}]),
+ CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: {'id': n1.id})
)
def test_one_to_many_delete_parent(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
@@ -713,25 +726,24 @@ class SingleCycleTest(UOWTest):
sess.delete(n1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- AllOf(
- CompiledSQL("UPDATE nodes SET parent_id=:parent_id "
- "WHERE nodes.id = :nodes_id",
- lambda ctx: {'nodes_id':n3.id, 'parent_id':None}),
- CompiledSQL("UPDATE nodes SET parent_id=:parent_id "
- "WHERE nodes.id = :nodes_id",
- lambda ctx: {'nodes_id':n2.id, 'parent_id':None}),
- ),
- CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:{'id':n1.id})
- )
+ testing.db,
+ sess.flush,
+ AllOf(
+ CompiledSQL(
+ "UPDATE nodes SET parent_id=:parent_id "
+ "WHERE nodes.id = :nodes_id",
+ lambda ctx: [
+ {'nodes_id': n3.id, 'parent_id': None},
+ {'nodes_id': n2.id, 'parent_id': None}
+ ]
+ )
+ ),
+ CompiledSQL(
+ "DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: {'id': n1.id})
+ )
def test_many_to_one_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'parent':relationship(Node, remote_side=nodes.c.id)
+ 'parent': relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
@@ -741,15 +753,15 @@ class SingleCycleTest(UOWTest):
sess.add_all([n2, n3])
self.assert_sql_execution(
- testing.db,
- sess.flush,
+ testing.db,
+ sess.flush,
- CompiledSQL(
- "INSERT INTO nodes (parent_id, data) VALUES "
- "(:parent_id, :data)",
- {'parent_id': None, 'data': 'n1'}
- ),
- AllOf(
+ CompiledSQL(
+ "INSERT INTO nodes (parent_id, data) VALUES "
+ "(:parent_id, :data)",
+ {'parent_id': None, 'data': 'n1'}
+ ),
+ AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
@@ -760,14 +772,14 @@ class SingleCycleTest(UOWTest):
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n3'}
),
- )
)
+ )
def test_many_to_one_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'parent':relationship(Node, remote_side=nodes.c.id)
+ 'parent': relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
@@ -781,19 +793,19 @@ class SingleCycleTest(UOWTest):
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:[{'id':n2.id},{'id':n3.id}]),
- CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx: {'id':n1.id})
+ testing.db,
+ sess.flush,
+ CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: [{'id': n2.id}, {'id': n3.id}]),
+ CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: {'id': n1.id})
)
def test_many_to_one_set_null_unloaded(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'parent':relationship(Node, remote_side=nodes.c.id)
+ 'parent': relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
n1 = Node(data='n1')
@@ -810,7 +822,7 @@ class SingleCycleTest(UOWTest):
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id WHERE "
"nodes.id = :nodes_id",
- lambda ctx: {"parent_id":None, "nodes_id":n2.id}
+ lambda ctx: {"parent_id": None, "nodes_id": n2.id}
)
)
@@ -818,7 +830,7 @@ class SingleCycleTest(UOWTest):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
@@ -836,9 +848,9 @@ class SingleCycleTest(UOWTest):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node,
- backref=backref('parent',
- remote_side=nodes.c.id))
+ 'children': relationship(Node,
+ backref=backref('parent',
+ remote_side=nodes.c.id))
})
sess = create_session()
@@ -857,11 +869,15 @@ class SingleCycleTest(UOWTest):
def test_bidirectional_multilevel_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
- mapper(Node, nodes, properties={
- 'children':relationship(Node,
- backref=backref('parent', remote_side=nodes.c.id)
- )
- })
+ mapper(
+ Node,
+ nodes,
+ properties={
+ 'children': relationship(
+ Node,
+ backref=backref(
+ 'parent',
+ remote_side=nodes.c.id))})
sess = create_session()
n1 = Node(data='n1')
n1.children.append(Node(data='n11'))
@@ -878,37 +894,37 @@ class SingleCycleTest(UOWTest):
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':None, 'data':'n1'}
+ lambda ctx: {'parent_id': None, 'data': 'n1'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n1.id, 'data':'n11'}
+ lambda ctx: {'parent_id': n1.id, 'data': 'n11'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n1.id, 'data':'n12'}
+ lambda ctx: {'parent_id': n1.id, 'data': 'n12'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n1.id, 'data':'n13'}
+ lambda ctx: {'parent_id': n1.id, 'data': 'n13'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n12.id, 'data':'n121'}
+ lambda ctx: {'parent_id': n12.id, 'data': 'n121'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n12.id, 'data':'n122'}
+ lambda ctx: {'parent_id': n12.id, 'data': 'n122'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
- lambda ctx:{'parent_id':n12.id, 'data':'n123'}
+ lambda ctx: {'parent_id': n12.id, 'data': 'n123'}
),
)
@@ -916,7 +932,7 @@ class SingleCycleTest(UOWTest):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'children':relationship(Node)
+ 'children': relationship(Node)
})
sess = create_session()
n1 = Node(data='ed')
@@ -925,7 +941,7 @@ class SingleCycleTest(UOWTest):
sess.flush()
- n1.data='jack'
+ n1.data = 'jack'
self._assert_uow_size(sess, 2)
sess.flush()
@@ -942,18 +958,17 @@ class SingleCycleTest(UOWTest):
sess = create_session()
n1 = sess.query(Node).first()
- n1.data='ed'
+ n1.data = 'ed'
self._assert_uow_size(sess, 2)
n1.children
self._assert_uow_size(sess, 2)
-
def test_delete_unloaded_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'parent':relationship(Node, remote_side=nodes.c.id)
+ 'parent': relationship(Node, remote_side=nodes.c.id)
})
parent = Node()
@@ -1022,35 +1037,38 @@ class SingleCycleTest(UOWTest):
)
-class SingleCyclePlusAttributeTest(fixtures.MappedTest,
- testing.AssertsExecutionResults, AssertsUOW):
+class SingleCyclePlusAttributeTest(
+ fixtures.MappedTest,
+ testing.AssertsExecutionResults,
+ AssertsUOW):
@classmethod
def define_tables(cls, metadata):
Table('nodes', metadata,
- Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('parent_id', Integer, ForeignKey('nodes.id')),
- Column('data', String(30))
- )
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('parent_id', Integer, ForeignKey('nodes.id')),
+ Column('data', String(30))
+ )
Table('foobars', metadata,
- Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('parent_id', Integer, ForeignKey('nodes.id')),
- )
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('parent_id', Integer, ForeignKey('nodes.id')),
+ )
def test_flush_size(self):
foobars, nodes = self.tables.foobars, self.tables.nodes
class Node(fixtures.ComparableEntity):
pass
+
class FooBar(fixtures.ComparableEntity):
pass
mapper(Node, nodes, properties={
- 'children':relationship(Node),
- 'foobars':relationship(FooBar)
+ 'children': relationship(Node),
+ 'foobars': relationship(FooBar)
})
mapper(FooBar, foobars)
@@ -1070,25 +1088,30 @@ class SingleCyclePlusAttributeTest(fixtures.MappedTest,
sess.flush()
+
class SingleCycleM2MTest(fixtures.MappedTest,
- testing.AssertsExecutionResults, AssertsUOW):
+ testing.AssertsExecutionResults, AssertsUOW):
@classmethod
def define_tables(cls, metadata):
- nodes = Table('nodes', metadata,
- Column('id', Integer,
- primary_key=True,
- test_needs_autoincrement=True),
- Column('data', String(30)),
- Column('favorite_node_id', Integer, ForeignKey('nodes.id'))
- )
+ Table(
+ 'nodes', metadata,
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('data', String(30)),
+ Column('favorite_node_id', Integer, ForeignKey('nodes.id')))
- node_to_nodes =Table('node_to_nodes', metadata,
- Column('left_node_id', Integer,
- ForeignKey('nodes.id'),primary_key=True),
- Column('right_node_id', Integer,
- ForeignKey('nodes.id'),primary_key=True),
- )
+ Table(
+ 'node_to_nodes', metadata,
+ Column(
+ 'left_node_id', Integer,
+ ForeignKey('nodes.id'), primary_key=True),
+ Column(
+ 'right_node_id', Integer,
+ ForeignKey('nodes.id'), primary_key=True),
+ )
def test_many_to_many_one(self):
nodes, node_to_nodes = self.tables.nodes, self.tables.node_to_nodes
@@ -1096,14 +1119,19 @@ class SingleCycleM2MTest(fixtures.MappedTest,
class Node(fixtures.ComparableEntity):
pass
- mapper(Node, nodes, properties={
- 'children':relationship(Node, secondary=node_to_nodes,
- primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id,
- secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id,
- backref='parents'
- ),
- 'favorite':relationship(Node, remote_side=nodes.c.id)
- })
+ mapper(
+ Node,
+ nodes,
+ properties={
+ 'children': relationship(
+ Node,
+ secondary=node_to_nodes,
+ primaryjoin=nodes.c.id == node_to_nodes.c.left_node_id,
+ secondaryjoin=nodes.c.id == node_to_nodes.c.right_node_id,
+ backref='parents'),
+ 'favorite': relationship(
+ Node,
+ remote_side=nodes.c.id)})
sess = create_session()
n1 = Node(data='n1')
@@ -1128,46 +1156,46 @@ class SingleCycleM2MTest(fixtures.MappedTest,
sess.flush()
eq_(
sess.query(node_to_nodes.c.left_node_id,
- node_to_nodes.c.right_node_id).\
- order_by(node_to_nodes.c.left_node_id,
- node_to_nodes.c.right_node_id).\
- all(),
+ node_to_nodes.c.right_node_id).
+ order_by(node_to_nodes.c.left_node_id,
+ node_to_nodes.c.right_node_id).
+ all(),
sorted([
- (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id),
- (n2.id, n3.id), (n2.id, n5.id),
- (n3.id, n5.id), (n3.id, n4.id)
- ])
+ (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id),
+ (n2.id, n3.id), (n2.id, n5.id),
+ (n3.id, n5.id), (n3.id, n4.id)
+ ])
)
sess.delete(n1)
self.assert_sql_execution(
- testing.db,
- sess.flush,
- # this is n1.parents firing off, as it should, since
- # passive_deletes is False for n1.parents
- CompiledSQL(
- "SELECT nodes.id AS nodes_id, nodes.data AS nodes_data, "
- "nodes.favorite_node_id AS nodes_favorite_node_id FROM "
- "nodes, node_to_nodes WHERE :param_1 = "
- "node_to_nodes.right_node_id AND nodes.id = "
- "node_to_nodes.left_node_id" ,
- lambda ctx:{'param_1': n1.id},
- ),
- CompiledSQL(
- "DELETE FROM node_to_nodes WHERE "
- "node_to_nodes.left_node_id = :left_node_id AND "
- "node_to_nodes.right_node_id = :right_node_id",
- lambda ctx:[
- {'right_node_id': n2.id, 'left_node_id': n1.id},
- {'right_node_id': n3.id, 'left_node_id': n1.id},
- {'right_node_id': n4.id, 'left_node_id': n1.id}
- ]
- ),
- CompiledSQL(
- "DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:{'id': n1.id}
- ),
+ testing.db,
+ sess.flush,
+ # this is n1.parents firing off, as it should, since
+ # passive_deletes is False for n1.parents
+ CompiledSQL(
+ "SELECT nodes.id AS nodes_id, nodes.data AS nodes_data, "
+ "nodes.favorite_node_id AS nodes_favorite_node_id FROM "
+ "nodes, node_to_nodes WHERE :param_1 = "
+ "node_to_nodes.right_node_id AND nodes.id = "
+ "node_to_nodes.left_node_id",
+ lambda ctx: {'param_1': n1.id},
+ ),
+ CompiledSQL(
+ "DELETE FROM node_to_nodes WHERE "
+ "node_to_nodes.left_node_id = :left_node_id AND "
+ "node_to_nodes.right_node_id = :right_node_id",
+ lambda ctx: [
+ {'right_node_id': n2.id, 'left_node_id': n1.id},
+ {'right_node_id': n3.id, 'left_node_id': n1.id},
+ {'right_node_id': n4.id, 'left_node_id': n1.id}
+ ]
+ ),
+ CompiledSQL(
+ "DELETE FROM nodes WHERE nodes.id = :id",
+ lambda ctx: {'id': n1.id}
+ ),
)
for n in [n2, n3, n4, n5]:
@@ -1185,7 +1213,7 @@ class SingleCycleM2MTest(fixtures.MappedTest,
"DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id "
"= :left_node_id AND node_to_nodes.right_node_id = "
":right_node_id",
- lambda ctx:[
+ lambda ctx: [
{'right_node_id': n5.id, 'left_node_id': n3.id},
{'right_node_id': n4.id, 'left_node_id': n3.id},
{'right_node_id': n3.id, 'left_node_id': n2.id},
@@ -1194,38 +1222,41 @@ class SingleCycleM2MTest(fixtures.MappedTest,
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:[{'id': n4.id}, {'id': n5.id}]
+ lambda ctx: [{'id': n4.id}, {'id': n5.id}]
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
- lambda ctx:[{'id': n2.id}, {'id': n3.id}]
+ lambda ctx: [{'id': n2.id}, {'id': n3.id}]
),
)
+
class RowswitchAccountingTest(fixtures.MappedTest):
+
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', Integer)
- )
+ Column('id', Integer, primary_key=True),
+ Column('data', Integer)
+ )
Table('child', metadata,
- Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
- Column('data', Integer)
- )
+ Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
+ Column('data', Integer)
+ )
def _fixture(self):
parent, child = self.tables.parent, self.tables.child
class Parent(fixtures.BasicEntity):
pass
+
class Child(fixtures.BasicEntity):
pass
mapper(Parent, parent, properties={
- 'child':relationship(Child, uselist=False,
- cascade="all, delete-orphan",
- backref="parent")
+ 'child': relationship(Child, uselist=False,
+ cascade="all, delete-orphan",
+ backref="parent")
})
mapper(Child, child)
return Parent, Child
@@ -1246,6 +1277,8 @@ class RowswitchAccountingTest(fixtures.MappedTest):
old = attributes.get_history(p3, 'child')[2][0]
assert old in sess
+ # essentially no SQL should be emitted here,
+ # because we've replaced the row with another identical one
sess.flush()
assert p3.child._sa_instance_state.session_id == sess.hash_key
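The new comment refers to the "row switch" path: when one flush sees a DELETE and an INSERT against the same primary key, the unit of work folds them into an UPDATE, and with identical values no SQL is needed at all. Sketched assuming the test's Parent/Child fixtures:

    sess = Session()
    p1 = Parent(id=1, data=2, child=Child(id=1, data=3))
    sess.add(p1)
    sess.flush()

    sess.delete(p1)
    p2 = Parent(id=1, data=2, child=Child(id=1, data=3))
    sess.add(p2)   # same PK as the deleted row -> row switch on flush
    sess.flush()   # no DELETE + INSERT; identical values, so no UPDATE either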
@@ -1274,8 +1307,10 @@ class RowswitchAccountingTest(fixtures.MappedTest):
eq_(sess.scalar(self.tables.parent.count()), 0)
+
class RowswitchM2OTest(fixtures.MappedTest):
# tests for #3060 and related issues
+
@classmethod
def define_tables(cls, metadata):
Table(
@@ -1299,17 +1334,18 @@ class RowswitchM2OTest(fixtures.MappedTest):
class A(fixtures.BasicEntity):
pass
+
class B(fixtures.BasicEntity):
pass
+
class C(fixtures.BasicEntity):
pass
-
mapper(A, a, properties={
- 'bs': relationship(B, cascade="all, delete-orphan")
+ 'bs': relationship(B, cascade="all, delete-orphan")
})
mapper(B, b, properties={
- 'c': relationship(C)
+ 'c': relationship(C)
})
mapper(C, c)
return A, B, C
@@ -1391,29 +1427,31 @@ class RowswitchM2OTest(fixtures.MappedTest):
class BasicStaleChecksTest(fixtures.MappedTest):
+
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', Integer)
- )
+ Column('id', Integer, primary_key=True),
+ Column('data', Integer)
+ )
Table('child', metadata,
- Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
- Column('data', Integer)
- )
+ Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
+ Column('data', Integer)
+ )
def _fixture(self, confirm_deleted_rows=True):
parent, child = self.tables.parent, self.tables.child
class Parent(fixtures.BasicEntity):
pass
+
class Child(fixtures.BasicEntity):
pass
mapper(Parent, parent, properties={
- 'child':relationship(Child, uselist=False,
- cascade="all, delete-orphan",
- backref="parent"),
+ 'child': relationship(Child, uselist=False,
+ cascade="all, delete-orphan",
+ backref="parent"),
}, confirm_deleted_rows=confirm_deleted_rows)
mapper(Child, child)
return Parent, Child
@@ -1431,7 +1469,7 @@ class BasicStaleChecksTest(fixtures.MappedTest):
assert_raises_message(
orm_exc.StaleDataError,
"UPDATE statement on table 'parent' expected to "
- "update 1 row\(s\); 0 were matched.",
+ "update 1 row\(s\); 0 were matched.",
sess.flush
)
@@ -1451,7 +1489,7 @@ class BasicStaleChecksTest(fixtures.MappedTest):
assert_raises_message(
exc.SAWarning,
"DELETE statement on table 'parent' expected to "
- "delete 2 row\(s\); 0 were matched.",
+ "delete 2 row\(s\); 0 were matched.",
sess.flush
)
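
Both assertions above exercise the flush's rowcount sanity checks: an
UPDATE that matches zero rows raises StaleDataError outright, while a
DELETE that comes up short is reported as a warning (the
confirm_deleted_rows mapper flag seen in this fixture exists to turn
that check off). A short sketch of the UPDATE case, assuming an
illustrative Widget class and an in-memory SQLite database:

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session
    from sqlalchemy.orm.exc import StaleDataError

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = 'widget'
        id = Column(Integer, primary_key=True)
        data = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    sess = Session(engine)
    w = Widget(id=1, data='a')
    sess.add(w)
    sess.commit()

    # delete the row behind the session's back, bypassing the ORM
    sess.execute(Widget.__table__.delete())

    w.data = 'b'
    try:
        sess.flush()    # the UPDATE matches 0 rows
    except StaleDataError as err:
        print(err)      # "... expected to update 1 row(s); 0 were matched."
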
@@ -1471,14 +1509,15 @@ class BasicStaleChecksTest(fixtures.MappedTest):
class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
+
@classmethod
def define_tables(cls, metadata):
Table('t', metadata,
- Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('data', String(50)),
- Column('def_', String(50), server_default='def1')
- )
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('data', String(50)),
+ Column('def_', String(50), server_default='def1')
+ )
def test_batch_interaction(self):
"""test batching groups same-structured, primary
@@ -1532,8 +1571,8 @@ class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
),
CompiledSQL(
"INSERT INTO t (id, data, def_) VALUES (:id, :data, :def_)",
- [{'data': 't9', 'id': 9, 'def_':'def2'},
- {'data': 't10', 'id': 10, 'def_':'def3'}]
+ [{'data': 't9', 'id': 9, 'def_': 'def2'},
+ {'data': 't10', 'id': 10, 'def_': 'def3'}]
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
@@ -1541,126 +1580,172 @@ class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
),
)
+
class LoadersUsingCommittedTest(UOWTest):
- """Test that events which occur within a flush()
- get the same attribute loading behavior as on the outside
- of the flush, and that the unit of work itself uses the
- "committed" version of primary/foreign key attributes
- when loading a collection for historical purposes (this typically
- has importance for when primary key values change).
+
+ """Test that events which occur within a flush()
+ get the same attribute loading behavior as on the outside
+ of the flush, and that the unit of work itself uses the
+ "committed" version of primary/foreign key attributes
+ when loading a collection for historical purposes (this typically
+ has importance for when primary key values change).
+
+ """
+
+ def _mapper_setup(self, passive_updates=True):
+ users, Address, addresses, User = (self.tables.users,
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
+
+ mapper(User, users, properties={
+ 'addresses': relationship(Address,
+ order_by=addresses.c.email_address,
+ passive_updates=passive_updates,
+ backref='user')
+ })
+ mapper(Address, addresses)
+ return create_session(autocommit=False)
+
+ def test_before_update_m2o(self):
+ """Expect normal many to one attribute load behavior
+ (should not get committed value)
+ from within public 'before_update' event"""
+ sess = self._mapper_setup()
+
+ Address, User = self.classes.Address, self.classes.User
+
+ def before_update(mapper, connection, target):
+ # if get committed is used to find target.user, then
+ # it will be still be u1 instead of u2
+ assert target.user.id == target.user_id == u2.id
+ from sqlalchemy import event
+ event.listen(Address, 'before_update', before_update)
+
+ a1 = Address(email_address='a1')
+ u1 = User(name='u1', addresses=[a1])
+ sess.add(u1)
+
+ u2 = User(name='u2')
+ sess.add(u2)
+ sess.commit()
+
+ sess.expunge_all()
+ # lookup an address and move it to the other user
+ a1 = sess.query(Address).get(a1.id)
+
+ # move address to another user's fk
+ assert a1.user_id == u1.id
+ a1.user_id = u2.id
+
+ sess.flush()
+
+ def test_before_update_o2m_passive(self):
+ """Expect normal one to many attribute load behavior
+ (should not get committed value)
+ from within public 'before_update' event"""
+ self._test_before_update_o2m(True)
+
+ def test_before_update_o2m_notpassive(self):
+ """Expect normal one to many attribute load behavior
+ (should not get committed value)
+ from within public 'before_update' event with
+ passive_updates=False
"""
+ self._test_before_update_o2m(False)
- def _mapper_setup(self, passive_updates=True):
- users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
-
- mapper(User, users, properties={
- 'addresses': relationship(Address,
- order_by=addresses.c.email_address,
- passive_updates=passive_updates,
- backref='user')
- })
- mapper(Address, addresses)
- return create_session(autocommit=False)
-
- def test_before_update_m2o(self):
- """Expect normal many to one attribute load behavior
- (should not get committed value)
- from within public 'before_update' event"""
- sess = self._mapper_setup()
-
- Address, User = self.classes.Address, self.classes.User
-
- def before_update(mapper, connection, target):
- # if get committed is used to find target.user, then
- # it will be still be u1 instead of u2
- assert target.user.id == target.user_id == u2.id
- from sqlalchemy import event
- event.listen(Address, 'before_update', before_update)
-
- a1 = Address(email_address='a1')
- u1 = User(name='u1', addresses=[a1])
- sess.add(u1)
-
- u2 = User(name='u2')
- sess.add(u2)
- sess.commit()
-
- sess.expunge_all()
- # lookup an address and move it to the other user
- a1 = sess.query(Address).get(a1.id)
-
- # move address to another user's fk
- assert a1.user_id == u1.id
- a1.user_id = u2.id
+ def _test_before_update_o2m(self, passive_updates):
+ sess = self._mapper_setup(passive_updates=passive_updates)
- sess.flush()
+ Address, User = self.classes.Address, self.classes.User
- def test_before_update_o2m_passive(self):
- """Expect normal one to many attribute load behavior
- (should not get committed value)
- from within public 'before_update' event"""
- self._test_before_update_o2m(True)
+ class AvoidReferencialError(Exception):
- def test_before_update_o2m_notpassive(self):
- """Expect normal one to many attribute load behavior
- (should not get committed value)
- from within public 'before_update' event with
- passive_updates=False
+ """the test here would require ON UPDATE CASCADE on FKs
+ for the flush to fully succeed; this exception is used
+ to cancel the flush before we get that far.
"""
- self._test_before_update_o2m(False)
-
- def _test_before_update_o2m(self, passive_updates):
- sess = self._mapper_setup(passive_updates=passive_updates)
-
- Address, User = self.classes.Address, self.classes.User
-
- class AvoidReferencialError(Exception):
- """the test here would require ON UPDATE CASCADE on FKs
- for the flush to fully succeed; this exception is used
- to cancel the flush before we get that far.
-
- """
-
- def before_update(mapper, connection, target):
- if passive_updates:
- # we shouldn't be using committed value.
- # so, having switched target's primary key,
- # we expect no related items in the collection
- # since we are using passive_updates
- # this is a behavior change since #2350
- assert 'addresses' not in target.__dict__
- eq_(target.addresses, [])
- else:
- # in contrast with passive_updates=True,
- # here we expect the orm to have looked up the addresses
- # with the committed value (it needs to in order to
- # update the foreign keys). So we expect addresses
- # collection to move with the user,
- # (just like they will be after the update)
-
- # collection is already loaded
- assert 'addresses' in target.__dict__
- eq_([a.id for a in target.addresses],
- [a.id for a in [a1, a2]])
- raise AvoidReferencialError()
- from sqlalchemy import event
- event.listen(User, 'before_update', before_update)
-
- a1 = Address(email_address='jack1')
- a2 = Address(email_address='jack2')
- u1 = User(id=1, name='jack', addresses=[a1, a2])
- sess.add(u1)
- sess.commit()
-
- sess.expunge_all()
- u1 = sess.query(User).get(u1.id)
- u1.id = 2
- try:
- sess.flush()
- except AvoidReferencialError:
- pass
+
+ def before_update(mapper, connection, target):
+ if passive_updates:
+                # we shouldn't be using the committed value,
+                # so, having switched target's primary key,
+                # we expect no related items in the collection
+                # since we are using passive_updates;
+                # this is a behavior change since #2350
+ assert 'addresses' not in target.__dict__
+ eq_(target.addresses, [])
+ else:
+ # in contrast with passive_updates=True,
+ # here we expect the orm to have looked up the addresses
+ # with the committed value (it needs to in order to
+                # update the foreign keys). So we expect the addresses
+                # collection to move with the user
+                # (just as it will after the update)
+
+ # collection is already loaded
+ assert 'addresses' in target.__dict__
+ eq_([a.id for a in target.addresses],
+ [a.id for a in [a1, a2]])
+ raise AvoidReferencialError()
+ from sqlalchemy import event
+ event.listen(User, 'before_update', before_update)
+
+ a1 = Address(email_address='jack1')
+ a2 = Address(email_address='jack2')
+ u1 = User(id=1, name='jack', addresses=[a1, a2])
+ sess.add(u1)
+ sess.commit()
+
+ sess.expunge_all()
+ u1 = sess.query(User).get(u1.id)
+ u1.id = 2
+ try:
+ sess.flush()
+ except AvoidReferencialError:
+ pass
+
+
+class NoAttrEventInFlushTest(fixtures.MappedTest):
+ """test [ticket:3167]"""
+
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'test', metadata,
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('prefetch_val', Integer, default=5),
+ Column('returning_val', Integer, server_default="5")
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class Thing(cls.Basic):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ Thing = cls.classes.Thing
+
+ mapper(Thing, cls.tables.test, eager_defaults=True)
+
+ def test_no_attr_events_flush(self):
+ Thing = self.classes.Thing
+ mock = Mock()
+ event.listen(Thing.id, "set", mock.id)
+ event.listen(Thing.prefetch_val, "set", mock.prefetch_val)
+        event.listen(Thing.returning_val, "set", mock.returning_val)
+ t1 = Thing()
+ s = Session()
+ s.add(t1)
+ s.flush()
+
+ eq_(len(mock.mock_calls), 0)
+ eq_(t1.id, 1)
+ eq_(t1.prefetch_val, 5)
+ eq_(t1.returning_val, 5)
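
The LoadersUsingCommittedTest rework above turns on the distinction
between an attribute's current value and its "committed" value, the
one most recently persisted; attributes.get_history(), already used
by the row-switch fixture earlier in this file, exposes both sides.
A compact sketch, with an illustrative Account class standing in for
the real fixtures:

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, attributes

    Base = declarative_base()

    class Account(Base):
        __tablename__ = 'account'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    sess = Session(engine)
    a = Account(id=1, name='old')
    sess.add(a)
    sess.commit()

    a.name               # touch the attribute so the committed value loads
    a.name = 'new'
    hist = attributes.get_history(a, 'name')
    print(a.name)        # 'new'   -- the current value
    print(hist.deleted)  # ['old'] -- the committed value being replaced
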
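NoAttrEventInFlushTest, directly above, asserts the [ticket:3167]
behavior: values the flush itself obtains for column defaults --
whether pre-executed or fetched via RETURNING under
eager_defaults=True -- are applied to the instance without firing
attribute 'set' events. A sketch of the observable effect, using an
illustrative Item class (the name and the in-memory SQLite setup are
assumptions, not part of the diff):

    from sqlalchemy import Column, Integer, create_engine, event
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class Item(Base):
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        val = Column(Integer, default=5)

    calls = []

    def on_set(target, value, oldvalue, initiator):
        # records any ORM-level 'set' event for Item.val
        calls.append(value)

    event.listen(Item.val, 'set', on_set)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    sess = Session(engine)
    item = Item()
    sess.add(item)
    sess.flush()

    print(item.val)  # 5  -- the column default, applied during the flush
    print(calls)     # [] -- no 'set' event fired for that application
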
diff --git a/test/profiles.txt b/test/profiles.txt
index 59ce23db3..ca84cdc26 100644
--- a/test/profiles.txt
+++ b/test/profiles.txt
@@ -13,507 +13,248 @@
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_insert
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_mysql_mysqlconnector_cextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_mysql_mysqlconnector_nocextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_postgresql_psycopg2_cextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_postgresql_psycopg2_nocextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_sqlite_pysqlite_cextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_sqlite_pysqlite_nocextensions 73
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_mysql_mysqlconnector_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_mysql_mysqlconnector_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_postgresql_psycopg2_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_postgresql_psycopg2_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_sqlite_pysqlite_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_sqlite_pysqlite_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_mysql_mysqlconnector_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_mysql_mysqlconnector_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_postgresql_psycopg2_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_postgresql_psycopg2_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_sqlite_pysqlite_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_sqlite_pysqlite_nocextensions 78
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_mysql_mysqlconnector_cextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_mysql_mysqlconnector_nocextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_postgresql_psycopg2_cextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_postgresql_psycopg2_nocextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_sqlite_pysqlite_cextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_sqlite_pysqlite_nocextensions 74
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_mysql_mysqlconnector_cextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_mysql_mysqlconnector_nocextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_postgresql_psycopg2_cextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_postgresql_psycopg2_nocextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_sqlite_pysqlite_cextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_insert 3.3_sqlite_pysqlite_nocextensions 77
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_select
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqlconnector_cextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqlconnector_nocextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_cextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_nocextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_cextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_nocextensions 151
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_mysql_mysqlconnector_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_mysql_mysqlconnector_nocextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_postgresql_psycopg2_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_postgresql_psycopg2_nocextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_sqlite_pysqlite_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_sqlite_pysqlite_nocextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_mysql_mysqlconnector_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_mysql_mysqlconnector_nocextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_postgresql_psycopg2_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_postgresql_psycopg2_nocextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_sqlite_pysqlite_cextensions 166
-test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_sqlite_pysqlite_nocextensions 166
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqlconnector_cextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqlconnector_nocextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_cextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_nocextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_cextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_nocextensions 152
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_mysql_mysqlconnector_cextensions 165
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_mysql_mysqlconnector_nocextensions 165
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_postgresql_psycopg2_cextensions 165
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_postgresql_psycopg2_nocextensions 165
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_sqlite_pysqlite_cextensions 165
+test.aaa_profiling.test_compiler.CompileTest.test_select 3.3_sqlite_pysqlite_nocextensions 165
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_select_labels
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqlconnector_cextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqlconnector_nocextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_cextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_nocextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_cextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_nocextensions 185
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_mysql_mysqlconnector_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_mysql_mysqlconnector_nocextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_postgresql_psycopg2_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_postgresql_psycopg2_nocextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_sqlite_pysqlite_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_sqlite_pysqlite_nocextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_mysql_mysqlconnector_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_mysql_mysqlconnector_nocextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_postgresql_psycopg2_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_postgresql_psycopg2_nocextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_sqlite_pysqlite_cextensions 200
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.4_sqlite_pysqlite_nocextensions 200
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqlconnector_cextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqlconnector_nocextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_cextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_nocextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_cextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_nocextensions 186
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_mysql_mysqlconnector_cextensions 199
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_mysql_mysqlconnector_nocextensions 199
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_postgresql_psycopg2_cextensions 199
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_postgresql_psycopg2_nocextensions 199
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_sqlite_pysqlite_cextensions 199
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 3.3_sqlite_pysqlite_nocextensions 199
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_update
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_mysql_mysqlconnector_cextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_mysql_mysqlconnector_nocextensions 78
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_postgresql_psycopg2_cextensions 76
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_postgresql_psycopg2_nocextensions 76
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_sqlite_pysqlite_cextensions 76
-test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_sqlite_pysqlite_nocextensions 76
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_mysql_mysqlconnector_cextensions 81
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_mysql_mysqlconnector_nocextensions 81
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_postgresql_psycopg2_cextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_postgresql_psycopg2_nocextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_sqlite_pysqlite_cextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_sqlite_pysqlite_nocextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_mysql_mysqlconnector_cextensions 81
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_mysql_mysqlconnector_nocextensions 81
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_postgresql_psycopg2_cextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_postgresql_psycopg2_nocextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_sqlite_pysqlite_cextensions 79
-test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_sqlite_pysqlite_nocextensions 79
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_mysql_mysqlconnector_cextensions 79
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_mysql_mysqlconnector_nocextensions 79
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_postgresql_psycopg2_cextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_postgresql_psycopg2_nocextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_sqlite_pysqlite_cextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_sqlite_pysqlite_nocextensions 77
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_mysql_mysqlconnector_cextensions 80
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_mysql_mysqlconnector_nocextensions 80
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_postgresql_psycopg2_cextensions 78
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_postgresql_psycopg2_nocextensions 78
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_sqlite_pysqlite_cextensions 78
+test.aaa_profiling.test_compiler.CompileTest.test_update 3.3_sqlite_pysqlite_nocextensions 78
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqlconnector_cextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqlconnector_nocextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_cextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_nocextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_cextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_nocextensions 147
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_mysql_mysqlconnector_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_mysql_mysqlconnector_nocextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_postgresql_psycopg2_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_postgresql_psycopg2_nocextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_sqlite_pysqlite_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_sqlite_pysqlite_nocextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_mysql_mysqlconnector_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_mysql_mysqlconnector_nocextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_postgresql_psycopg2_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_postgresql_psycopg2_nocextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_sqlite_pysqlite_cextensions 149
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_sqlite_pysqlite_nocextensions 149
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqlconnector_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqlconnector_nocextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_nocextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_nocextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_mysql_mysqlconnector_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_mysql_mysqlconnector_nocextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_postgresql_psycopg2_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_postgresql_psycopg2_nocextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_sqlite_pysqlite_cextensions 148
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_sqlite_pysqlite_nocextensions 148
# TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_cextensions 4265
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_nocextensions 4265
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_mysql_mysqlconnector_cextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_mysql_mysqlconnector_nocextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_cextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_postgresql_psycopg2_nocextensions 4266
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_nocextensions 4260
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_cextensions 4266
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_pysqlite_nocextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_mysql_mysqlconnector_cextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_cextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_cextensions 4266
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_nocextensions 4266
# TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_cextensions 6525
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_nocextensions 6525
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_mysql_mysqlconnector_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_mysql_mysqlconnector_nocextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_postgresql_psycopg2_nocextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_nocextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_mysql_mysqlconnector_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_postgresql_psycopg2_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_sqlite_pysqlite_cextensions 6527
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.4_sqlite_pysqlite_nocextensions 6527
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_cextensions 6426
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 2.7_sqlite_pysqlite_nocextensions 6426
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_cextensions 6428
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove 3.3_sqlite_pysqlite_nocextensions 6428
# TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 31372
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 40389
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_mysql_mysqlconnector_cextensions 111690
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_mysql_mysqlconnector_nocextensions 120693
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_cextensions 32222
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_postgresql_psycopg2_nocextensions 41225
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 32411
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_nocextensions 41414
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_mysql_mysqlconnector_cextensions 91564
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_cextensions 32222
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_cextensions 32411
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_nocextensions 41414
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 31373
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 40336
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_cextensions 32398
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_nocextensions 41401
# TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 31164
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 34169
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_mysql_mysqlconnector_cextensions 57315
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_mysql_mysqlconnector_nocextensions 60318
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_cextensions 32099
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_postgresql_psycopg2_nocextensions 35102
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 32210
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_nocextensions 35213
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_mysql_mysqlconnector_cextensions 55266
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_cextensions 32099
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_cextensions 32210
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_nocextensions 35213
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 31165
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 34170
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_cextensions 32197
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pysqlite_nocextensions 35200
# TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_cextensions 17987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_nocextensions 17987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_mysql_mysqlconnector_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_mysql_mysqlconnector_nocextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_postgresql_psycopg2_nocextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_nocextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_mysql_mysqlconnector_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_postgresql_psycopg2_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_sqlite_pysqlite_cextensions 18987
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.4_sqlite_pysqlite_nocextensions 18987
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_cextensions 17988
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_nocextensions 17988
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_cextensions 18988
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 3.3_sqlite_pysqlite_nocextensions 18988
# TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 162360
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 165110
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_mysql_mysqlconnector_cextensions 203865
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_mysql_mysqlconnector_nocextensions 205567
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_cextensions 127615
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_postgresql_psycopg2_nocextensions 129365
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_cextensions 170115
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_nocextensions 171865
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_mysql_mysqlconnector_cextensions 184817
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_cextensions 127567
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_cextensions 170067
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_nocextensions 171865
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 162315
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 165111
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_cextensions 169566
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.3_sqlite_pysqlite_nocextensions 171364
# TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 22448
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 22662
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_mysql_mysqlconnector_cextensions 26042
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_mysql_mysqlconnector_nocextensions 26246
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_cextensions 20541
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_postgresql_psycopg2_nocextensions 20685
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_cextensions 23330
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_nocextensions 23534
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_mysql_mysqlconnector_cextensions 24861
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_cextensions 20377
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_cextensions 23282
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_nocextensions 23452
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 22288
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 22530
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_cextensions 23067
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.3_sqlite_pysqlite_nocextensions 23271
# TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1600
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1625
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_mysql_mysqlconnector_cextensions 2268
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_mysql_mysqlconnector_nocextensions 2283
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_cextensions 1394
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_postgresql_psycopg2_nocextensions 1409
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1669
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocextensions 1684
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_mysql_mysqlconnector_cextensions 2139
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1394
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_cextensions 1669
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_nocextensions 1684
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1601
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1626
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_cextensions 1656
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocextensions 1671
# TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 116,17
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 116,17
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_mysql_mysqlconnector_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_mysql_mysqlconnector_nocextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_postgresql_psycopg2_nocextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_nocextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_mysql_mysqlconnector_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_cextensions 128,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_nocextensions 128,18
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 117,18
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 117,18
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_cextensions 122,19
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_nocextensions 122,19
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 90
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_nocextensions 90
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_mysql_mysqlconnector_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_mysql_mysqlconnector_nocextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_postgresql_psycopg2_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_postgresql_psycopg2_nocextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_nocextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.4_mysql_mysqlconnector_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.4_postgresql_psycopg2_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.4_sqlite_pysqlite_cextensions 77
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.4_sqlite_pysqlite_nocextensions 77
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 91
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_nocextensions 91
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_cextensions 78
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 3.3_sqlite_pysqlite_nocextensions 78
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_cextensions 30
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_nocextensions 30
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_mysql_mysqlconnector_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_mysql_mysqlconnector_nocextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_postgresql_psycopg2_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_postgresql_psycopg2_nocextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_nocextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.4_mysql_mysqlconnector_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.4_postgresql_psycopg2_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.4_sqlite_pysqlite_cextensions 23
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.4_sqlite_pysqlite_nocextensions 23
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_cextensions 31
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_nocextensions 31
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_cextensions 24
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 3.3_sqlite_pysqlite_nocextensions 24
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_cextensions 7
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_nocextensions 7
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_mysql_mysqlconnector_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_mysql_mysqlconnector_nocextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_postgresql_psycopg2_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_postgresql_psycopg2_nocextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_nocextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.4_mysql_mysqlconnector_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.4_postgresql_psycopg2_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.4_sqlite_pysqlite_cextensions 8
-test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.4_sqlite_pysqlite_nocextensions 8
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_cextensions 8
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_nocextensions 8
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_cextensions 9
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 3.3_sqlite_pysqlite_nocextensions 9
# TEST: test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqlconnector_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqlconnector_nocextensions 44
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_nocextensions 44
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_nocextensions 44
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_nocextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_nocextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_nocextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_mysql_mysqlconnector_nocextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_postgresql_psycopg2_nocextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_sqlite_pysqlite_cextensions 42
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.4_sqlite_pysqlite_nocextensions 42
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqlconnector_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqlconnector_nocextensions 45
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_nocextensions 45
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_nocextensions 45
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_mysql_mysqlconnector_nocextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_postgresql_psycopg2_nocextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_cextensions 43
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 3.3_sqlite_pysqlite_nocextensions 43
# TEST: test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqlconnector_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqlconnector_nocextensions 79
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_nocextensions 79
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_nocextensions 79
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_nocextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_nocextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_nocextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_mysql_mysqlconnector_nocextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_postgresql_psycopg2_nocextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_sqlite_pysqlite_cextensions 77
-test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.4_sqlite_pysqlite_nocextensions 77
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqlconnector_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqlconnector_nocextensions 80
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_nocextensions 80
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_nocextensions 80
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_mysql_mysqlconnector_nocextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_postgresql_psycopg2_nocextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_cextensions 78
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 3.3_sqlite_pysqlite_nocextensions 78
# TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqlconnector_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqlconnector_nocextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_nocextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_nocextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_nocextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_nocextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_nocextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_mysql_mysqlconnector_nocextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_postgresql_psycopg2_nocextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_sqlite_pysqlite_cextensions 15
-test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4_sqlite_pysqlite_nocextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqlconnector_cextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqlconnector_nocextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_cextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_nocextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_nocextensions 15
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_cextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_mysql_mysqlconnector_nocextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_cextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_postgresql_psycopg2_nocextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_cextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.3_sqlite_pysqlite_nocextensions 16
# TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_string
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqlconnector_cextensions 92958
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqlconnector_nocextensions 107978
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20500
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35520
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 456
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15476
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_cextensions 109145
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_nocextensions 123145
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 498
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_nocextensions 14498
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 471
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_nocextensions 14471
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_cextensions 79885
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_mysql_mysqlconnector_nocextensions 93885
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_cextensions 498
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_postgresql_psycopg2_nocextensions 14498
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_cextensions 471
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_nocextensions 14471
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqlconnector_cextensions 92959
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqlconnector_nocextensions 107979
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20501
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35521
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 457
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15477
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_cextensions 109136
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_mysql_mysqlconnector_nocextensions 123136
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_cextensions 489
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_postgresql_psycopg2_nocextensions 14489
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_cextensions 462
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.3_sqlite_pysqlite_nocextensions 14462
# TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqlconnector_cextensions 92958
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqlconnector_nocextensions 107978
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20500
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35520
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 456
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15476
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_cextensions 109145
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_nocextensions 123145
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 498
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_nocextensions 14498
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 471
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_nocextensions 14471
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_cextensions 79885
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_mysql_mysqlconnector_nocextensions 93885
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_cextensions 498
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_postgresql_psycopg2_nocextensions 14498
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_cextensions 471
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite_nocextensions 14471
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_cextensions 5562
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_nocextensions 5606
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 3.3_postgresql_psycopg2_cextensions 5381
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 3.3_postgresql_psycopg2_nocextensions 5403
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 3.4_postgresql_psycopg2_cextensions 5381
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 3.4_postgresql_psycopg2_nocextensions 5403
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_cextensions 277
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_nocextensions 277
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 3.3_postgresql_psycopg2_cextensions 269
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 3.3_postgresql_psycopg2_nocextensions 269
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 3.4_postgresql_psycopg2_cextensions 269
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 3.4_postgresql_psycopg2_nocextensions 269
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_cextensions 3697
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_nocextensions 3929
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 3.3_postgresql_psycopg2_cextensions 3641
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 3.3_postgresql_psycopg2_nocextensions 3737
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 3.4_postgresql_psycopg2_cextensions 3641
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 3.4_postgresql_psycopg2_nocextensions 3737
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_cextensions 11893
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_nocextensions 13595
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 3.3_postgresql_psycopg2_cextensions 11751
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 3.3_postgresql_psycopg2_nocextensions 12923
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 3.4_postgresql_psycopg2_cextensions 11751
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 3.4_postgresql_psycopg2_nocextensions 12923
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_cextensions 1106
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_nocextensions 1223
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 3.3_postgresql_psycopg2_cextensions 1077
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 3.3_postgresql_psycopg2_nocextensions 1171
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 3.4_postgresql_psycopg2_cextensions 1077
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 3.4_postgresql_psycopg2_nocextensions 1171
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_cextensions 1968
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_nocextensions 2011
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 3.3_postgresql_psycopg2_cextensions 1913
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 3.3_postgresql_psycopg2_nocextensions 1920
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 3.4_postgresql_psycopg2_cextensions 1913
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 3.4_postgresql_psycopg2_nocextensions 1920
-
-# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview
-
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 2.7_postgresql_psycopg2_cextensions 2433
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 2.7_postgresql_psycopg2_nocextensions 2692
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 3.3_postgresql_psycopg2_cextensions 2449
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 3.3_postgresql_psycopg2_nocextensions 2641
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 3.4_postgresql_psycopg2_cextensions 2449
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 3.4_postgresql_psycopg2_nocextensions 2641
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_cextensions 6276
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_nocextensions 6395
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 3.3_postgresql_psycopg2_cextensions 6412
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 3.3_postgresql_psycopg2_nocextensions 6497
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 3.4_postgresql_psycopg2_cextensions 6412
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 3.4_postgresql_psycopg2_nocextensions 6497
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_cextensions 403
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_nocextensions 410
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 3.3_postgresql_psycopg2_cextensions 401
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 3.3_postgresql_psycopg2_nocextensions 406
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 3.4_postgresql_psycopg2_cextensions 401
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 3.4_postgresql_psycopg2_nocextensions 406
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_cextensions 6878
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_nocextensions 7110
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 3.3_postgresql_psycopg2_cextensions 7008
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 3.3_postgresql_psycopg2_nocextensions 7112
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 3.4_postgresql_psycopg2_cextensions 7008
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 3.4_postgresql_psycopg2_nocextensions 7112
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_cextensions 19521
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_nocextensions 20952
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 3.3_postgresql_psycopg2_cextensions 19868
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 3.3_postgresql_psycopg2_nocextensions 20895
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 3.4_postgresql_psycopg2_cextensions 19868
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 3.4_postgresql_psycopg2_nocextensions 20895
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_cextensions 1118
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_nocextensions 1226
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 3.3_postgresql_psycopg2_cextensions 1091
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 3.3_postgresql_psycopg2_nocextensions 1177
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 3.4_postgresql_psycopg2_cextensions 1091
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 3.4_postgresql_psycopg2_nocextensions 1177
-
-# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing
-
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_cextensions 2733
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_nocextensions 2796
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 3.3_postgresql_psycopg2_cextensions 2784
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 3.3_postgresql_psycopg2_nocextensions 2811
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 3.4_postgresql_psycopg2_cextensions 2784
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 3.4_postgresql_psycopg2_nocextensions 2811
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqlconnector_cextensions 92959
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqlconnector_nocextensions 107979
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20501
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35521
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 457
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15477
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_cextensions 109136
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_mysql_mysqlconnector_nocextensions 123136
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_cextensions 489
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_postgresql_psycopg2_nocextensions 14489
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_cextensions 462
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.3_sqlite_pysqlite_nocextensions 14462
+
+# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation
+
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5562,277,3697,11893,1106,1968,2433
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5606,277,3929,13595,1223,2011,2692
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5238,259,3577,11529,1077,1886,2439
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5260,259,3673,12701,1171,1893,2631
+
+# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation
+
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5908,396,6878,19521,1118,2725
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5999,401,7110,20952,1226,2790
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5816,383,6928,19676,1091,2753
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5886,388,7032,20703,1177,2782
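
The zoomark rows above fold what were seven per-step tests into a single test_invocation entry per backend, storing one callcount per sub-step as a comma-separated list instead of one row per sub-test. A minimal reading sketch, assuming only the three-field "test platform counts" layout visible above (the helper name is illustrative, not part of the commit):

    def parse_profile_line(line):
        # profiles.txt rows are "test_key platform counts"; the consolidated
        # zoomark entries carry several comma-separated callcounts
        test_key, platform, counts = line.split()
        return test_key, platform, [int(c) for c in counts.split(",")]

    key, plat, counts = parse_profile_line(
        "test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation "
        "2.7_postgresql_psycopg2_cextensions 5562,277,3697,11893,1106,1968,2433")
    assert counts == [5562, 277, 3697, 11893, 1106, 1968, 2433]
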
diff --git a/test/requirements.py b/test/requirements.py
index 927c94bfb..46c19389a 100644
--- a/test/requirements.py
+++ b/test/requirements.py
@@ -363,20 +363,9 @@ class DefaultRequirements(SuiteRequirements):
'need separate XA implementation'),
exclude('mysql', '<', (5, 0, 3),
'two-phase xact not supported by database'),
- no_support("postgresql+pg8000", "not supported and/or hangs")
])
@property
- def graceful_disconnects(self):
- """Target driver must raise a DBAPI-level exception, such as
- InterfaceError, when the underlying connection has been closed
- and the execute() method is called.
- """
- return fails_on(
- "postgresql+pg8000", "Driver crashes"
- )
-
- @property
def views(self):
"""Target database must support VIEWs."""
diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py
index d59d79d89..d2fba5862 100644
--- a/test/sql/test_insert.py
+++ b/test/sql/test_insert.py
@@ -17,7 +17,7 @@ class _InsertTestBase(object):
Column('name', String(30)),
Column('description', String(30)))
Table('myothertable', metadata,
- Column('otherid', Integer),
+ Column('otherid', Integer, primary_key=True),
Column('othername', String(30)))
@@ -138,6 +138,23 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
dialect=default.DefaultDialect()
)
+ def test_insert_from_select_returning(self):
+ table1 = self.tables.mytable
+ sel = select([table1.c.myid, table1.c.name]).where(
+ table1.c.name == 'foo')
+ ins = self.tables.myothertable.insert().\
+ from_select(("otherid", "othername"), sel).returning(
+ self.tables.myothertable.c.otherid
+ )
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (otherid, othername) "
+ "SELECT mytable.myid, mytable.name FROM mytable "
+ "WHERE mytable.name = %(name_1)s RETURNING myothertable.otherid",
+ checkparams={"name_1": "foo"},
+ dialect="postgresql"
+ )
+
def test_insert_from_select_select(self):
table1 = self.tables.mytable
sel = select([table1.c.myid, table1.c.name]).where(
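
The new test_insert_from_select_returning above pins down the combination of from_select() chained with returning(). The same statement as a standalone sketch, with the fixture tables reconstructed for self-containment:

    from sqlalchemy import MetaData, Table, Column, Integer, String, select

    metadata = MetaData()
    mytable = Table('mytable', metadata,
                    Column('myid', Integer, primary_key=True),
                    Column('name', String(30)))
    myothertable = Table('myothertable', metadata,
                         Column('otherid', Integer, primary_key=True),
                         Column('othername', String(30)))

    sel = select([mytable.c.myid, mytable.c.name]).where(
        mytable.c.name == 'foo')
    stmt = myothertable.insert().from_select(
        ("otherid", "othername"), sel).returning(myothertable.c.otherid)
    # on postgresql this compiles to the INSERT ... SELECT ... RETURNING
    # string asserted in the test above
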
@@ -230,7 +247,7 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
)
ins = mytable.insert().\
from_select(
- [mytable.c.name, mytable.c.description], sel)
+ [mytable.c.name, mytable.c.description], sel)
self.assert_compile(
ins,
"INSERT INTO mytable (name, description) "
@@ -254,6 +271,94 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
)
+class InsertImplicitReturningTest(
+ _InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = postgresql.dialect(implicit_returning=True)
+
+ def test_insert_select(self):
+ table1 = self.tables.mytable
+ sel = select([table1.c.myid, table1.c.name]).where(
+ table1.c.name == 'foo')
+ ins = self.tables.myothertable.insert().\
+ from_select(("otherid", "othername"), sel)
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (otherid, othername) "
+ "SELECT mytable.myid, mytable.name FROM mytable "
+ "WHERE mytable.name = %(name_1)s",
+ checkparams={"name_1": "foo"}
+ )
+
+ def test_insert_select_return_defaults(self):
+ table1 = self.tables.mytable
+ sel = select([table1.c.myid, table1.c.name]).where(
+ table1.c.name == 'foo')
+ ins = self.tables.myothertable.insert().\
+ from_select(("otherid", "othername"), sel).\
+ return_defaults(self.tables.myothertable.c.otherid)
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (otherid, othername) "
+ "SELECT mytable.myid, mytable.name FROM mytable "
+ "WHERE mytable.name = %(name_1)s",
+ checkparams={"name_1": "foo"}
+ )
+
+ def test_insert_multiple_values(self):
+ ins = self.tables.myothertable.insert().values([
+ {"othername": "foo"},
+ {"othername": "bar"},
+ ])
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (othername) "
+ "VALUES (%(othername_0)s), "
+ "(%(othername_1)s)",
+ checkparams={
+ 'othername_1': 'bar',
+ 'othername_0': 'foo'}
+ )
+
+ def test_insert_multiple_values_return_defaults(self):
+ # TODO: it is undecided whether multi-values insert combined with
+ # return_defaults() should raise; for now it compiles with no RETURNING
+ ins = self.tables.myothertable.insert().values([
+ {"othername": "foo"},
+ {"othername": "bar"},
+ ]).return_defaults(self.tables.myothertable.c.otherid)
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (othername) "
+ "VALUES (%(othername_0)s), "
+ "(%(othername_1)s)",
+ checkparams={
+ 'othername_1': 'bar',
+ 'othername_0': 'foo'}
+ )
+
+ def test_insert_single_list_values(self):
+ ins = self.tables.myothertable.insert().values([
+ {"othername": "foo"},
+ ])
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (othername) "
+ "VALUES (%(othername_0)s)",
+ checkparams={'othername_0': 'foo'}
+ )
+
+ def test_insert_single_element_values(self):
+ ins = self.tables.myothertable.insert().values(
+ {"othername": "foo"},
+ )
+ self.assert_compile(
+ ins,
+ "INSERT INTO myothertable (othername) "
+ "VALUES (%(othername)s) RETURNING myothertable.otherid",
+ checkparams={'othername': 'foo'}
+ )
+
+
class EmptyTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = 'default'
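
The InsertImplicitReturningTest class added above delimits when the postgresql dialect injects RETURNING on its own: only the plain single-row VALUES form receives it, while INSERT..FROM SELECT and multi-values inserts compile without it, even when return_defaults() is requested. A compact sketch of that distinction, using illustrative table setup:

    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    t = Table('myothertable', metadata,
              Column('otherid', Integer, primary_key=True),
              Column('othername', String(30)))
    dialect = postgresql.dialect(implicit_returning=True)

    single = t.insert().values(othername='foo')
    multi = t.insert().values([{'othername': 'foo'}, {'othername': 'bar'}])

    print(single.compile(dialect=dialect))  # ends with RETURNING myothertable.otherid
    print(multi.compile(dialect=dialect))   # multi-values: no implicit RETURNING
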
diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py
index ff2755ab1..4a484dbac 100644
--- a/test/sql/test_metadata.py
+++ b/test/sql/test_metadata.py
@@ -349,6 +349,20 @@ class MetaDataTest(fixtures.TestBase, ComparesTables):
assert t.c.x.default is s2
assert m1._sequences['x_seq'] is s2
+
+ def test_sequence_attach_to_table(self):
+ m1 = MetaData()
+ s1 = Sequence("s")
+ t = Table('a', m1, Column('x', Integer, s1))
+ assert s1.metadata is m1
+
+ def test_sequence_attach_to_existing_table(self):
+ m1 = MetaData()
+ s1 = Sequence("s")
+ t = Table('a', m1, Column('x', Integer))
+ t.c.x._init_items(s1)
+ assert s1.metadata is m1
+
def test_pickle_metadata_sequence_implicit(self):
m1 = MetaData()
Table('a', m1,
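
Both new tests assert the same contract: a Sequence handed to a Column becomes associated with the owning Table's MetaData, whether passed at construction or appended later through the private _init_items() hook. The public-facing half, as a minimal sketch:

    from sqlalchemy import MetaData, Table, Column, Integer, Sequence

    m = MetaData()
    s = Sequence('s')
    t = Table('a', m, Column('x', Integer, s))
    assert s.metadata is m  # the sequence is now tied to the table's MetaData
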
diff --git a/test/sql/test_query.py b/test/sql/test_query.py
index a475b899f..2075bcecf 100644
--- a/test/sql/test_query.py
+++ b/test/sql/test_query.py
@@ -276,6 +276,13 @@ class QueryTest(fixtures.TestBase):
r = t6.insert().values(manual_id=id).execute()
eq_(r.inserted_primary_key, [12, 1])
+ def test_implicit_id_insert_select(self):
+ stmt = users.insert().from_select(
+ (users.c.user_id, users.c.user_name),
+ users.select().where(users.c.user_id == 20))
+
+ testing.db.execute(stmt)
+
def test_row_iteration(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py
index 26dbcdaa2..79a0b38a5 100644
--- a/test/sql/test_returning.py
+++ b/test/sql/test_returning.py
@@ -1,13 +1,16 @@
from sqlalchemy.testing import eq_
-from sqlalchemy import *
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.types import TypeDecorator
from sqlalchemy.testing import fixtures, AssertsExecutionResults, engines, \
assert_raises_message
from sqlalchemy import exc as sa_exc
+from sqlalchemy import MetaData, String, Integer, Boolean, func, select, \
+ Sequence
import itertools
+table = GoofyType = seq = None
+
class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
__requires__ = 'returning',
@@ -31,11 +34,13 @@ class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
return value + "BAR"
table = Table(
- 'tables', meta, Column(
- 'id', Integer, primary_key=True, test_needs_autoincrement=True), Column(
- 'persons', Integer), Column(
- 'full', Boolean), Column(
- 'goofy', GoofyType(50)))
+ 'tables', meta,
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('persons', Integer),
+ Column('full', Boolean),
+ Column('goofy', GoofyType(50)))
table.create(checkfirst=True)
def teardown(self):
@@ -47,9 +52,11 @@ class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
row = result.first()
assert row[table.c.id] == row['id'] == 1
- assert row[table.c.full] == row['full'] == False
+ assert row[table.c.full] == row['full']
+ assert row['full'] is False
- result = table.insert().values(persons=5, full=True, goofy="somegoofy").\
+ result = table.insert().values(
+ persons=5, full=True, goofy="somegoofy").\
returning(table.c.persons, table.c.full, table.c.goofy).execute()
row = result.first()
assert row[table.c.persons] == row['persons'] == 5
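
The reflowed assertions above also replace == False with an identity check, the lint-clean idiom; the test still demonstrates that RETURNING rows are addressable by Column object and by string key alike. Sketched against the bound table fixture of this test class:

    result = table.insert().values(persons=1, full=False).returning(
        table.c.id, table.c.full).execute()
    row = result.first()
    assert row[table.c.full] == row['full']  # Column key and string key agree
    assert row['full'] is False              # identity check, not == False
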
@@ -238,11 +245,13 @@ class ReturnDefaultsTest(fixtures.TablesTest):
return str(next(counter))
Table(
- "t1", metadata, Column(
- "id", Integer, primary_key=True, test_needs_autoincrement=True), Column(
- "data", String(50)), Column(
- "insdef", Integer, default=IncDefault()), Column(
- "upddef", Integer, onupdate=IncDefault()))
+ "t1", metadata,
+ Column(
+ "id", Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column("data", String(50)),
+ Column("insdef", Integer, default=IncDefault()),
+ Column("upddef", Integer, onupdate=IncDefault()))
def test_chained_insert_pk(self):
t1 = self.tables.t1
@@ -336,9 +345,10 @@ class ReturnDefaultsTest(fixtures.TablesTest):
testing.db.execute(
t1.insert().values(upddef=1)
)
- result = testing.db.execute(t1.update().
- values(insdef=2).return_defaults(
- t1.c.data, t1.c.upddef))
+ result = testing.db.execute(
+ t1.update().
+ values(insdef=2).return_defaults(
+ t1.c.data, t1.c.upddef))
eq_(
dict(result.returned_defaults),
{"data": None, 'upddef': 1}
@@ -352,12 +362,14 @@ class ImplicitReturningFlag(fixtures.TestBase):
e = engines.testing_engine(options={'implicit_returning': False})
assert e.dialect.implicit_returning is False
c = e.connect()
+ c.close()
assert e.dialect.implicit_returning is False
def test_flag_turned_on(self):
e = engines.testing_engine(options={'implicit_returning': True})
assert e.dialect.implicit_returning is True
c = e.connect()
+ c.close()
assert e.dialect.implicit_returning is True
def test_flag_turned_default(self):
@@ -377,4 +389,5 @@ class ImplicitReturningFlag(fixtures.TestBase):
# version detection on connect sets it
c = e.connect()
+ c.close()
assert e.dialect.implicit_returning is supports[0]
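
The three close() calls added in this last hunk return each test connection to the pool, so the flag checks no longer leak connections. The pattern they bracket, sketched with an illustrative URL:

    from sqlalchemy import create_engine

    e = create_engine("postgresql://scott:tiger@localhost/test",
                      implicit_returning=False)
    assert e.dialect.implicit_returning is False
    c = e.connect()  # first connect runs dialect initialization
    c.close()        # give the connection back; the flag stays as configured
    assert e.dialect.implicit_returning is False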