author     Rodrigo Menezes <rodrigo.menezes@moat.com>  2014-08-26 12:57:00 -0400
committer  Rodrigo Menezes <rodrigo.menezes@moat.com>  2014-08-26 12:57:00 -0400
commit     b3f7cd8bf497febb80e6cd7dc39effc75ff1a7e7 (patch)
tree       e3a022b20405768bb4e1912c9a2f1347b751d64c /lib/sqlalchemy
parent     bcf7a55da01633c4890502463a08cb96af9fe5e9 (diff)
parent     8e84942aa6fa2644b3fe6407c79449715a7e2c8c (diff)
Merge branch 'master' of https://github.com/zzzeek/sqlalchemy into feature/postgres-relkind
Diffstat (limited to 'lib/sqlalchemy')
-rw-r--r--  lib/sqlalchemy/dialects/mysql/base.py          |  12
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/base.py     |  72
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/pg8000.py   |  14
-rw-r--r--  lib/sqlalchemy/engine/base.py                  |   4
-rw-r--r--  lib/sqlalchemy/event/api.py                    |  54
-rw-r--r--  lib/sqlalchemy/event/attr.py                   |  15
-rw-r--r--  lib/sqlalchemy/event/registry.py               |   2
-rw-r--r--  lib/sqlalchemy/ext/mutable.py                  |  10
-rw-r--r--  lib/sqlalchemy/orm/events.py                   |  12
-rw-r--r--  lib/sqlalchemy/orm/identity.py                 |  29
-rw-r--r--  lib/sqlalchemy/orm/mapper.py                   |  91
-rw-r--r--  lib/sqlalchemy/orm/persistence.py              | 547
-rw-r--r--  lib/sqlalchemy/pool.py                         |  24
-rw-r--r--  lib/sqlalchemy/sql/compiler.py                 |   4
-rw-r--r--  lib/sqlalchemy/sql/dml.py                      |  38
-rw-r--r--  lib/sqlalchemy/sql/schema.py                   |   3
-rw-r--r--  lib/sqlalchemy/testing/engines.py              | 112
-rw-r--r--  lib/sqlalchemy/testing/plugin/provision.py     |  17
-rw-r--r--  lib/sqlalchemy/testing/plugin/pytestplugin.py  |  14
-rw-r--r--  lib/sqlalchemy/testing/profiling.py            | 216
-rw-r--r--  lib/sqlalchemy/testing/replay_fixture.py       | 167
-rw-r--r--  lib/sqlalchemy/util/_collections.py            |  50
22 files changed, 828 insertions, 679 deletions
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 374960765..012d178e7 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -190,15 +190,13 @@ SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
-SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag
-value is overridden using DBAPI-specific options
-(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
-OurSQL driver).
+SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
+flag, or whatever is equivalent for the target dialect, upon connection.
+This setting is currently hardcoded.
-See also:
+.. seealso::
-:attr:`.ResultProxy.rowcount`
+ :attr:`.ResultProxy.rowcount`
CAST Support
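
A minimal sketch of the rowcount behavior the hunk above documents, assuming
a running MySQL server; the URL and table are illustrative only::

    from sqlalchemy import create_engine
    from sqlalchemy.sql import table, column

    engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test")
    t = table("t", column("id"), column("x"))
    with engine.connect() as conn:
        # with CLIENT.FOUND_ROWS set, an UPDATE that matches a row but
        # leaves it unchanged still counts toward rowcount
        result = conn.execute(t.update().where(t.c.id == 1).values(x=5))
        print(result.rowcount)  # rows matched, not rows actually modified
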
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 75d0696ad..206a25d28 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -417,6 +417,42 @@ of :class:`.PGInspector`, which offers additional methods::
.. autoclass:: PGInspector
:members:
+.. _postgresql_table_options:
+
+PostgreSQL Table Options
+-------------------------
+
+Several options for CREATE TABLE are supported directly by the PostgreSQL
+dialect in conjunction with the :class:`.Table` construct:
+
+* ``TABLESPACE``::
+
+ Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
+
+* ``ON COMMIT``::
+
+ Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
+
+* ``WITH OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=True)
+
+* ``WITHOUT OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=False)
+
+* ``INHERITS``::
+
+ Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
+
+ Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+ `Postgresql CREATE TABLE options
+ <http://www.postgresql.org/docs/9.3/static/sql-createtable.html>`_
"""
from collections import defaultdict
@@ -1448,6 +1484,36 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += self.define_constraint_deferrability(constraint)
return text
+ def post_create_table(self, table):
+ table_opts = []
+ pg_opts = table.dialect_options['postgresql']
+
+ inherits = pg_opts.get('inherits')
+ if inherits is not None:
+ if not isinstance(inherits, (list, tuple)):
+ inherits = (inherits, )
+ table_opts.append(
+ '\n INHERITS ( ' +
+ ', '.join(self.preparer.quote(name) for name in inherits) +
+ ' )')
+
+ if pg_opts['with_oids'] is True:
+ table_opts.append('\n WITH OIDS')
+ elif pg_opts['with_oids'] is False:
+ table_opts.append('\n WITHOUT OIDS')
+
+ if pg_opts['on_commit']:
+ on_commit_options = pg_opts['on_commit'].replace("_", " ").upper()
+ table_opts.append('\n ON COMMIT %s' % on_commit_options)
+
+ if pg_opts['tablespace']:
+ tablespace_name = pg_opts['tablespace']
+ table_opts.append(
+ '\n TABLESPACE %s' % self.preparer.quote(tablespace_name)
+ )
+
+ return ''.join(table_opts)
+
class PGTypeCompiler(compiler.GenericTypeCompiler):
@@ -1707,7 +1773,11 @@ class PGDialect(default.DefaultDialect):
"ops": {}
}),
(schema.Table, {
- "ignore_search_path": False
+ "ignore_search_path": False,
+ "tablespace": None,
+ "with_oids": None,
+ "on_commit": None,
+ "inherits": None
})
]
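
Taken together, the new ``post_create_table()`` hook and the dialect options
registered above compile into trailing CREATE TABLE clauses; a sketch,
runnable without a database once this patch is applied (names are
illustrative)::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import postgresql

    m = MetaData()
    t = Table("some_table", m,
              Column("id", Integer, primary_key=True),
              postgresql_tablespace="some_tablespace",
              postgresql_on_commit="preserve_rows")
    # "preserve_rows" is upper-cased with underscores replaced, per the hook
    print(CreateTable(t).compile(dialect=postgresql.dialect()))
    # ... ON COMMIT PRESERVE ROWS TABLESPACE some_tablespace
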
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index 68da5b6d7..4ccc90208 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -119,7 +119,7 @@ class PGDialect_pg8000(PGDialect):
supports_unicode_binds = True
default_paramstyle = 'format'
- supports_sane_multi_rowcount = False
+ supports_sane_multi_rowcount = True
execution_ctx_cls = PGExecutionContext_pg8000
statement_compiler = PGCompiler_pg8000
preparer = PGIdentifierPreparer_pg8000
@@ -133,6 +133,16 @@ class PGDialect_pg8000(PGDialect):
}
)
+ def initialize(self, connection):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ self._dbapi_version = tuple([
+ int(x) for x in
+ self.dbapi.__version__.split(".")])
+ else:
+ self._dbapi_version = (99, 99, 99)
+ self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
+ super(PGDialect_pg8000, self).initialize(connection)
+
@classmethod
def dbapi(cls):
return __import__('pg8000')
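
The version gate in ``initialize()`` above reduces to a tuple comparison on
the parsed ``__version__`` string; a sketch (version strings are
illustrative, and non-numeric segments such as "1.9.14b1" would need extra
handling)::

    def parse_version(v):
        # "1.9.14" -> (1, 9, 14)
        return tuple(int(x) for x in v.split("."))

    assert parse_version("1.9.14") >= (1, 9, 14)
    assert not parse_version("1.8.3") >= (1, 9, 14)
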
@@ -172,11 +182,9 @@ class PGDialect_pg8000(PGDialect):
)
def do_begin_twophase(self, connection, xid):
- print("begin twophase", xid)
connection.connection.tpc_begin((0, xid, ''))
def do_prepare_twophase(self, connection, xid):
- print("prepare twophase", xid)
connection.connection.tpc_prepare()
def do_rollback_twophase(
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 2dc4d43f2..d2cc8890f 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -798,14 +798,14 @@ class Connection(Connectable):
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# note this is usually dict but we support RowProxy
- # as well; but dict.keys() as an iterator is OK
+ # as well; but dict.keys() as an iterable is OK
keys = distilled_params[0].keys()
else:
keys = []
dialect = self.dialect
if 'compiled_cache' in self._execution_options:
- key = dialect, elem, tuple(keys), len(distilled_params) > 1
+ key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1
if key in self._execution_options['compiled_cache']:
compiled_sql = self._execution_options['compiled_cache'][key]
else:
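
The added ``sorted()`` matters because parameter dictionaries can present
their keys in different orders; without it, logically identical statements
could produce distinct cache keys. A toy illustration of the idea (the cache
dict is hypothetical)::

    cache = {}

    def cache_key(keys):
        return tuple(sorted(keys))

    cache[cache_key(["x", "y"])] = "compiled form"
    assert cache_key(["y", "x"]) in cache  # same key regardless of order
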
diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py
index 270e95c9c..b3d79bcf4 100644
--- a/lib/sqlalchemy/event/api.py
+++ b/lib/sqlalchemy/event/api.py
@@ -58,6 +58,32 @@ def listen(target, identifier, fn, *args, **kw):
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
+ .. note::
+
+ The :func:`.listen` function cannot be called at the same time
+ that the target event is being run. This has implications
+ for thread safety, and also means an event cannot be added
+ from inside the listener function for itself. The list of
+ events to be run is present inside of a mutable collection
+ that can't be changed during iteration.
+
+ Event registration and removal is not intended to be a "high
+ velocity" operation; it is a configurational operation. For
+ systems that need to quickly associate and deassociate with
+ events at high scale, use a mutable structure that is handled
+ from inside of a single listener.
+
+ .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
+ used as the container for the list of events, which explicitly
+ disallows collection mutation while the collection is being
+ iterated.
+
+ .. seealso::
+
+ :func:`.listens_for`
+
+ :func:`.remove`
+
"""
_event_key(target, identifier, fn).listen(*args, **kw)
@@ -89,6 +115,10 @@ def listens_for(target, identifier, *args, **kw):
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
+ .. seealso::
+
+ :func:`.listen` - general description of event listening
+
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
@@ -120,6 +150,30 @@ def remove(target, identifier, fn):
.. versionadded:: 0.9.0
+ .. note::
+
+ The :func:`.remove` function cannot be called at the same time
+ that the target event is being run. This has implications
+ for thread safety, and also means an event cannot be removed
+ from inside the listener function for itself. The list of
+ events to be run is present inside of a mutable collection
+ that can't be changed during iteration.
+
+ Event registration and removal is not intended to be a "high
+ velocity" operation; it is a configurational operation. For
+ systems that need to quickly associate and deassociate with
+ events at high scale, use a mutable structure that is handled
+ from inside of a single listener.
+
+ .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
+ used as the container for the list of events, which explicitly
+ disallows collection mutation while the collection is being
+ iterated.
+
+ .. seealso::
+
+ :func:`.listen`
+
"""
_event_key(target, identifier, fn).remove()
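
The deque behavior both notes refer to can be demonstrated directly: mutating
a ``collections.deque`` during iteration raises ``RuntimeError``, which turns
a listen/remove call made mid-dispatch into an explicit error rather than
silent corruption. A minimal sketch::

    import collections

    listeners = collections.deque([lambda: None])
    try:
        for fn in listeners:
            # simulates calling listen() from inside a listener
            listeners.append(lambda: None)
    except RuntimeError as err:
        print(err)  # deque mutated during iteration
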
diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py
index 7641b595a..dba1063cf 100644
--- a/lib/sqlalchemy/event/attr.py
+++ b/lib/sqlalchemy/event/attr.py
@@ -37,6 +37,7 @@ from . import registry
from . import legacy
from itertools import chain
import weakref
+import collections
class RefCollection(object):
@@ -96,8 +97,8 @@ class _DispatchDescriptor(RefCollection):
self.update_subclass(cls)
else:
if cls not in self._clslevel:
- self._clslevel[cls] = []
- self._clslevel[cls].insert(0, event_key._listen_fn)
+ self._clslevel[cls] = collections.deque()
+ self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def append(self, event_key, propagate):
@@ -113,13 +114,13 @@ class _DispatchDescriptor(RefCollection):
self.update_subclass(cls)
else:
if cls not in self._clslevel:
- self._clslevel[cls] = []
+ self._clslevel[cls] = collections.deque()
self._clslevel[cls].append(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def update_subclass(self, target):
if target not in self._clslevel:
- self._clslevel[target] = []
+ self._clslevel[target] = collections.deque()
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
@@ -145,7 +146,7 @@ class _DispatchDescriptor(RefCollection):
to_clear = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
- dispatcher[:] = []
+ dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj):
@@ -287,7 +288,7 @@ class _ListenerCollection(RefCollection, _CompoundListener):
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.__name__
- self.listeners = []
+ self.listeners = collections.deque()
self.propagate = set()
def for_modify(self, obj):
@@ -337,7 +338,7 @@ class _ListenerCollection(RefCollection, _CompoundListener):
def clear(self):
registry._clear(self, self.listeners)
self.propagate.clear()
- self.listeners[:] = []
+ self.listeners.clear()
class _JoinedDispatchDescriptor(object):
diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py
index a34de3cd7..ba2f671a3 100644
--- a/lib/sqlalchemy/event/registry.py
+++ b/lib/sqlalchemy/event/registry.py
@@ -243,4 +243,4 @@ class _EventKey(object):
def prepend_to_list(self, owner, list_):
_stored_in_collection(self, owner)
- list_.insert(0, self._listen_fn)
+ list_.appendleft(self._listen_fn)
diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py
index 7469bcbda..e49e9ea8b 100644
--- a/lib/sqlalchemy/ext/mutable.py
+++ b/lib/sqlalchemy/ext/mutable.py
@@ -621,16 +621,20 @@ class MutableDict(Mutable, dict):
dict.__delitem__(self, key)
self.changed()
+ def update(self, *a, **kw):
+ dict.update(self, *a, **kw)
+ self.changed()
+
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
- """Convert plain dictionary to MutableDict."""
- if not isinstance(value, MutableDict):
+ """Convert plain dictionary to instance of this class."""
+ if not isinstance(value, cls):
if isinstance(value, dict):
- return MutableDict(value)
+ return cls(value)
return Mutable.coerce(key, value)
else:
return value
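
With ``coerce()`` now written against ``cls``, subclasses of ``MutableDict``
coerce plain dicts into themselves; a sketch assuming this patch is applied
(the subclass is hypothetical)::

    from sqlalchemy.ext.mutable import MutableDict

    class AuditedDict(MutableDict):
        pass

    coerced = AuditedDict.coerce("data", {"a": 1})
    assert type(coerced) is AuditedDict  # previously always MutableDict
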
diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py
index aa99673ba..8edaa2744 100644
--- a/lib/sqlalchemy/orm/events.py
+++ b/lib/sqlalchemy/orm/events.py
@@ -293,18 +293,6 @@ class InstanceEvents(event.Events):
"""
- def resurrect(self, target):
- """Receive an object instance as it is 'resurrected' from
- garbage collection, which occurs when a "dirty" state falls
- out of scope.
-
- :param target: the mapped instance. If
- the event is configured with ``raw=True``, this will
- instead be the :class:`.InstanceState` state-management
- object associated with the instance.
-
- """
-
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
index d9cdd791f..0fa541194 100644
--- a/lib/sqlalchemy/orm/identity.py
+++ b/lib/sqlalchemy/orm/identity.py
@@ -150,7 +150,7 @@ class WeakInstanceDict(IdentityMap):
return default
return o
- def _items(self):
+ def items(self):
values = self.all_states()
result = []
for state in values:
@@ -159,7 +159,7 @@ class WeakInstanceDict(IdentityMap):
result.append((state.key, value))
return result
- def _values(self):
+ def values(self):
values = self.all_states()
result = []
for state in values:
@@ -169,9 +169,10 @@ class WeakInstanceDict(IdentityMap):
return result
+ def __iter__(self):
+ return iter(self.keys())
+
if util.py2k:
- items = _items
- values = _values
def iteritems(self):
return iter(self.items())
@@ -179,19 +180,6 @@ class WeakInstanceDict(IdentityMap):
def itervalues(self):
return iter(self.values())
- def __iter__(self):
- return iter(self.keys())
-
- else:
- def items(self):
- return iter(self._items())
-
- def values(self):
- return iter(self._values())
-
- def __iter__(self):
- return self.keys()
-
def all_states(self):
if util.py2k:
return self._dict.values()
@@ -217,11 +205,8 @@ class StrongInstanceDict(IdentityMap):
def iteritems(self):
return self._dict.iteritems()
- def __iter__(self):
- return iter(self.keys())
- else:
- def __iter__(self):
- return self.keys()
+ def __iter__(self):
+ return iter(self._dict)
def __getitem__(self, key):
return self._dict[key]
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 06ec2bf14..aab28ee0c 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -1127,7 +1127,6 @@ class Mapper(InspectionAttr):
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
- event.listen(manager, 'resurrect', _event_on_resurrect, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
@@ -1189,14 +1188,6 @@ class Mapper(InspectionAttr):
util.ordered_column_set(t.c).\
intersection(all_cols)
- # determine cols that aren't expressed within our tables; mark these
- # as "read only" properties which are refreshed upon INSERT/UPDATE
- self._readonly_props = set(
- self._columntoproperty[col]
- for col in self._columntoproperty
- if not hasattr(col, 'table') or
- col.table not in self._cols_by_table)
-
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
@@ -1247,6 +1238,15 @@ class Mapper(InspectionAttr):
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
+ # determine cols that aren't expressed within our tables; mark these
+ # as "read only" properties which are refreshed upon INSERT/UPDATE
+ self._readonly_props = set(
+ self._columntoproperty[col]
+ for col in self._columntoproperty
+ if self._columntoproperty[col] not in self._primary_key_props and
+ (not hasattr(col, 'table') or
+ col.table not in self._cols_by_table))
+
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
@@ -1892,6 +1892,54 @@ class Mapper(InspectionAttr):
"""
+ @_memoized_configured_property
+ def _insert_cols_as_none(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ col.key for col in columns
+ if not col.primary_key and
+ not col.server_default and not col.default)
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _propkey_to_col(self):
+ return dict(
+ (
+ table,
+ dict(
+ (self._columntoproperty[col].key, col)
+ for col in columns
+ )
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _pk_keys_by_table(self):
+ return dict(
+ (
+ table,
+ frozenset([col.key for col in pks])
+ )
+ for table, pks in self._pks_by_table.items()
+ )
+
+ @_memoized_configured_property
+ def _server_default_cols(self):
+ return dict(
+ (
+ table,
+ frozenset([
+ col for col in columns
+ if col.server_default is not None])
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
@@ -2307,18 +2355,29 @@ class Mapper(InspectionAttr):
dict_ = state.dict
manager = state.manager
return [
- manager[self._columntoproperty[col].key].
+ manager[prop.key].
impl.get(state, dict_,
attributes.PASSIVE_RETURN_NEVER_SET)
- for col in self.primary_key
+ for prop in self._primary_key_props
]
+ @_memoized_configured_property
+ def _primary_key_props(self):
+ # TODO: this should really be called "identity key props",
+ # as it does not necessarily include primary key columns within
+ # individual tables
+ return [self._columntoproperty[col] for col in self.primary_key]
+
def _get_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
+ def _set_committed_state_attr_by_column(self, state, dict_, column, value):
+ prop = self._columntoproperty[column]
+ state.manager[prop.key].impl.set_committed_value(state, dict_, value)
+
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
@@ -2702,16 +2761,6 @@ def _event_on_init(state, args, kwargs):
instrumenting_mapper._set_polymorphic_identity(state)
-def _event_on_resurrect(state):
- # re-populate the primary key elements
- # of the dict based on the mapping.
- instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
- if instrumenting_mapper:
- for col, val in zip(instrumenting_mapper.primary_key, state.key[1]):
- instrumenting_mapper._set_state_attr_by_column(
- state, state.dict, col, val)
-
-
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index 295d4a3d0..511a9cef0 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -18,7 +18,7 @@ import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
-from .base import _state_mapper, state_str, _attr_as_key
+from .base import state_str, _attr_as_key
from ..sql import expression
from . import loading
@@ -40,32 +40,55 @@ def save_obj(base_mapper, states, uowtransaction, single=False):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
- states_to_insert, states_to_update = _organize_states_for_save(
- base_mapper,
- states,
- uowtransaction)
-
+ states_to_update = []
+ states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
- for table, mapper in base_mapper._sorted_tables.items():
- insert = _collect_insert_commands(base_mapper, uowtransaction,
- table, states_to_insert)
-
- update = _collect_update_commands(base_mapper, uowtransaction,
- table, states_to_update)
-
- if update:
- _emit_update_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, update)
-
- if insert:
- _emit_insert_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, insert)
+ for (state, dict_, mapper, connection,
+ has_identity,
+ row_switch, update_version_id) in _organize_states_for_save(
+ base_mapper, states, uowtransaction
+ ):
+ if has_identity or row_switch:
+ states_to_update.append(
+ (state, dict_, mapper, connection, update_version_id)
+ )
+ else:
+ states_to_insert.append(
+ (state, dict_, mapper, connection)
+ )
- _finalize_insert_update_commands(base_mapper, uowtransaction,
- states_to_insert, states_to_update)
+ for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+ insert = _collect_insert_commands(table, states_to_insert)
+
+ update = _collect_update_commands(
+ uowtransaction, table, states_to_update)
+
+ _emit_update_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, update)
+
+ _emit_insert_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, insert)
+
+ _finalize_insert_update_commands(
+ base_mapper, uowtransaction,
+ (
+ (state, state_dict, mapper, connection, False)
+ for state, state_dict, mapper, connection in states_to_insert
+ )
+ )
+ _finalize_insert_update_commands(
+ base_mapper, uowtransaction,
+ (
+ (state, state_dict, mapper, connection, True)
+ for state, state_dict, mapper, connection,
+ update_version_id in states_to_update
+ )
+ )
def post_update(base_mapper, states, uowtransaction, post_update_cols):
@@ -75,19 +98,28 @@ def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""
cached_connections = _cached_connection_dict(base_mapper)
- states_to_update = _organize_states_for_post_update(
+ states_to_update = list(_organize_states_for_post_update(
base_mapper,
- states, uowtransaction)
+ states, uowtransaction))
for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+
+ update = (
+ (state, state_dict, sub_mapper, connection)
+ for
+ state, state_dict, sub_mapper, connection in states_to_update
+ if table in sub_mapper._pks_by_table
+ )
+
update = _collect_post_update_commands(base_mapper, uowtransaction,
- table, states_to_update,
+ table, update,
post_update_cols)
- if update:
- _emit_post_update_statements(base_mapper, uowtransaction,
- cached_connections,
- mapper, table, update)
+ _emit_post_update_statements(base_mapper, uowtransaction,
+ cached_connections,
+ mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
@@ -100,24 +132,26 @@ def delete_obj(base_mapper, states, uowtransaction):
cached_connections = _cached_connection_dict(base_mapper)
- states_to_delete = _organize_states_for_delete(
+ states_to_delete = list(_organize_states_for_delete(
base_mapper,
states,
- uowtransaction)
+ uowtransaction))
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
+ mapper = table_to_mapper[table]
+ if table not in mapper._pks_by_table:
+ continue
+
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
- mapper = table_to_mapper[table]
-
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
- for state, state_dict, mapper, has_identity, connection \
- in states_to_delete:
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
@@ -133,17 +167,15 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
"""
- states_to_insert = []
- states_to_update = []
-
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
+
instance_key = state.key or mapper._identity_key_from_state(state)
- row_switch = None
+ row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
@@ -180,18 +212,14 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
uowtransaction.remove_state_actions(existing)
row_switch = existing
- if not has_identity and not row_switch:
- states_to_insert.append(
- (state, dict_, mapper, connection,
- has_identity, instance_key, row_switch)
- )
- else:
- states_to_update.append(
- (state, dict_, mapper, connection,
- has_identity, instance_key, row_switch)
- )
+ if (has_identity or row_switch) and mapper.version_id_col is not None:
+ update_version_id = mapper._get_committed_state_attr_by_column(
+ row_switch if row_switch else state,
+ row_switch.dict if row_switch else dict_,
+ mapper.version_id_col)
- return states_to_insert, states_to_update
+ yield (state, dict_, mapper, connection,
+ has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
@@ -204,8 +232,7 @@ def _organize_states_for_post_update(base_mapper, states,
the execution per state.
"""
- return list(_connections_for_states(base_mapper, uowtransaction,
- states))
+ return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
@@ -216,72 +243,73 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction):
mapper, the connection to use for the execution per state.
"""
- states_to_delete = []
-
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
- states_to_delete.append((state, dict_, mapper,
- bool(state.key), connection))
- return states_to_delete
+ if mapper.version_id_col is not None:
+ update_version_id = \
+ mapper._get_committed_state_attr_by_column(
+ state, dict_,
+ mapper.version_id_col)
+ else:
+ update_version_id = None
+
+ yield (
+ state, dict_, mapper, connection, update_version_id)
-def _collect_insert_commands(base_mapper, uowtransaction, table,
- states_to_insert):
+def _collect_insert_commands(table, states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
- insert = []
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_insert:
+ for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
- pks = mapper._pks_by_table[table]
-
params = {}
value_params = {}
- has_all_pks = True
- has_all_defaults = True
- for col in mapper._cols_by_table[table]:
- if col is mapper.version_id_col and \
- mapper.version_id_generator is not False:
- val = mapper.version_id_generator(None)
- params[col.key] = val
+ propkey_to_col = mapper._propkey_to_col[table]
+
+ for propkey in set(propkey_to_col).intersection(state_dict):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+ if value is None:
+ continue
+ elif isinstance(value, sql.ClauseElement):
+ value_params[col.key] = value
else:
- # pull straight from the dict for
- # pending objects
- prop = mapper._columntoproperty[col]
- value = state_dict.get(prop.key, None)
-
- if value is None:
- if col in pks:
- has_all_pks = False
- elif col.default is None and \
- col.server_default is None:
- params[col.key] = value
- elif col.server_default is not None and \
- mapper.base_mapper.eager_defaults:
- has_all_defaults = False
-
- elif isinstance(value, sql.ClauseElement):
- value_params[col] = value
- else:
- params[col.key] = value
+ params[col.key] = value
+
+ for colkey in mapper._insert_cols_as_none[table].\
+ difference(params).difference(value_params):
+ params[colkey] = None
+
+ has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
+
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = mapper._server_default_cols[table].\
+ issubset(params)
+ else:
+ has_all_defaults = True
+
+ if mapper.version_id_generator is not False \
+ and mapper.version_id_col is not None and \
+ mapper.version_id_col in mapper._cols_by_table[table]:
+ params[mapper.version_id_col.key] = \
+ mapper.version_id_generator(None)
- insert.append((state, state_dict, params, mapper,
- connection, value_params, has_all_pks,
- has_all_defaults))
- return insert
+ yield (
+ state, state_dict, params, mapper,
+ connection, value_params, has_all_pks,
+ has_all_defaults)
-def _collect_update_commands(base_mapper, uowtransaction,
- table, states_to_update):
+def _collect_update_commands(uowtransaction, table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
@@ -293,9 +321,9 @@ def _collect_update_commands(base_mapper, uowtransaction,
"""
- update = []
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_update:
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_update:
+
if table not in mapper._pks_by_table:
continue
@@ -304,98 +332,59 @@ def _collect_update_commands(base_mapper, uowtransaction,
params = {}
value_params = {}
- hasdata = hasnull = False
- for col in mapper._cols_by_table[table]:
- if col is mapper.version_id_col:
- params[col._label] = \
- mapper._get_committed_state_attr_by_column(
- row_switch or state,
- row_switch and row_switch.dict
- or state_dict,
- col)
+ propkey_to_col = mapper._propkey_to_col[table]
- prop = mapper._columntoproperty[col]
- history = state.manager[prop.key].impl.get_history(
- state, state_dict, attributes.PASSIVE_NO_INITIALIZE
- )
- if history.added:
- params[col.key] = history.added[0]
- hasdata = True
+ for propkey in set(propkey_to_col).intersection(state.committed_state):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+
+ if not state.manager[propkey].impl.is_equal(
+ value, state.committed_state[propkey]):
+ if isinstance(value, sql.ClauseElement):
+ value_params[col] = value
+ else:
+ params[col.key] = value
+
+ if update_version_id is not None:
+ col = mapper.version_id_col
+ params[col._label] = update_version_id
+
+ if col.key not in params and \
+ mapper.version_id_generator is not False:
+ val = mapper.version_id_generator(update_version_id)
+ params[col.key] = val
+
+ if not (params or value_params):
+ continue
+
+ pk_params = {}
+ for col in pks:
+ propkey = mapper._columntoproperty[col].key
+ history = state.manager[propkey].impl.get_history(
+ state, state_dict, attributes.PASSIVE_OFF)
+
+ if history.added:
+ if not history.deleted or \
+ ("pk_cascaded", state, col) in \
+ uowtransaction.attributes:
+ pk_params[col._label] = history.added[0]
+ params.pop(col.key, None)
else:
- if mapper.version_id_generator is not False:
- val = mapper.version_id_generator(params[col._label])
- params[col.key] = val
-
- # HACK: check for history, in case the
- # history is only
- # in a different table than the one
- # where the version_id_col is.
- for prop in mapper._columntoproperty.values():
- history = (
- state.manager[prop.key].impl.get_history(
- state, state_dict,
- attributes.PASSIVE_NO_INITIALIZE))
- if history.added:
- hasdata = True
+ # else, use the old value to locate the row
+ pk_params[col._label] = history.deleted[0]
+ params[col.key] = history.added[0]
else:
- prop = mapper._columntoproperty[col]
- history = state.manager[prop.key].impl.get_history(
- state, state_dict,
- attributes.PASSIVE_NO_INITIALIZE)
- if history.added:
- if isinstance(history.added[0],
- sql.ClauseElement):
- value_params[col] = history.added[0]
- else:
- value = history.added[0]
- params[col.key] = value
-
- if col in pks:
- if history.deleted and \
- not row_switch:
- # if passive_updates and sync detected
- # this was a pk->pk sync, use the new
- # value to locate the row, since the
- # DB would already have set this
- if ("pk_cascaded", state, col) in \
- uowtransaction.attributes:
- value = history.added[0]
- params[col._label] = value
- else:
- # use the old value to
- # locate the row
- value = history.deleted[0]
- params[col._label] = value
- hasdata = True
- else:
- # row switch logic can reach us here
- # remove the pk from the update params
- # so the update doesn't
- # attempt to include the pk in the
- # update statement
- del params[col.key]
- value = history.added[0]
- params[col._label] = value
- if value is None:
- hasnull = True
- else:
- hasdata = True
- elif col in pks:
- value = state.manager[prop.key].impl.get(
- state, state_dict)
- if value is None:
- hasnull = True
- params[col._label] = value
+ pk_params[col._label] = history.unchanged[0]
- if hasdata:
- if hasnull:
+ if params or value_params:
+ if None in pk_params.values():
raise orm_exc.FlushError(
- "Can't update table "
- "using NULL for primary "
+ "Can't update table using NULL for primary "
"key value")
- update.append((state, state_dict, params, mapper,
- connection, value_params))
- return update
+ params.update(pk_params)
+ yield (
+ state, state_dict, params, mapper,
+ connection, value_params)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
@@ -405,10 +394,10 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
"""
- update = []
for state, state_dict, mapper, connection in states_to_update:
- if table not in mapper._pks_by_table:
- continue
+
+ # assert table in mapper._pks_by_table
+
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
@@ -417,8 +406,8 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
- state,
- state_dict, col)
+ state,
+ state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
@@ -430,9 +419,7 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
params[col.key] = value
hasdata = True
if hasdata:
- update.append((state, state_dict, params, mapper,
- connection))
- return update
+ yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
@@ -440,33 +427,28 @@ def _collect_delete_commands(base_mapper, uowtransaction, table,
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
- delete = util.defaultdict(list)
+ for state, state_dict, mapper, connection, \
+ update_version_id in states_to_delete:
- for state, state_dict, mapper, has_identity, connection \
- in states_to_delete:
- if not has_identity or table not in mapper._pks_by_table:
+ if table not in mapper._pks_by_table:
continue
params = {}
- delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
- state, state_dict, col)
+ state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
- if mapper.version_id_col is not None and \
+ if update_version_id is not None and \
table.c.contains_column(mapper.version_id_col):
- params[mapper.version_id_col.key] = \
- mapper._get_committed_state_attr_by_column(
- state, state_dict,
- mapper.version_id_col)
- return delete
+ params[mapper.version_id_col.key] = update_version_id
+ yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
@@ -500,41 +482,80 @@ def _emit_update_statements(base_mapper, uowtransaction,
statement = base_mapper._memo(('update', table), update_stmt)
- rows = 0
- for state, state_dict, params, mapper, \
- connection, value_params in update:
-
- if value_params:
- c = connection.execute(
- statement.values(value_params),
- params)
+ for (connection, paramkeys, hasvalue), \
+ records in groupby(
+ update,
+ lambda rec: (
+ rec[4],
+ tuple(sorted(rec[2])),
+ bool(rec[5]))):
+
+ rows = 0
+ records = list(records)
+ if hasvalue:
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ c = connection.execute(
+ statement.values(value_params),
+ params)
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+ rows += c.rowcount
else:
- c = cached_connections[connection].\
- execute(statement, params)
-
- _postfetch(
- mapper,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- c.context.compiled_parameters[0],
- value_params)
- rows += c.rowcount
-
- if connection.dialect.supports_sane_rowcount:
- if rows != len(update):
- raise orm_exc.StaleDataError(
- "UPDATE statement on table '%s' expected to "
- "update %d row(s); %d were matched." %
- (table.description, len(update), rows))
-
- elif needs_version_id:
- util.warn("Dialect %s does not support updated rowcount "
- "- versioning cannot be verified." %
- c.dialect.dialect_description,
- stacklevel=12)
+ if needs_version_id and \
+ not connection.dialect.supports_sane_multi_rowcount and \
+ connection.dialect.supports_sane_rowcount:
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ c = cached_connections[connection].\
+ execute(statement, params)
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+ rows += c.rowcount
+ else:
+ multiparams = [rec[2] for rec in records]
+ c = cached_connections[connection].\
+ execute(statement, multiparams)
+
+ rows += c.rowcount
+ for state, state_dict, params, mapper, \
+ connection, value_params in records:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
+
+ if connection.dialect.supports_sane_rowcount:
+ if rows != len(records):
+ raise orm_exc.StaleDataError(
+ "UPDATE statement on table '%s' expected to "
+ "update %d row(s); %d were matched." %
+ (table.description, len(records), rows))
+
+ elif needs_version_id:
+ util.warn("Dialect %s does not support updated rowcount "
+ "- versioning cannot be verified." %
+ c.dialect.dialect_description,
+ stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
@@ -547,7 +568,7 @@ def _emit_insert_statements(base_mapper, uowtransaction,
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
- list(rec[2].keys()),
+ tuple(sorted(rec[2].keys())),
bool(rec[5]),
rec[6], rec[7])
):
@@ -604,13 +625,7 @@ def _emit_insert_statements(base_mapper, uowtransaction,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
- # TODO: would rather say:
- # state_dict[prop.key] = pk
- mapper_rec._set_state_attr_by_column(
- state,
- state_dict,
- col, pk)
-
+ state_dict[prop.key] = pk
_postfetch(
mapper_rec,
uowtransaction,
@@ -643,11 +658,10 @@ def _emit_post_update_statements(base_mapper, uowtransaction,
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
- update, lambda rec: (rec[4], list(rec[2].keys()))
+ update, lambda rec: (rec[1], sorted(rec[0]))
):
connection = key[0]
- multiparams = [params for state, state_dict,
- params, mapper, conn in grouper]
+ multiparams = [params for params, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
@@ -677,8 +691,15 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
return table.delete(clause)
- for connection, del_objects in delete.items():
- statement = base_mapper._memo(('delete', table), delete_stmt)
+ statement = base_mapper._memo(('delete', table), delete_stmt)
+ for connection, recs in groupby(
+ delete,
+ lambda rec: rec[1]
+ ):
+ del_objects = [
+ params
+ for params, connection in recs
+ ]
connection = cached_connections[connection]
@@ -731,15 +752,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
)
-def _finalize_insert_update_commands(base_mapper, uowtransaction,
- states_to_insert, states_to_update):
+def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
- for state, state_dict, mapper, connection, has_identity, \
- instance_key, row_switch in states_to_insert + \
- states_to_update:
+ for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
@@ -795,11 +813,11 @@ def _postfetch(mapper, uowtransaction, table,
for col in returning_cols:
if col.primary_key:
continue
- mapper._set_state_attr_by_column(state, dict_, col, row[col])
+ dict_[mapper._columntoproperty[col].key] = row[col]
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
- mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
+ dict_[mapper._columntoproperty[c].key] = params[c.key]
if postfetch_cols:
state._expire_attributes(state.dict,
@@ -833,17 +851,14 @@ def _connections_for_states(base_mapper, uowtransaction, states):
connection_callable = \
uowtransaction.session.connection_callable
else:
- connection = None
+ connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
- elif not connection:
- connection = uowtransaction.transaction.connection(
- base_mapper)
- mapper = _state_mapper(state)
+ mapper = state.manager.mapper
yield state, state.dict, mapper, connection
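
The ``groupby()`` pattern used throughout the rewritten emit functions
batches records sharing a connection and parameter-key signature into a
single executemany()-style call; a toy illustration with plain tuples (the
records are hypothetical)::

    from itertools import groupby

    records = [
        ("conn1", {"x": 1, "id": 5}),
        ("conn1", {"x": 2, "id": 6}),
        ("conn2", {"x": 3, "id": 7}),
    ]
    for (conn, paramkeys), recs in groupby(
            records, lambda rec: (rec[0], tuple(sorted(rec[1])))):
        multiparams = [params for _, params in recs]
        print(conn, multiparams)  # one batched execute per group
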
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index d26bbf32c..bc9affe4a 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -305,7 +305,7 @@ class Pool(log.Identified):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
- This method is used in conjunection with :meth:`dispose`
+ This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
@@ -443,16 +443,17 @@ class _ConnectionRecord(object):
except:
rec.checkin()
raise
- fairy = _ConnectionFairy(dbapi_connection, rec)
+ echo = pool._should_log_debug()
+ fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
- rec, pool, ref, pool._echo)
+ rec, pool, ref, echo)
)
_refs.add(rec)
- if pool._echo:
+ if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
@@ -560,9 +561,10 @@ def _finalize_fairy(connection, connection_record,
connection)
try:
- fairy = fairy or _ConnectionFairy(connection, connection_record)
+ fairy = fairy or _ConnectionFairy(
+ connection, connection_record, echo)
assert fairy.connection is connection
- fairy._reset(pool, echo)
+ fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
@@ -603,9 +605,10 @@ class _ConnectionFairy(object):
"""
- def __init__(self, dbapi_connection, connection_record):
+ def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
+ self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
@@ -642,7 +645,6 @@ class _ConnectionFairy(object):
fairy._pool = pool
fairy._counter = 0
- fairy._echo = pool._should_log_debug()
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
@@ -684,11 +686,11 @@ class _ConnectionFairy(object):
_close = _checkin
- def _reset(self, pool, echo):
+ def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
- if echo:
+ if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
@@ -698,7 +700,7 @@ class _ConnectionFairy(object):
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
- if echo:
+ if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index e45510aa4..fac4980b0 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -1981,11 +1981,13 @@ class SQLCompiler(Compiled):
need_pks = self.isinsert and \
not self.inline and \
- not stmt._returning
+ not stmt._returning and \
+ not stmt._has_multi_parameters
implicit_returning = need_pks and \
self.dialect.implicit_returning and \
stmt.table.implicit_returning
+
if self.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py
index f7e033d85..1934d0776 100644
--- a/lib/sqlalchemy/sql/dml.py
+++ b/lib/sqlalchemy/sql/dml.py
@@ -269,6 +269,13 @@ class ValuesBase(UpdateBase):
.. versionadded:: 0.8
Support for multiple-VALUES INSERT statements.
+ .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
+ clause, even a list of length one,
+ implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The statement
+ deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
.. seealso::
@@ -434,8 +441,13 @@ class Insert(ValuesBase):
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`.Connection.execute`.
- :param inline: if True, SQL defaults will be compiled 'inline' into
- the statement and not pre-executed.
+ :param inline: if True, no attempt will be made to retrieve the
+ SQL-generated default values to be provided within the statement;
+ in particular,
+ this allows SQL expressions to be rendered 'inline' within the
+ statement without the need to pre-execute them beforehand; for
+ backends that support "returning", this turns off the "implicit
+ returning" feature for the statement.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
@@ -495,17 +507,12 @@ class Insert(ValuesBase):
would normally raise an exception if these column lists don't
correspond.
- .. note::
-
- Depending on backend, it may be necessary for the :class:`.Insert`
- statement to be constructed using the ``inline=True`` flag; this
- flag will prevent the implicit usage of ``RETURNING`` when the
- ``INSERT`` statement is rendered, which isn't supported on a
- backend such as Oracle in conjunction with an ``INSERT..SELECT``
- combination::
-
- sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
- ins = table2.insert(inline=True).from_select(['a', 'b'], sel)
+ .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT
+ implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The statement
+ deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
.. note::
@@ -525,6 +532,7 @@ class Insert(ValuesBase):
self._process_colparams(dict((n, Null()) for n in names))
self.select_names = names
+ self.inline = True
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
@@ -728,10 +736,10 @@ class Delete(UpdateBase):
:meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
- :param table: The table to be updated.
+ :param table: The table to delete rows from.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
- condition of the ``UPDATE`` statement. Note that the
+ condition of the ``DELETE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
.. seealso::
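
A sketch of the 1.0.0 multi-VALUES behavior described above, runnable without
a database (the table is illustrative); a list argument renders a single
multi-row VALUES clause and implies ``inline=True``, so no
``inserted_primary_key`` fetch is attempted::

    from sqlalchemy import Table, Column, Integer, MetaData

    m = MetaData()
    t = Table("t", m,
              Column("id", Integer, primary_key=True),
              Column("x", Integer))
    stmt = t.insert().values([{"x": 1}, {"x": 2}])
    print(stmt)  # one INSERT INTO t (x) VALUES (...), (...) statement
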
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index 8099dca75..c8e815d24 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -1269,7 +1269,8 @@ class Column(SchemaItem, ColumnClause):
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
- event.listen(self, 'after_parent_attach', fn)
+ else:
+ event.listen(self, 'after_parent_attach', fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, unitialized.
diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py
index 9052df570..67c13231e 100644
--- a/lib/sqlalchemy/testing/engines.py
+++ b/lib/sqlalchemy/testing/engines.py
@@ -7,15 +7,12 @@
from __future__ import absolute_import
-import types
import weakref
-from collections import deque
from . import config
from .util import decorator
from .. import event, pool
import re
import warnings
-from .. import util
class ConnectionKiller(object):
@@ -339,112 +336,3 @@ def proxying_engine(conn_cls=DBAPIProxyConnection,
return testing_engine(options={'creator': mock_conn})
-class ReplayableSession(object):
- """A simple record/playback tool.
-
- This is *not* a mock testing class. It only records a session for later
- playback and makes no assertions on call consistency whatsoever. It's
- unlikely to be suitable for anything other than DB-API recording.
-
- """
-
- Callable = object()
- NoAttribute = object()
-
- if util.py2k:
- Natives = set([getattr(types, t)
- for t in dir(types) if not t.startswith('_')]).\
- difference([getattr(types, t)
- for t in ('FunctionType', 'BuiltinFunctionType',
- 'MethodType', 'BuiltinMethodType',
- 'LambdaType', 'UnboundMethodType',)])
- else:
- Natives = set([getattr(types, t)
- for t in dir(types) if not t.startswith('_')]).\
- union([type(t) if not isinstance(t, type)
- else t for t in __builtins__.values()]).\
- difference([getattr(types, t)
- for t in ('FunctionType', 'BuiltinFunctionType',
- 'MethodType', 'BuiltinMethodType',
- 'LambdaType', )])
-
- def __init__(self):
- self.buffer = deque()
-
- def recorder(self, base):
- return self.Recorder(self.buffer, base)
-
- def player(self):
- return self.Player(self.buffer)
-
- class Recorder(object):
- def __init__(self, buffer, subject):
- self._buffer = buffer
- self._subject = subject
-
- def __call__(self, *args, **kw):
- subject, buffer = [object.__getattribute__(self, x)
- for x in ('_subject', '_buffer')]
-
- result = subject(*args, **kw)
- if type(result) not in ReplayableSession.Natives:
- buffer.append(ReplayableSession.Callable)
- return type(self)(buffer, result)
- else:
- buffer.append(result)
- return result
-
- @property
- def _sqla_unwrap(self):
- return self._subject
-
- def __getattribute__(self, key):
- try:
- return object.__getattribute__(self, key)
- except AttributeError:
- pass
-
- subject, buffer = [object.__getattribute__(self, x)
- for x in ('_subject', '_buffer')]
- try:
- result = type(subject).__getattribute__(subject, key)
- except AttributeError:
- buffer.append(ReplayableSession.NoAttribute)
- raise
- else:
- if type(result) not in ReplayableSession.Natives:
- buffer.append(ReplayableSession.Callable)
- return type(self)(buffer, result)
- else:
- buffer.append(result)
- return result
-
- class Player(object):
- def __init__(self, buffer):
- self._buffer = buffer
-
- def __call__(self, *args, **kw):
- buffer = object.__getattribute__(self, '_buffer')
- result = buffer.popleft()
- if result is ReplayableSession.Callable:
- return self
- else:
- return result
-
- @property
- def _sqla_unwrap(self):
- return None
-
- def __getattribute__(self, key):
- try:
- return object.__getattribute__(self, key)
- except AttributeError:
- pass
- buffer = object.__getattribute__(self, '_buffer')
- result = buffer.popleft()
- if result is ReplayableSession.Callable:
- return self
- elif result is ReplayableSession.NoAttribute:
- raise AttributeError(key)
- else:
- return result
diff --git a/lib/sqlalchemy/testing/plugin/provision.py b/lib/sqlalchemy/testing/plugin/provision.py
index baec8a299..c6b9030f5 100644
--- a/lib/sqlalchemy/testing/plugin/provision.py
+++ b/lib/sqlalchemy/testing/plugin/provision.py
@@ -36,14 +36,8 @@ class register(object):
def create_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
- url = cfg.db.url
- backend = url.get_backend_name()
_create_db(cfg, cfg.db, follower_ident)
- new_url = sa_url.make_url(str(url))
-
- new_url.database = follower_ident
-
def configure_follower(follower_ident):
for cfg in config.Config.all_configs():
@@ -63,7 +57,6 @@ def setup_config(db_url, db_opts, options, file_config, follower_ident):
def drop_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
- url = cfg.db.url
_drop_db(cfg, cfg.db, follower_ident)
@@ -110,9 +103,13 @@ def _follower_url_from_main(url, ident):
return url
-#@_follower_url_from_main.for_db("sqlite")
-#def _sqlite_follower_url_from_main(url, ident):
-# return sa_url.make_url("sqlite:///%s.db" % ident)
+@_follower_url_from_main.for_db("sqlite")
+def _sqlite_follower_url_from_main(url, ident):
+ url = sa_url.make_url(url)
+ if not url.database or url.database == ':memory:':
+ return url
+ else:
+ return sa_url.make_url("sqlite:///%s.db" % ident)
@_create_db.for_db("postgresql")
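
The sqlite hook above leaves memory databases alone and maps file-based URLs
onto a per-ident database file; a sketch of the expected mapping (the ident
is illustrative)::

    from sqlalchemy.engine import url as sa_url

    main = sa_url.make_url("sqlite:///main.db")
    assert main.database == "main.db"
    follower = sa_url.make_url("sqlite:///%s.db" % "test_follower1")
    print(follower)  # sqlite:///test_follower1.db
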
diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py
index fd0616327..005942913 100644
--- a/lib/sqlalchemy/testing/plugin/pytestplugin.py
+++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py
@@ -74,6 +74,9 @@ def pytest_collection_modifyitems(session, config, items):
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(list)
+ items[:] = [
+ item for item in
+ items if isinstance(item.parent, pytest.Instance)]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
@@ -115,7 +118,6 @@ def pytest_pycollect_makeitem(collector, name, obj):
_current_class = None
-
def pytest_runtest_setup(item):
# here we seem to get called only based on what we collected
# in pytest_collection_modifyitems. So to do class-based stuff
@@ -126,16 +128,18 @@ def pytest_runtest_setup(item):
return
# ... so we're doing a little dance here to figure it out...
- if item.parent.parent is not _current_class:
-
+ if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
- item.parent.parent.addfinalizer(
- lambda: class_teardown(item.parent.parent))
+ def finalize():
+ global _current_class
+ class_teardown(item.parent.parent)
+ _current_class = None
+ item.parent.parent.addfinalizer(finalize)
test_setup(item)
diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py
index 75baec987..fcb888f86 100644
--- a/lib/sqlalchemy/testing/profiling.py
+++ b/lib/sqlalchemy/testing/profiling.py
@@ -14,13 +14,12 @@ in a more fine-grained way than nose's profiling plugin.
import os
import sys
-from .util import gc_collect, decorator
+from .util import gc_collect
from . import config
from .plugin.plugin_base import SkipTest
import pstats
-import time
import collections
-from .. import util
+import contextlib
try:
import cProfile
@@ -30,64 +29,8 @@ from ..util import jython, pypy, win32, update_wrapper
_current_test = None
-
-def profiled(target=None, **target_opts):
- """Function profiling.
-
- @profiled()
- or
- @profiled(report=True, sort=('calls',), limit=20)
-
- Outputs profiling info for a decorated function.
-
- """
-
- profile_config = {'targets': set(),
- 'report': True,
- 'print_callers': False,
- 'print_callees': False,
- 'graphic': False,
- 'sort': ('time', 'calls'),
- 'limit': None}
- if target is None:
- target = 'anonymous_target'
-
- @decorator
- def decorate(fn, *args, **kw):
- elapsed, load_stats, result = _profile(
- fn, *args, **kw)
-
- graphic = target_opts.get('graphic', profile_config['graphic'])
- if graphic:
- os.system("runsnake %s" % filename)
- else:
- report = target_opts.get('report', profile_config['report'])
- if report:
- sort_ = target_opts.get('sort', profile_config['sort'])
- limit = target_opts.get('limit', profile_config['limit'])
- print(("Profile report for target '%s'" % (
- target, )
- ))
-
- stats = load_stats()
- stats.sort_stats(*sort_)
- if limit:
- stats.print_stats(limit)
- else:
- stats.print_stats()
-
- print_callers = target_opts.get(
- 'print_callers', profile_config['print_callers'])
- if print_callers:
- stats.print_callers()
-
- print_callees = target_opts.get(
- 'print_callees', profile_config['print_callees'])
- if print_callees:
- stats.print_callees()
-
- return result
- return decorate
+# ProfileStatsFile instance, set up in plugin_base
+_profile_stats = None
class ProfileStatsFile(object):
@@ -177,20 +120,23 @@ class ProfileStatsFile(object):
self._write()
def _header(self):
- return \
- "# %s\n"\
- "# This file is written out on a per-environment basis.\n"\
- "# For each test in aaa_profiling, the corresponding function and \n"\
- "# environment is located within this file. If it doesn't exist,\n"\
- "# the test is skipped.\n"\
- "# If a callcount does exist, it is compared to what we received. \n"\
- "# assertions are raised if the counts do not match.\n"\
- "# \n"\
- "# To add a new callcount test, apply the function_call_count \n"\
- "# decorator and re-run the tests using the --write-profiles \n"\
- "# option - this file will be rewritten including the new count.\n"\
- "# \n"\
- "" % (self.fname)
+ return (
+ "# %s\n"
+ "# This file is written out on a per-environment basis.\n"
+ "# For each test in aaa_profiling, the corresponding "
+ "function and \n"
+ "# environment is located within this file. "
+ "If it doesn't exist,\n"
+ "# the test is skipped.\n"
+ "# If a callcount does exist, it is compared "
+ "to what we received. \n"
+ "# assertions are raised if the counts do not match.\n"
+ "# \n"
+ "# To add a new callcount test, apply the function_call_count \n"
+ "# decorator and re-run the tests using the --write-profiles \n"
+ "# option - this file will be rewritten including the new count.\n"
+ "# \n"
+ ) % (self.fname)
def _read(self):
try:
@@ -239,72 +185,66 @@ def function_call_count(variance=0.05):
def decorate(fn):
def wrap(*args, **kw):
-
- if cProfile is None:
- raise SkipTest("cProfile is not installed")
-
- if not _profile_stats.has_stats() and not _profile_stats.write:
- # run the function anyway, to support dependent tests
- # (not a great idea but we have these in test_zoomark)
- fn(*args, **kw)
- raise SkipTest("No profiling stats available on this "
- "platform for this function. Run tests with "
- "--write-profiles to add statistics to %s for "
- "this platform." % _profile_stats.short_fname)
-
- gc_collect()
-
- timespent, load_stats, fn_result = _profile(
- fn, *args, **kw
- )
- stats = load_stats()
- callcount = stats.total_calls
-
- expected = _profile_stats.result(callcount)
- if expected is None:
- expected_count = None
- else:
- line_no, expected_count = expected
-
- print(("Pstats calls: %d Expected %s" % (
- callcount,
- expected_count
- )
- ))
- stats.print_stats()
- # stats.print_callers()
-
- if expected_count:
- deviance = int(callcount * variance)
- failed = abs(callcount - expected_count) > deviance
-
- if failed:
- if _profile_stats.write:
- _profile_stats.replace(callcount)
- else:
- raise AssertionError(
- "Adjusted function call count %s not within %s%% "
- "of expected %s. Rerun with --write-profiles to "
- "regenerate this callcount."
- % (
- callcount, (variance * 100),
- expected_count))
- return fn_result
+ with count_functions(variance=variance):
+ return fn(*args, **kw)
return update_wrapper(wrap, fn)
return decorate
-def _profile(fn, *args, **kw):
- filename = "%s.prof" % fn.__name__
-
- def load_stats():
- st = pstats.Stats(filename)
- os.unlink(filename)
- return st
+@contextlib.contextmanager
+def count_functions(variance=0.05):
+ if cProfile is None:
+ raise SkipTest("cProfile is not installed")
+
+ if not _profile_stats.has_stats() and not _profile_stats.write:
+ raise SkipTest("No profiling stats available on this "
+ "platform for this function. Run tests with "
+ "--write-profiles to add statistics to %s for "
+ "this platform." % _profile_stats.short_fname)
+
+ gc_collect()
+
+    pr = cProfile.Profile()
+    pr.enable()
+    yield
+    pr.disable()
+
+    stats = pstats.Stats(pr, stream=sys.stdout)
+
+    callcount = stats.total_calls
+
+ expected = _profile_stats.result(callcount)
+ if expected is None:
+ expected_count = None
+ else:
+ line_no, expected_count = expected
+
+ print(("Pstats calls: %d Expected %s" % (
+ callcount,
+ expected_count
+ )
+ ))
+ stats.sort_stats("cumulative")
+ stats.print_stats()
+
+ if expected_count:
+ deviance = int(callcount * variance)
+ failed = abs(callcount - expected_count) > deviance
+
+ if failed:
+ if _profile_stats.write:
+ _profile_stats.replace(callcount)
+ else:
+ raise AssertionError(
+ "Adjusted function call count %s not within %s%% "
+ "of expected %s. Rerun with --write-profiles to "
+ "regenerate this callcount."
+ % (
+ callcount, (variance * 100),
+ expected_count))
- began = time.time()
- cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
- filename=filename)
- ended = time.time()
- return ended - began, load_stats, locals()['result']
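
With this rewrite, ``function_call_count`` is only a thin decorator over the new ``count_functions`` context manager, so call-count assertions can now also wrap an arbitrary block inside a test. A usage sketch (the workload function is hypothetical; per the code above, the block raises ``SkipTest`` when no recorded stats are available and ``--write-profiles`` is not set)::

    from sqlalchemy.testing import profiling

    def test_orm_overhead():
        with profiling.count_functions(variance=0.10):
            run_orm_workload()  # hypothetical block under measurement
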
diff --git a/lib/sqlalchemy/testing/replay_fixture.py b/lib/sqlalchemy/testing/replay_fixture.py
new file mode 100644
index 000000000..b8a0f6df1
--- /dev/null
+++ b/lib/sqlalchemy/testing/replay_fixture.py
@@ -0,0 +1,167 @@
+from . import fixtures
+from . import profiling
+from .. import util
+import types
+from collections import deque
+import contextlib
+from . import config
+from sqlalchemy import MetaData
+from sqlalchemy import create_engine
+from sqlalchemy.orm import Session
+
+
+class ReplayFixtureTest(fixtures.TestBase):
+
+ @contextlib.contextmanager
+ def _dummy_ctx(self, *arg, **kw):
+ yield
+
+ def test_invocation(self):
+
+ dbapi_session = ReplayableSession()
+ creator = config.db.pool._creator
+ recorder = lambda: dbapi_session.recorder(creator())
+ engine = create_engine(
+ config.db.url, creator=recorder,
+ use_native_hstore=False)
+ self.metadata = MetaData(engine)
+ self.engine = engine
+ self.session = Session(engine)
+
+ self.setup_engine()
+ self._run_steps(ctx=self._dummy_ctx)
+ self.teardown_engine()
+ engine.dispose()
+
+ player = lambda: dbapi_session.player()
+ engine = create_engine(
+ config.db.url, creator=player,
+ use_native_hstore=False)
+
+ self.metadata = MetaData(engine)
+ self.engine = engine
+ self.session = Session(engine)
+
+ self.setup_engine()
+ self._run_steps(ctx=profiling.count_functions)
+ self.teardown_engine()
+
+ def setup_engine(self):
+ pass
+
+ def teardown_engine(self):
+ pass
+
+ def _run_steps(self, ctx):
+ raise NotImplementedError()
+
+
+class ReplayableSession(object):
+ """A simple record/playback tool.
+
+ This is *not* a mock testing class. It only records a session for later
+ playback and makes no assertions on call consistency whatsoever. It's
+ unlikely to be suitable for anything other than DB-API recording.
+
+ """
+
+ Callable = object()
+ NoAttribute = object()
+
+ if util.py2k:
+ Natives = set([getattr(types, t)
+ for t in dir(types) if not t.startswith('_')]).\
+ difference([getattr(types, t)
+ for t in ('FunctionType', 'BuiltinFunctionType',
+ 'MethodType', 'BuiltinMethodType',
+ 'LambdaType', 'UnboundMethodType',)])
+ else:
+ Natives = set([getattr(types, t)
+ for t in dir(types) if not t.startswith('_')]).\
+ union([type(t) if not isinstance(t, type)
+ else t for t in __builtins__.values()]).\
+ difference([getattr(types, t)
+ for t in ('FunctionType', 'BuiltinFunctionType',
+ 'MethodType', 'BuiltinMethodType',
+ 'LambdaType', )])
+
+ def __init__(self):
+ self.buffer = deque()
+
+ def recorder(self, base):
+ return self.Recorder(self.buffer, base)
+
+ def player(self):
+ return self.Player(self.buffer)
+
+ class Recorder(object):
+ def __init__(self, buffer, subject):
+ self._buffer = buffer
+ self._subject = subject
+
+ def __call__(self, *args, **kw):
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+
+ result = subject(*args, **kw)
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ @property
+ def _sqla_unwrap(self):
+ return self._subject
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+ try:
+ result = type(subject).__getattribute__(subject, key)
+ except AttributeError:
+ buffer.append(ReplayableSession.NoAttribute)
+ raise
+ else:
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ class Player(object):
+ def __init__(self, buffer):
+ self._buffer = buffer
+
+ def __call__(self, *args, **kw):
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ else:
+ return result
+
+ @property
+ def _sqla_unwrap(self):
+ return None
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ elif result is ReplayableSession.NoAttribute:
+ raise AttributeError(key)
+ else:
+ return result
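
As ``test_invocation`` above illustrates, the fixture records one pass against a real DB-API connection, then replays the buffered results with no database present, so the profiled second pass measures only SQLAlchemy overhead. A condensed sketch (``dbapi`` stands in for any real driver)::

    session = ReplayableSession()

    # first pass: proxy a real connection; each non-native result
    # is appended to the shared buffer
    conn = session.recorder(dbapi.connect("..."))
    # ... run the workload once ...

    # second pass: no database involved; answers are popped off the
    # buffer in order, so the workload must be repeated exactly
    replay_conn = session.player()
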
diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py
index 5236d0120..0904d454e 100644
--- a/lib/sqlalchemy/util/_collections.py
+++ b/lib/sqlalchemy/util/_collections.py
@@ -264,15 +264,18 @@ class OrderedDict(dict):
def __iter__(self):
return iter(self._list)
- if py2k:
- def values(self):
- return [self[key] for key in self._list]
+ def keys(self):
+ return list(self)
- def keys(self):
- return self._list
+ def values(self):
+ return [self[key] for key in self._list]
+
+ def items(self):
+ return [(key, self[key]) for key in self._list]
+ if py2k:
def itervalues(self):
- return iter([self[key] for key in self._list])
+ return iter(self.values())
def iterkeys(self):
return iter(self)
@@ -280,41 +283,6 @@ class OrderedDict(dict):
def iteritems(self):
return iter(self.items())
- def items(self):
- return [(key, self[key]) for key in self._list]
- else:
- def values(self):
- # return (self[key] for key in self)
- return (self[key] for key in self._list)
-
- def keys(self):
- # return iter(self)
- return iter(self._list)
-
- def items(self):
- # return ((key, self[key]) for key in self)
- return ((key, self[key]) for key in self._list)
-
- _debug_iter = False
- if _debug_iter:
- # normally disabled to reduce function call
- # overhead
- def __iter__(self):
- len_ = len(self._list)
- for item in self._list:
- yield item
- assert len_ == len(self._list), \
- "Dictionary changed size during iteration"
-
- def values(self):
- return (self[key] for key in self)
-
- def keys(self):
- return iter(self)
-
- def items(self):
- return ((key, self[key]) for key in self)
-
def __setitem__(self, key, object):
if key not in self:
try:
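
The net effect of the ``OrderedDict`` change above is that ``keys()``, ``values()`` and ``items()`` now return lists in insertion order on both py2k and py3k, rather than generators on Python 3. A quick check of the behavior as of this patch::

    from sqlalchemy.util import OrderedDict

    od = OrderedDict()
    od["b"] = 1
    od["a"] = 2

    assert od.keys() == ["b", "a"]
    assert od.items() == [("b", 1), ("a", 2)]
    assert od.values() == [1, 2]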