author    Mike Bayer <mike_mp@zzzcomputing.com>  2014-03-28 16:32:11 -0400
committer Mike Bayer <mike_mp@zzzcomputing.com>  2014-03-28 16:32:11 -0400
commit    c01558ae7f4af08acc523786e107ea5e2e214184
tree      a235ab825660b00c674f496354b364f58f8b464a
parent    9cdbed37f8c420db0b42fb959813d079622c3f3a
download  sqlalchemy-c01558ae7f4af08acc523786e107ea5e2e214184.tar.gz
- Fixed ORM bug where changing the primary key of an object, then marking
  it for DELETE, would fail to target the correct row for DELETE.
  Compounding the issue, the basic "number of rows matched" check was not
  being performed.  Both issues are fixed; however, note that the "rows
  matched" check requires so-called "sane multi-row count" functionality:
  the DBAPI's executemany() method must count up the rows matched by the
  individual statements, and SQLAlchemy's dialect must mark this feature
  as supported.  This currently applies only to some MySQL dialects,
  psycopg2, and SQLite.  fixes #3006
- Enabled "sane multi-row count" checking for the psycopg2 DBAPI, as this
  seems to be supported as of psycopg2 2.0.9.
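For context, the "sane multi-row count" behavior referenced above means that cursor.rowcount, after a DBAPI executemany() call, reports the total number of rows matched across all parameter sets rather than -1 or only the last statement's count. A minimal sketch of that behavior using the stdlib sqlite3 driver, one of the DBAPIs the message lists as supporting it (the table and values are made up for illustration):

    import sqlite3

    # throwaway in-memory table, purely for illustration
    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE t (id INTEGER PRIMARY KEY)")
    cur.executemany("INSERT INTO t (id) VALUES (?)", [(1,), (2,), (3,)])

    # three DELETE parameter sets, but only two match an existing row
    cur.executemany("DELETE FROM t WHERE id = ?", [(1,), (2,), (99,)])

    # a DBAPI with "sane multi-row count" support sums the rows matched by
    # the individual statements; drivers without it may report -1 or only
    # the last statement's count, which is why the new ORM check is
    # conditional on the dialect flag
    print(cur.rowcount)   # 2 with sqlite3
    conn.close()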
 doc/build/changelog/changelog_08.rst           |  22
 lib/sqlalchemy/dialects/postgresql/psycopg2.py |   5
 lib/sqlalchemy/ext/orderinglist.py             |  23
 lib/sqlalchemy/orm/persistence.py              |  26
 lib/sqlalchemy/testing/exclusions.py           |   3
 lib/sqlalchemy/testing/requirements.py         |   6
 test/orm/test_naturalpks.py                    |  16
 test/orm/test_unitofworkv2.py                  | 108
 test/requirements.py                           |   9
9 files changed, 181 insertions, 37 deletions
diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst
index 2b0fe2c39..2bcaf2bec 100644
--- a/doc/build/changelog/changelog_08.rst
+++ b/doc/build/changelog/changelog_08.rst
@@ -12,6 +12,28 @@
:version: 0.8.6
.. change::
+ :tags: bug, orm
+ :tickets: 3006
+ :versions: 0.9.4
+
+ Fixed ORM bug where changing the primary key of an object, then marking
+ it for DELETE, would fail to target the correct row for DELETE.
+ Compounding the issue, the basic "number of rows matched" check was
+ not being performed.  Both issues are fixed; however, note that the
+ "rows matched" check requires so-called "sane multi-row count"
+ functionality: the DBAPI's executemany() method must count up the
+ rows matched by the individual statements, and SQLAlchemy's dialect
+ must mark this feature as supported.  This currently applies only to
+ some MySQL dialects, psycopg2, and SQLite.
+
+ .. change::
+ :tags: feature, postgresql
+ :versions: 0.9.4
+
+ Enabled "sane multi-row count" checking for the psycopg2 DBAPI, as
+ this seems to be supported as of psycopg2 2.0.9.
+
+ .. change::
:tags: bug, postgresql
:tickets: 3000
:versions: 0.9.4
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 099ddf03d..ac1770625 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -347,7 +347,7 @@ class PGDialect_psycopg2(PGDialect):
supports_unicode_statements = False
default_paramstyle = 'pyformat'
- supports_sane_multi_rowcount = False
+ supports_sane_multi_rowcount = False # set to true based on psycopg2 version
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
@@ -393,6 +393,9 @@ class PGDialect_psycopg2(PGDialect):
is not None
self._has_native_json = self.psycopg2_version >= (2, 5)
+ # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
+ self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)
+
@classmethod
def dbapi(cls):
import psycopg2
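The flag set above is resolved when the dialect initializes against a live connection, based on the imported psycopg2 version. A quick way to confirm what a given environment ends up with, sketched against a placeholder URL (not part of this change):

    from sqlalchemy import create_engine

    # placeholder credentials/URL; any reachable PostgreSQL database will do
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    conn = engine.connect()   # the first connect runs dialect initialization

    print(engine.dialect.psycopg2_version)              # e.g. (2, 5, 2)
    print(engine.dialect.supports_sane_multi_rowcount)  # True for psycopg2 >= 2.0.9
    conn.close()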
diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py
index 9310c6071..16bab6b29 100644
--- a/lib/sqlalchemy/ext/orderinglist.py
+++ b/lib/sqlalchemy/ext/orderinglist.py
@@ -91,8 +91,27 @@ attribute, so that the ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
- key column or unique column is the target of the sort. Since changing the
- order of entries often means that two rows must trade values, this is not
+ key column or unique column is the target of the sort. Two operations
+ in particular are unsupported or problematic:
+
+ * two entries must trade values. This is not supported directly in the
+ case of a primary key or unique constraint because it means at least
+ one row would need to be temporarily removed first, or changed to
+ a third, neutral value while the switch occurs.
+
+ * an entry must be deleted in order to make room for a new entry. SQLAlchemy's
+ unit of work performs all INSERTs before DELETEs within a single flush
+ by default. A future feature will allow this to be configurable for
+ specific sets of columns on mappings.
+
+ An additional issue when using primary keys as ordering keys is that
+ UPDATE or DELETE statements on target rows may fail to target the
+ correct row, as the orderinglist may have changed their primary key
+ value.
+
+ In short, changing the
+ order of entries often means that either rows must trade values,
+ or rows must be deleted to make way for new inserts; this is not
possible when the value is constrained by a primary key or unique
constraint, since one of the rows would temporarily have to point to a
third available value so that the other row could take its old
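The "two entries must trade values" limitation described in the revised warning is a plain SQL constraint problem, independent of the ORM. A reduced illustration with a made-up table and a unique "position" column, using sqlite3 directly:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE slide (id INTEGER PRIMARY KEY, position INTEGER UNIQUE)")
    conn.executemany(
        "INSERT INTO slide (id, position) VALUES (?, ?)", [(1, 0), (2, 1)])

    try:
        # swapping the two rows one UPDATE at a time, which is how the unit
        # of work proceeds, collides with the value still held by the other row
        conn.execute("UPDATE slide SET position = 1 WHERE id = 1")
    except sqlite3.IntegrityError as err:
        print("swap failed:", err)

    # moving one row to a neutral value first is the manual workaround the
    # warning alludes to
    conn.execute("UPDATE slide SET position = -1 WHERE id = 2")
    conn.execute("UPDATE slide SET position = 1 WHERE id = 1")
    conn.execute("UPDATE slide SET position = 0 WHERE id = 2")
    conn.close()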
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index 35631988b..b4c9c052b 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -450,7 +450,7 @@ def _collect_delete_commands(base_mapper, uowtransaction, table,
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
- mapper._get_state_attr_by_column(
+ mapper._get_committed_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
@@ -679,21 +679,19 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
connection = cached_connections[connection]
- if need_version_id:
- # TODO: need test coverage for this [ticket:1761]
+ expected = len(del_objects)
+ rows_matched = -1
+ if connection.dialect.supports_sane_multi_rowcount:
+ c = connection.execute(statement, del_objects)
+ rows_matched = c.rowcount
+ elif need_version_id:
if connection.dialect.supports_sane_rowcount:
- rows = 0
+ rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
- rows += c.rowcount
- if rows != len(del_objects):
- raise orm_exc.StaleDataError(
- "DELETE statement on table '%s' expected to "
- "delete %d row(s); %d were matched." %
- (table.description, len(del_objects), c.rowcount)
- )
+ rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
@@ -704,6 +702,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
else:
connection.execute(statement, del_objects)
+ if rows_matched > -1 and expected != rows_matched:
+ raise orm_exc.StaleDataError(
+ "DELETE statement on table '%s' expected to "
+ "delete %d row(s); %d were matched." %
+ (table.description, expected, rows_matched)
+ )
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
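The new accounting in _emit_delete_statements boils down to: when the dialect supports sane multi-row counts, run the DELETE as a single executemany() and compare the reported rowcount against the number of parameter sets; otherwise fall back to the per-row loop (for versioned rows) or a warning. The same comparison sketched at the Core level, with illustrative names rather than the ORM's internals:

    from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
                            bindparam)

    engine = create_engine("sqlite://")
    metadata = MetaData()
    parent = Table("parent", metadata, Column("id", Integer, primary_key=True))
    metadata.create_all(engine)

    conn = engine.connect()
    conn.execute(parent.insert(), [{"id": 1}, {"id": 2}])

    # three rows staged for DELETE, but one no longer exists in the database
    del_params = [{"id": 1}, {"id": 2}, {"id": 99}]
    stmt = parent.delete().where(parent.c.id == bindparam("id"))
    result = conn.execute(stmt, del_params)

    expected = len(del_params)
    if engine.dialect.supports_sane_multi_rowcount and \
            result.rowcount != expected:
        # the ORM raises StaleDataError at this point; printed here for brevity
        print("expected to delete %d row(s); %d were matched"
              % (expected, result.rowcount))
    conn.close()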
diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py
index 00bb69cbc..00ca28428 100644
--- a/lib/sqlalchemy/testing/exclusions.py
+++ b/lib/sqlalchemy/testing/exclusions.py
@@ -74,6 +74,9 @@ class skip_if(object):
self._fails_on = skip_if(other, reason)
return self
+ def fails_on_everything_except(self, *dbs):
+ self._fails_on = skip_if(fails_on_everything_except(*dbs))
+ return self
class fails_if(skip_if):
def __call__(self, fn):
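The new fails_on_everything_except() method mirrors the module-level function of the same name, allowing it to be chained onto a skip_if rule. A hedged usage sketch, with arbitrary backend names, of how a requirements property might compose the two behaviors:

    from sqlalchemy.testing import exclusions

    # skip outright on mssql; on every backend other than those listed,
    # run the test but treat failure as expected
    rule = exclusions.skip_if("mssql").fails_on_everything_except(
        "sqlite", "oracle", "+zxjdbc")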
diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py
index 76e48f8c8..07b5697e2 100644
--- a/lib/sqlalchemy/testing/requirements.py
+++ b/lib/sqlalchemy/testing/requirements.py
@@ -48,6 +48,12 @@ class SuiteRequirements(Requirements):
return exclusions.open()
@property
+ def non_updating_cascade(self):
+ """target database must *not* support ON UPDATE..CASCADE behavior in
+ foreign keys."""
+ return exclusions.closed()
+
+ @property
def deferrable_fks(self):
return exclusions.closed()
diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py
index 1bb536d70..8c675b94d 100644
--- a/test/orm/test_naturalpks.py
+++ b/test/orm/test_naturalpks.py
@@ -389,10 +389,7 @@ class NaturalPKTest(fixtures.MappedTest):
def test_manytomany_passive(self):
self._test_manytomany(True)
- # mysqldb executemany() of the association table fails to
- # report the correct row count
- @testing.fails_if(lambda: testing.against('mysql')
- and not (testing.against('+zxjdbc') or testing.against('+cymysql')))
+ @testing.requires.non_updating_cascade
def test_manytomany_nonpassive(self):
self._test_manytomany(False)
@@ -798,8 +795,7 @@ class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
def test_onetomany_passive(self):
self._test_onetomany(True)
- # PG etc. need passive=True to allow PK->PK cascade
- @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
+ @testing.requires.non_updating_cascade
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
@@ -876,7 +872,7 @@ class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
def test_change_m2o_passive(self):
self._test_change_m2o(True)
- @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
+ @testing.requires.non_updating_cascade
def test_change_m2o_nonpassive(self):
self._test_change_m2o(False)
@@ -1063,8 +1059,7 @@ class JoinedInheritanceTest(fixtures.MappedTest):
def test_pk_passive(self):
self._test_pk(True)
- # PG etc. need passive=True to allow PK->PK cascade
- @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
+ @testing.requires.non_updating_cascade
def test_pk_nonpassive(self):
self._test_pk(False)
@@ -1073,8 +1068,7 @@ class JoinedInheritanceTest(fixtures.MappedTest):
self._test_fk(True)
# PG etc. need passive=True to allow PK->PK cascade
- @testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc', 'oracle',
- 'postgresql+zxjdbc')
+ @testing.requires.non_updating_cascade
def test_fk_nonpassive(self):
self._test_fk(False)
diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py
index 34648c256..1dfc0401b 100644
--- a/test/orm/test_unitofworkv2.py
+++ b/test/orm/test_unitofworkv2.py
@@ -1203,20 +1203,20 @@ class RowswitchAccountingTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
- Column('id', Integer, primary_key=True)
+ Column('id', Integer, primary_key=True),
+ Column('data', Integer)
)
Table('child', metadata,
- Column('id', Integer, ForeignKey('parent.id'), primary_key=True)
+ Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
+ Column('data', Integer)
)
- def test_accounting_for_rowswitch(self):
+ def _fixture(self):
parent, child = self.tables.parent, self.tables.child
- class Parent(object):
- def __init__(self, id):
- self.id = id
- self.child = Child()
- class Child(object):
+ class Parent(fixtures.BasicEntity):
+ pass
+ class Child(fixtures.BasicEntity):
pass
mapper(Parent, parent, properties={
@@ -1225,15 +1225,19 @@ class RowswitchAccountingTest(fixtures.MappedTest):
backref="parent")
})
mapper(Child, child)
+ return Parent, Child
+
+ def test_switch_on_update(self):
+ Parent, Child = self._fixture()
sess = create_session(autocommit=False)
- p1 = Parent(1)
+ p1 = Parent(id=1, child=Child())
sess.add(p1)
sess.commit()
sess.close()
- p2 = Parent(1)
+ p2 = Parent(id=1, child=Child())
p3 = sess.merge(p2)
old = attributes.get_history(p3, 'child')[2][0]
@@ -1244,7 +1248,7 @@ class RowswitchAccountingTest(fixtures.MappedTest):
assert p3.child._sa_instance_state.session_id == sess.hash_key
assert p3.child in sess
- p4 = Parent(1)
+ p4 = Parent(id=1, child=Child())
p5 = sess.merge(p4)
old = attributes.get_history(p5, 'child')[2][0]
@@ -1252,6 +1256,88 @@ class RowswitchAccountingTest(fixtures.MappedTest):
sess.flush()
+ def test_switch_on_delete(self):
+ Parent, Child = self._fixture()
+
+ sess = Session()
+ p1 = Parent(id=1, data=2, child=None)
+ sess.add(p1)
+ sess.flush()
+
+ p1.id = 5
+ sess.delete(p1)
+ eq_(p1.id, 5)
+ sess.flush()
+
+ eq_(sess.scalar(self.tables.parent.count()), 0)
+
+
+class BasicStaleChecksTest(fixtures.MappedTest):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table('parent', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', Integer)
+ )
+ Table('child', metadata,
+ Column('id', Integer, ForeignKey('parent.id'), primary_key=True),
+ Column('data', Integer)
+ )
+
+ def _fixture(self):
+ parent, child = self.tables.parent, self.tables.child
+
+ class Parent(fixtures.BasicEntity):
+ pass
+ class Child(fixtures.BasicEntity):
+ pass
+
+ mapper(Parent, parent, properties={
+ 'child':relationship(Child, uselist=False,
+ cascade="all, delete-orphan",
+ backref="parent")
+ })
+ mapper(Child, child)
+ return Parent, Child
+
+ def test_update_single_missing(self):
+ Parent, Child = self._fixture()
+ sess = Session()
+ p1 = Parent(id=1, data=2)
+ sess.add(p1)
+ sess.flush()
+
+ sess.execute(self.tables.parent.delete())
+
+ p1.data = 3
+ assert_raises_message(
+ orm_exc.StaleDataError,
+ "UPDATE statement on table 'parent' expected to "
+ "update 1 row\(s\); 0 were matched.",
+ sess.flush
+ )
+
+ @testing.requires.sane_multi_rowcount
+ def test_delete_multi_missing(self):
+ Parent, Child = self._fixture()
+ sess = Session()
+ p1 = Parent(id=1, data=2, child=None)
+ p2 = Parent(id=2, data=3, child=None)
+ sess.add_all([p1, p2])
+ sess.flush()
+
+ sess.execute(self.tables.parent.delete())
+ sess.delete(p1)
+ sess.delete(p2)
+
+ assert_raises_message(
+ orm_exc.StaleDataError,
+ "DELETE statement on table 'parent' expected to "
+ "delete 2 row\(s\); 0 were matched.",
+ sess.flush
+ )
+
+
class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
diff --git a/test/requirements.py b/test/requirements.py
index 4184058b2..d6b1d3969 100644
--- a/test/requirements.py
+++ b/test/requirements.py
@@ -64,6 +64,13 @@ class DefaultRequirements(SuiteRequirements):
)
@property
+ def non_updating_cascade(self):
+ """target database must *not* support ON UPDATE..CASCADE behavior in
+ foreign keys."""
+
+ return fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + skip_if('mssql')
+
+ @property
def deferrable_fks(self):
"""target database must support deferrable fks"""
@@ -438,7 +445,7 @@ class DefaultRequirements(SuiteRequirements):
@property
def sane_multi_rowcount(self):
- return skip_if(
+ return fails_if(
lambda config: not config.db.dialect.supports_sane_multi_rowcount,
"driver doesn't support 'sane' multi row count"
)