diff options
| author | Mike Bayer <mike_mp@zzzcomputing.com> | 2019-01-06 01:14:26 -0500 |
|---|---|---|
| committer | mike bayer <mike_mp@zzzcomputing.com> | 2019-01-06 17:34:50 +0000 |
| commit | 1e1a38e7801f410f244e4bbb44ec795ae152e04e (patch) | |
| tree | 28e725c5c8188bd0cfd133d1e268dbca9b524978 /lib/sqlalchemy/orm/dependency.py | |
| parent | 404e69426b05a82d905cbb3ad33adafccddb00dd (diff) | |
| download | sqlalchemy-1e1a38e7801f410f244e4bbb44ec795ae152e04e.tar.gz | |
Run black -l 79 against all source files
This is a straight reformat run using black as is, with no edits
applied at all.
The black run will format code consistently; however, in
some cases that are prevalent in SQLAlchemy code, it produces
too-long lines. The too-long lines will be resolved in the
following commit, which will resolve all remaining flake8 issues,
including shadowed builtins, long lines, import order, unused
imports, duplicate imports, and docstring issues.
Change-Id: I7eda77fed3d8e73df84b3651fd6cfcfe858d4dc9
Diffstat (limited to 'lib/sqlalchemy/orm/dependency.py')
| -rw-r--r-- | lib/sqlalchemy/orm/dependency.py | 1086 |
1 file changed, 590 insertions, 496 deletions
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 960b9e5d5..cba4d2141 100644 --- a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -10,8 +10,7 @@ """ from .. import sql, util, exc as sa_exc -from . import attributes, exc, sync, unitofwork, \ - util as mapperutil +from . import attributes, exc, sync, unitofwork, util as mapperutil from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY @@ -41,8 +40,8 @@ class DependencyProcessor(object): raise sa_exc.ArgumentError( "Can't build a DependencyProcessor for relationship %s. " "No target attributes to populate between parent and " - "child are present" % - self.prop) + "child are present" % self.prop + ) @classmethod def from_relationship(cls, prop): @@ -70,31 +69,28 @@ class DependencyProcessor(object): before_delete = unitofwork.ProcessAll(uow, self, True, True) parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.primary_base_mapper + uow, self.parent.primary_base_mapper ) child_saves = unitofwork.SaveUpdateAll( - uow, - self.mapper.primary_base_mapper + uow, self.mapper.primary_base_mapper ) parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.primary_base_mapper + uow, self.parent.primary_base_mapper ) child_deletes = unitofwork.DeleteAll( - uow, - self.mapper.primary_base_mapper + uow, self.mapper.primary_base_mapper ) - self.per_property_dependencies(uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ) + self.per_property_dependencies( + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ) def per_state_flush_actions(self, uow, states, isdelete): """establish actions and dependencies related to a flush. 
@@ -130,9 +126,7 @@ class DependencyProcessor(object): # child side is not part of the cycle, so we will link per-state # actions to the aggregate "saves", "deletes" actions - child_actions = [ - (child_saves, False), (child_deletes, True) - ] + child_actions = [(child_saves, False), (child_deletes, True)] child_in_cycles = False else: child_in_cycles = True @@ -140,15 +134,13 @@ class DependencyProcessor(object): # check if the "parent" side is part of the cycle if not isdelete: parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) + uow, self.parent.base_mapper + ) parent_deletes = before_delete = None if parent_saves in uow.cycles: parent_in_cycles = True else: - parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.base_mapper) + parent_deletes = unitofwork.DeleteAll(uow, self.parent.base_mapper) parent_saves = after_save = None if parent_deletes in uow.cycles: parent_in_cycles = True @@ -160,17 +152,18 @@ class DependencyProcessor(object): # by a preprocessor on this state/attribute. In the # case of deletes we may try to load missing items here as well. sum_ = state.manager[self.key].impl.get_all_pending( - state, state.dict, + state, + state.dict, self._passive_delete_flag if isdelete - else attributes.PASSIVE_NO_INITIALIZE) + else attributes.PASSIVE_NO_INITIALIZE, + ) if not sum_: continue if isdelete: - before_delete = unitofwork.ProcessState(uow, - self, True, state) + before_delete = unitofwork.ProcessState(uow, self, True, state) if parent_in_cycles: parent_deletes = unitofwork.DeleteState(uow, state) else: @@ -188,21 +181,28 @@ class DependencyProcessor(object): if deleted: child_action = ( unitofwork.DeleteState(uow, child_state), - True) + True, + ) else: child_action = ( unitofwork.SaveUpdateState(uow, child_state), - False) + False, + ) child_actions.append(child_action) # establish dependencies between our possibly per-state # parent action and our possibly per-state child action. 
for child_action, childisdelete in child_actions: - self.per_state_dependencies(uow, parent_saves, - parent_deletes, - child_action, - after_save, before_delete, - isdelete, childisdelete) + self.per_state_dependencies( + uow, + parent_saves, + parent_deletes, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ) def presort_deletes(self, uowcommit, states): return False @@ -228,76 +228,74 @@ class DependencyProcessor(object): # TODO: add a high speed method # to InstanceState which returns: attribute # has a non-None value, or had one - history = uowcommit.get_attribute_history( - s, - self.key, - passive) + history = uowcommit.get_attribute_history(s, self.key, passive) if history and not history.empty(): return True else: - return states and \ - not self.prop._is_self_referential and \ - self.mapper in uowcommit.mappers + return ( + states + and not self.prop._is_self_referential + and self.mapper in uowcommit.mappers + ) def _verify_canload(self, state): if self.prop.uselist and state is None: raise exc.FlushError( "Can't flush None value found in " - "collection %s" % (self.prop, )) - elif state is not None and \ - not self.mapper._canload( - state, allow_subtypes=not self.enable_typechecks): + "collection %s" % (self.prop,) + ) + elif state is not None and not self.mapper._canload( + state, allow_subtypes=not self.enable_typechecks + ): if self.mapper._canload(state, allow_subtypes=True): - raise exc.FlushError('Attempting to flush an item of type ' - '%(x)s as a member of collection ' - '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type. If %(x)s is a subclass of ' - '%(z)s, configure mapper "%(zm)s" to ' - 'load this subtype polymorphically, or ' - 'set enable_typechecks=False to allow ' - 'any subtype to be accepted for flush. 
' - % { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - 'zm': self.mapper, - }) + raise exc.FlushError( + "Attempting to flush an item of type " + "%(x)s as a member of collection " + '"%(y)s". Expected an object of type ' + "%(z)s or a polymorphic subclass of " + "this type. If %(x)s is a subclass of " + '%(z)s, configure mapper "%(zm)s" to ' + "load this subtype polymorphically, or " + "set enable_typechecks=False to allow " + "any subtype to be accepted for flush. " + % { + "x": state.class_, + "y": self.prop, + "z": self.mapper.class_, + "zm": self.mapper, + } + ) else: raise exc.FlushError( - 'Attempting to flush an item of type ' - '%(x)s as a member of collection ' + "Attempting to flush an item of type " + "%(x)s as a member of collection " '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type.' % { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - }) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit): + "%(z)s or a polymorphic subclass of " + "this type." 
+ % { + "x": state.class_, + "y": self.prop, + "z": self.mapper.class_, + } + ) + + def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): raise NotImplementedError() def _get_reversed_processed_set(self, uow): if not self.prop._reverse_property: return None - process_key = tuple(sorted( - [self.key] + - [p.key for p in self.prop._reverse_property] - )) - return uow.memo( - ('reverse_key', process_key), - set + process_key = tuple( + sorted([self.key] + [p.key for p in self.prop._reverse_property]) ) + return uow.memo(("reverse_key", process_key), set) def _post_update(self, state, uowcommit, related, is_m2o_delete=False): for x in related: if not is_m2o_delete or x is not None: uowcommit.register_post_update( - state, - [r for l, r in self.prop.synchronize_pairs] + state, [r for l, r in self.prop.synchronize_pairs] ) break @@ -309,114 +307,126 @@ class DependencyProcessor(object): class OneToManyDP(DependencyProcessor): - - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete, - ): + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): if self.post_update: child_post_updates = unitofwork.PostUpdateAll( - uow, - self.mapper.primary_base_mapper, - False) + uow, self.mapper.primary_base_mapper, False + ) child_pre_updates = unitofwork.PostUpdateAll( - uow, - self.mapper.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, child_post_updates), - - (before_delete, child_pre_updates), - (child_pre_updates, parent_deletes), - (child_pre_updates, child_deletes), - - ]) + uow, self.mapper.primary_base_mapper, True + ) + + uow.dependencies.update( + [ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, child_post_updates), + (before_delete, child_pre_updates), + 
(child_pre_updates, parent_deletes), + (child_pre_updates, child_deletes), + ] + ) else: - uow.dependencies.update([ - (parent_saves, after_save), - (after_save, child_saves), - (after_save, child_deletes), - - (child_saves, parent_deletes), - (child_deletes, parent_deletes), - - (before_delete, child_saves), - (before_delete, child_deletes), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): + uow.dependencies.update( + [ + (parent_saves, after_save), + (after_save, child_saves), + (after_save, child_deletes), + (child_saves, parent_deletes), + (child_deletes, parent_deletes), + (before_delete, child_saves), + (before_delete, child_deletes), + ] + ) + + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): if self.post_update: child_post_updates = unitofwork.PostUpdateAll( - uow, - self.mapper.primary_base_mapper, - False) + uow, self.mapper.primary_base_mapper, False + ) child_pre_updates = unitofwork.PostUpdateAll( - uow, - self.mapper.primary_base_mapper, - True) + uow, self.mapper.primary_base_mapper, True + ) # TODO: this whole block is not covered # by any tests if not isdelete: if childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, child_post_updates), - ]) + uow.dependencies.update( + [ + (child_action, after_save), + (after_save, child_post_updates), + ] + ) else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - (after_save, child_post_updates), - ]) + uow.dependencies.update( + [ + (save_parent, after_save), + (child_action, after_save), + (after_save, child_post_updates), + ] + ) else: if childisdelete: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) + uow.dependencies.update( + [ + (before_delete, child_pre_updates), + 
(child_pre_updates, delete_parent), + ] + ) else: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) + uow.dependencies.update( + [ + (before_delete, child_pre_updates), + (child_pre_updates, delete_parent), + ] + ) elif not isdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - (save_parent, child_action) - ]) + uow.dependencies.update( + [ + (save_parent, after_save), + (after_save, child_action), + (save_parent, child_action), + ] + ) else: - uow.dependencies.update([ - (before_delete, child_action), - (child_action, delete_parent) - ]) + uow.dependencies.update( + [(before_delete, child_action), (child_action, delete_parent)] + ) def presort_deletes(self, uowcommit, states): # head object is being deleted, and we manage its list of # child objects the child objects have to have their # foreign key to the parent set to NULL - should_null_fks = not self.cascade.delete and \ - not self.passive_deletes == 'all' + should_null_fks = ( + not self.cascade.delete and not self.passive_deletes == "all" + ) for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: for child in history.deleted: if child is not None and self.hasparent(child) is False: @@ -429,13 +439,16 @@ class OneToManyDP(DependencyProcessor): for child in history.unchanged: if child is not None: uowcommit.register_object( - child, operation="delete", prop=self.prop) + child, operation="delete", prop=self.prop + ) def presort_saves(self, uowcommit, states): - children_added = uowcommit.memo(('children_added', self), set) + children_added = uowcommit.memo(("children_added", self), set) - should_null_fks = not self.cascade.delete_orphan and \ - not self.passive_deletes == 'all' + should_null_fks = ( + not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ) for state in 
states: pks_changed = self._pks_changed(uowcommit, state) @@ -445,34 +458,39 @@ class OneToManyDP(DependencyProcessor): else: passive = attributes.PASSIVE_OFF - history = uowcommit.get_attribute_history( - state, - self.key, - passive) + history = uowcommit.get_attribute_history(state, self.key, passive) if history: for child in history.added: if child is not None: - uowcommit.register_object(child, cancel_delete=True, - operation="add", - prop=self.prop) + uowcommit.register_object( + child, + cancel_delete=True, + operation="add", + prop=self.prop, + ) children_added.update(history.added) for child in history.deleted: if not self.cascade.delete_orphan: if should_null_fks: - uowcommit.register_object(child, isdelete=False, - operation='delete', - prop=self.prop) + uowcommit.register_object( + child, + isdelete=False, + operation="delete", + prop=self.prop, + ) elif self.hasparent(child) is False: uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', child): - uowcommit.register_object( - st_, - isdelete=True) + "delete", child + ): + uowcommit.register_object(st_, isdelete=True) if pks_changed: if history: @@ -483,7 +501,8 @@ class OneToManyDP(DependencyProcessor): False, self.passive_updates, operation="pk change", - prop=self.prop) + prop=self.prop, + ) def process_deletes(self, uowcommit, states): # head object is being deleted, and we manage its list of @@ -492,39 +511,37 @@ class OneToManyDP(DependencyProcessor): # safely for any cascade but is unnecessary if delete cascade # is on. 
- if self.post_update or not self.passive_deletes == 'all': - children_added = uowcommit.memo(('children_added', self), set) + if self.post_update or not self.passive_deletes == "all": + children_added = uowcommit.memo(("children_added", self), set) for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: for child in history.deleted: - if child is not None and \ - self.hasparent(child) is False: + if ( + child is not None + and self.hasparent(child) is False + ): self._synchronize( - state, - child, - None, True, - uowcommit, False) + state, child, None, True, uowcommit, False + ) if self.post_update and child: self._post_update(child, uowcommit, [state]) if self.post_update or not self.cascade.delete: - for child in set(history.unchanged).\ - difference(children_added): + for child in set(history.unchanged).difference( + children_added + ): if child is not None: self._synchronize( - state, - child, - None, True, - uowcommit, False) + state, child, None, True, uowcommit, False + ) if self.post_update and child: - self._post_update(child, - uowcommit, - [state]) + self._post_update( + child, uowcommit, [state] + ) # technically, we can even remove each child from the # collection here too. but this would be a somewhat @@ -532,54 +549,66 @@ class OneToManyDP(DependencyProcessor): # if the old parent wasn't deleted but child was moved. 
def process_saves(self, uowcommit, states): - should_null_fks = not self.cascade.delete_orphan and \ - not self.passive_deletes == 'all' + should_null_fks = ( + not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ) for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) if history: for child in history.added: - self._synchronize(state, child, None, - False, uowcommit, False) + self._synchronize( + state, child, None, False, uowcommit, False + ) if child is not None and self.post_update: self._post_update(child, uowcommit, [state]) for child in history.deleted: - if should_null_fks and not self.cascade.delete_orphan and \ - not self.hasparent(child): - self._synchronize(state, child, None, True, - uowcommit, False) + if ( + should_null_fks + and not self.cascade.delete_orphan + and not self.hasparent(child) + ): + self._synchronize( + state, child, None, True, uowcommit, False + ) if self._pks_changed(uowcommit, state): for child in history.unchanged: - self._synchronize(state, child, None, - False, uowcommit, True) + self._synchronize( + state, child, None, False, uowcommit, True + ) - def _synchronize(self, state, child, - associationrow, clearkeys, uowcommit, - pks_changed): + def _synchronize( + self, state, child, associationrow, clearkeys, uowcommit, pks_changed + ): source = state dest = child self._verify_canload(child) - if dest is None or \ - (not self.post_update and uowcommit.is_deleted(dest)): + if dest is None or ( + not self.post_update and uowcommit.is_deleted(dest) + ): return if clearkeys: sync.clear(dest, self.mapper, self.prop.synchronize_pairs) else: - sync.populate(source, self.parent, dest, self.mapper, - self.prop.synchronize_pairs, uowcommit, - self.passive_updates and pks_changed) + sync.populate( + source, + self.parent, + dest, + self.mapper, + self.prop.synchronize_pairs, + uowcommit, + 
self.passive_updates and pks_changed, + ) def _pks_changed(self, uowcommit, state): return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) + uowcommit, state, self.parent, self.prop.synchronize_pairs + ) class ManyToOneDP(DependencyProcessor): @@ -587,105 +616,110 @@ class ManyToOneDP(DependencyProcessor): DependencyProcessor.__init__(self, prop) self.mapper._dependency_processors.append(DetectKeySwitch(prop)) - def per_property_dependencies(self, uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete): + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): if self.post_update: parent_post_updates = unitofwork.PostUpdateAll( - uow, - self.parent.primary_base_mapper, - False) + uow, self.parent.primary_base_mapper, False + ) parent_pre_updates = unitofwork.PostUpdateAll( - uow, - self.parent.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, parent_post_updates), - - (after_save, parent_pre_updates), - (before_delete, parent_pre_updates), - - (parent_pre_updates, child_deletes), - (parent_pre_updates, parent_deletes), - ]) + uow, self.parent.primary_base_mapper, True + ) + + uow.dependencies.update( + [ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, parent_post_updates), + (after_save, parent_pre_updates), + (before_delete, parent_pre_updates), + (parent_pre_updates, child_deletes), + (parent_pre_updates, parent_deletes), + ] + ) else: - uow.dependencies.update([ - (child_saves, after_save), - (after_save, parent_saves), - (parent_saves, child_deletes), - (parent_deletes, child_deletes) - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): + uow.dependencies.update( + [ + 
(child_saves, after_save), + (after_save, parent_saves), + (parent_saves, child_deletes), + (parent_deletes, child_deletes), + ] + ) + + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): if self.post_update: if not isdelete: parent_post_updates = unitofwork.PostUpdateAll( - uow, - self.parent.primary_base_mapper, - False) + uow, self.parent.primary_base_mapper, False + ) if childisdelete: - uow.dependencies.update([ - (after_save, parent_post_updates), - (parent_post_updates, child_action) - ]) + uow.dependencies.update( + [ + (after_save, parent_post_updates), + (parent_post_updates, child_action), + ] + ) else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - - (after_save, parent_post_updates) - ]) + uow.dependencies.update( + [ + (save_parent, after_save), + (child_action, after_save), + (after_save, parent_post_updates), + ] + ) else: parent_pre_updates = unitofwork.PostUpdateAll( - uow, - self.parent.primary_base_mapper, - True) + uow, self.parent.primary_base_mapper, True + ) - uow.dependencies.update([ - (before_delete, parent_pre_updates), - (parent_pre_updates, delete_parent), - (parent_pre_updates, child_action) - ]) + uow.dependencies.update( + [ + (before_delete, parent_pre_updates), + (parent_pre_updates, delete_parent), + (parent_pre_updates, child_action), + ] + ) elif not isdelete: if not childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, save_parent), - ]) + uow.dependencies.update( + [(child_action, after_save), (after_save, save_parent)] + ) else: - uow.dependencies.update([ - (after_save, save_parent), - ]) + uow.dependencies.update([(after_save, save_parent)]) else: if childisdelete: - uow.dependencies.update([ - (delete_parent, child_action) - ]) + uow.dependencies.update([(delete_parent, child_action)]) def presort_deletes(self, uowcommit, states): if 
self.cascade.delete or self.cascade.delete_orphan: for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: if self.cascade.delete_orphan: todelete = history.sum() @@ -695,36 +729,42 @@ class ManyToOneDP(DependencyProcessor): if child is None: continue uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) - t = self.mapper.cascade_iterator('delete', child) + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) + t = self.mapper.cascade_iterator("delete", child) for c, m, st_, dct_ in t: - uowcommit.register_object( - st_, isdelete=True) + uowcommit.register_object(st_, isdelete=True) def presort_saves(self, uowcommit, states): for state in states: uowcommit.register_object(state, operation="add", prop=self.prop) if self.cascade.delete_orphan: history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: for child in history.deleted: if self.hasparent(child) is False: uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) - t = self.mapper.cascade_iterator('delete', child) + t = self.mapper.cascade_iterator("delete", child) for c, m, st_, dct_ in t: uowcommit.register_object(st_, isdelete=True) def process_deletes(self, uowcommit, states): - if self.post_update and \ - not self.cascade.delete_orphan and \ - not self.passive_deletes == 'all': + if ( + self.post_update + and not self.cascade.delete_orphan + and not self.passive_deletes == "all" + ): # post_update means we have to update our # row to not reference the child object @@ -733,55 +773,70 @@ class ManyToOneDP(DependencyProcessor): self._synchronize(state, None, None, True, uowcommit) if state and self.post_update: history = 
uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: self._post_update( - state, uowcommit, history.sum(), - is_m2o_delete=True) + state, uowcommit, history.sum(), is_m2o_delete=True + ) def process_saves(self, uowcommit, states): for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) if history: if history.added: for child in history.added: - self._synchronize(state, child, None, False, - uowcommit, "add") + self._synchronize( + state, child, None, False, uowcommit, "add" + ) elif history.deleted: self._synchronize( - state, None, None, True, uowcommit, "delete") + state, None, None, True, uowcommit, "delete" + ) if self.post_update: self._post_update(state, uowcommit, history.sum()) - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation=None): - if state is None or \ - (not self.post_update and uowcommit.is_deleted(state)): + def _synchronize( + self, + state, + child, + associationrow, + clearkeys, + uowcommit, + operation=None, + ): + if state is None or ( + not self.post_update and uowcommit.is_deleted(state) + ): return - if operation is not None and \ - child is not None and \ - not uowcommit.session._contains_state(child): + if ( + operation is not None + and child is not None + and not uowcommit.session._contains_state(child) + ): util.warn( "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) + "operation along '%s' won't proceed" + % (mapperutil.state_class_str(child), operation, self.prop) + ) return if clearkeys or child is None: sync.clear(state, self.parent, self.prop.synchronize_pairs) else: self._verify_canload(child) - sync.populate(child, self.mapper, state, - self.parent, - self.prop.synchronize_pairs, - uowcommit, 
- False) + sync.populate( + child, + self.mapper, + state, + self.parent, + self.prop.synchronize_pairs, + uowcommit, + False, + ) class DetectKeySwitch(DependencyProcessor): @@ -801,20 +856,18 @@ class DetectKeySwitch(DependencyProcessor): if self.passive_updates: return else: - if False in (prop.passive_updates for - prop in self.prop._reverse_property): + if False in ( + prop.passive_updates + for prop in self.prop._reverse_property + ): return uow.register_preprocessor(self, False) def per_property_flush_actions(self, uow): - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) + parent_saves = unitofwork.SaveUpdateAll(uow, self.parent.base_mapper) after_save = unitofwork.ProcessAll(uow, self, False, False) - uow.dependencies.update([ - (parent_saves, after_save) - ]) + uow.dependencies.update([(parent_saves, after_save)]) def per_state_flush_actions(self, uow, states, isdelete): pass @@ -848,8 +901,7 @@ class DetectKeySwitch(DependencyProcessor): def _key_switchers(self, uow, states): switched, notswitched = uow.memo( - ('pk_switchers', self), - lambda: (set(), set()) + ("pk_switchers", self), lambda: (set(), set()) ) allstates = switched.union(notswitched) @@ -871,74 +923,86 @@ class DetectKeySwitch(DependencyProcessor): continue dict_ = state.dict related = state.get_impl(self.key).get( - state, dict_, passive=self._passive_update_flag) - if related is not attributes.PASSIVE_NO_RESULT and \ - related is not None: + state, dict_, passive=self._passive_update_flag + ) + if ( + related is not attributes.PASSIVE_NO_RESULT + and related is not None + ): related_state = attributes.instance_state(dict_[self.key]) if related_state in switchers: - uowcommit.register_object(state, - False, - self.passive_updates) + uowcommit.register_object( + state, False, self.passive_updates + ) sync.populate( related_state, - self.mapper, state, - self.parent, self.prop.synchronize_pairs, - uowcommit, self.passive_updates) + self.mapper, + state, + 
self.parent, + self.prop.synchronize_pairs, + uowcommit, + self.passive_updates, + ) def _pks_changed(self, uowcommit, state): return bool(state.key) and sync.source_modified( - uowcommit, state, self.mapper, self.prop.synchronize_pairs) + uowcommit, state, self.mapper, self.prop.synchronize_pairs + ) class ManyToManyDP(DependencyProcessor): + def per_property_dependencies( + self, + uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): + + uow.dependencies.update( + [ + (parent_saves, after_save), + (child_saves, after_save), + (after_save, child_deletes), + # a rowswitch on the parent from deleted to saved + # can make this one occur, as the "save" may remove + # an element from the + # "deleted" list before we have a chance to + # process its child rows + (before_delete, parent_saves), + (before_delete, parent_deletes), + (before_delete, child_deletes), + (before_delete, child_saves), + ] + ) - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ): - - uow.dependencies.update([ - (parent_saves, after_save), - (child_saves, after_save), - (after_save, child_deletes), - - # a rowswitch on the parent from deleted to saved - # can make this one occur, as the "save" may remove - # an element from the - # "deleted" list before we have a chance to - # process its child rows - (before_delete, parent_saves), - - (before_delete, parent_deletes), - (before_delete, child_deletes), - (before_delete, child_saves), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): + def per_state_dependencies( + self, + uow, + save_parent, + delete_parent, + child_action, + after_save, + before_delete, + isdelete, + childisdelete, + ): if not isdelete: if childisdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - 
]) + uow.dependencies.update( + [(save_parent, after_save), (after_save, child_action)] + ) else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - ]) + uow.dependencies.update( + [(save_parent, after_save), (child_action, after_save)] + ) else: - uow.dependencies.update([ - (before_delete, child_action), - (before_delete, delete_parent) - ]) + uow.dependencies.update( + [(before_delete, child_action), (before_delete, delete_parent)] + ) def presort_deletes(self, uowcommit, states): # TODO: no tests fail if this whole @@ -949,9 +1013,8 @@ class ManyToManyDP(DependencyProcessor): # returns True for state in states: uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) def presort_saves(self, uowcommit, states): if not self.passive_updates: @@ -961,9 +1024,8 @@ class ManyToManyDP(DependencyProcessor): for state in states: if self._pks_changed(uowcommit, state): history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_OFF) + state, self.key, attributes.PASSIVE_OFF + ) if not self.cascade.delete_orphan: return @@ -972,20 +1034,21 @@ class ManyToManyDP(DependencyProcessor): # if delete_orphan check is turned on. 
for state in states: history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) + state, self.key, attributes.PASSIVE_NO_INITIALIZE + ) if history: for child in history.deleted: if self.hasparent(child) is False: uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) + child, + isdelete=True, + operation="delete", + prop=self.prop, + ) for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', - child): - uowcommit.register_object( - st_, isdelete=True) + "delete", child + ): + uowcommit.register_object(st_, isdelete=True) def process_deletes(self, uowcommit, states): secondary_delete = [] @@ -998,21 +1061,23 @@ class ManyToManyDP(DependencyProcessor): # this history should be cached already, as # we loaded it in preprocess_deletes history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) + state, self.key, self._passive_delete_flag + ) if history: for child in history.non_added(): - if child is None or \ - (processed is not None and - (state, child) in processed): + if child is None or ( + processed is not None and (state, child) in processed + ): continue associationrow = {} if not self._synchronize( - state, - child, - associationrow, - False, uowcommit, "delete"): + state, + child, + associationrow, + False, + uowcommit, + "delete", + ): continue secondary_delete.append(associationrow) @@ -1021,8 +1086,9 @@ class ManyToManyDP(DependencyProcessor): if processed is not None: processed.update(tmp) - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) + self._run_crud( + uowcommit, secondary_insert, secondary_update, secondary_delete + ) def process_saves(self, uowcommit, states): secondary_delete = [] @@ -1033,110 +1099,133 @@ class ManyToManyDP(DependencyProcessor): tmp = set() for state in states: - need_cascade_pks = not self.passive_updates and \ - self._pks_changed(uowcommit, state) + need_cascade_pks = not 
self.passive_updates and self._pks_changed( + uowcommit, state + ) if need_cascade_pks: passive = attributes.PASSIVE_OFF else: passive = attributes.PASSIVE_NO_INITIALIZE - history = uowcommit.get_attribute_history(state, self.key, - passive) + history = uowcommit.get_attribute_history(state, self.key, passive) if history: for child in history.added: - if (processed is not None and - (state, child) in processed): + if processed is not None and (state, child) in processed: continue associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "add"): + if not self._synchronize( + state, child, associationrow, False, uowcommit, "add" + ): continue secondary_insert.append(associationrow) for child in history.deleted: - if (processed is not None and - (state, child) in processed): + if processed is not None and (state, child) in processed: continue associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "delete"): + if not self._synchronize( + state, + child, + associationrow, + False, + uowcommit, + "delete", + ): continue secondary_delete.append(associationrow) - tmp.update((c, state) - for c in history.added + history.deleted) + tmp.update((c, state) for c in history.added + history.deleted) if need_cascade_pks: for child in history.unchanged: associationrow = {} - sync.update(state, - self.parent, - associationrow, - "old_", - self.prop.synchronize_pairs) - sync.update(child, - self.mapper, - associationrow, - "old_", - self.prop.secondary_synchronize_pairs) + sync.update( + state, + self.parent, + associationrow, + "old_", + self.prop.synchronize_pairs, + ) + sync.update( + child, + self.mapper, + associationrow, + "old_", + self.prop.secondary_synchronize_pairs, + ) secondary_update.append(associationrow) if processed is not None: processed.update(tmp) - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) + self._run_crud( + uowcommit, 
secondary_insert, secondary_update, secondary_delete + ) - def _run_crud(self, uowcommit, secondary_insert, - secondary_update, secondary_delete): + def _run_crud( + self, uowcommit, secondary_insert, secondary_update, secondary_delete + ): connection = uowcommit.transaction.connection(self.mapper) if secondary_delete: associationrow = secondary_delete[0] - statement = self.secondary.delete(sql.and_(*[ - c == sql.bindparam(c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) + statement = self.secondary.delete( + sql.and_( + *[ + c == sql.bindparam(c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ] + ) + ) result = connection.execute(statement, secondary_delete) - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_delete): + if result.supports_sane_multi_rowcount() and result.rowcount != len( + secondary_delete + ): raise exc.StaleDataError( "DELETE statement on table '%s' expected to delete " - "%d row(s); Only %d were matched." % - (self.secondary.description, len(secondary_delete), - result.rowcount) + "%d row(s); Only %d were matched." + % ( + self.secondary.description, + len(secondary_delete), + result.rowcount, + ) ) if secondary_update: associationrow = secondary_update[0] - statement = self.secondary.update(sql.and_(*[ - c == sql.bindparam("old_" + c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) + statement = self.secondary.update( + sql.and_( + *[ + c == sql.bindparam("old_" + c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ] + ) + ) result = connection.execute(statement, secondary_update) - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_update): + if result.supports_sane_multi_rowcount() and result.rowcount != len( + secondary_update + ): raise exc.StaleDataError( "UPDATE statement on table '%s' expected to update " - "%d row(s); Only %d were matched." 
% - (self.secondary.description, len(secondary_update), - result.rowcount) + "%d row(s); Only %d were matched." + % ( + self.secondary.description, + len(secondary_update), + result.rowcount, + ) ) if secondary_insert: statement = self.secondary.insert() connection.execute(statement, secondary_insert) - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation): + def _synchronize( + self, state, child, associationrow, clearkeys, uowcommit, operation + ): # this checks for None if uselist=True self._verify_canload(child) @@ -1150,23 +1239,28 @@ class ManyToManyDP(DependencyProcessor): if not child.deleted: util.warn( "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) + "operation along '%s' won't proceed" + % (mapperutil.state_class_str(child), operation, self.prop) + ) return False - sync.populate_dict(state, self.parent, associationrow, - self.prop.synchronize_pairs) - sync.populate_dict(child, self.mapper, associationrow, - self.prop.secondary_synchronize_pairs) + sync.populate_dict( + state, self.parent, associationrow, self.prop.synchronize_pairs + ) + sync.populate_dict( + child, + self.mapper, + associationrow, + self.prop.secondary_synchronize_pairs, + ) return True def _pks_changed(self, uowcommit, state): return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) + uowcommit, state, self.parent, self.prop.synchronize_pairs + ) + _direction_to_processor = { ONETOMANY: OneToManyDP, |
