author     jonathan vanasco <jonathan@2xlp.com>    2015-12-16 11:04:25 -0500
committer  jonathan vanasco <jonathan@2xlp.com>    2015-12-16 11:04:25 -0500
commit     ce25ac172d3b1be81025b7b541a9aa32b0286974 (patch)
tree       7920084df122b2df19a44b2946ab0e52d4fe5958
parent     0a5dcdc2c4112478d87e5cd68c187e302f586834 (diff)
parent     03ee22f342bbef9b15bfc989edda6a4ac3910508 (diff)
download   sqlalchemy-ce25ac172d3b1be81025b7b541a9aa32b0286974.tar.gz
Merge branch 'master' of bitbucket.org:zzzeek/sqlalchemy
-rw-r--r--  .gitignore | 1
-rw-r--r--  doc/build/changelog/changelog_09.rst | 39
-rw-r--r--  doc/build/changelog/changelog_10.rst | 492
-rw-r--r--  doc/build/changelog/changelog_11.rst | 449
-rw-r--r--  doc/build/changelog/index.rst | 4
-rw-r--r--  doc/build/changelog/migration_10.rst | 2
-rw-r--r--  doc/build/changelog/migration_11.rst | 1124
-rw-r--r--  doc/build/conf.py | 6
-rw-r--r--  doc/build/core/ddl.rst | 240
-rw-r--r--  doc/build/core/defaults.rst | 22
-rw-r--r--  doc/build/core/events.rst | 4
-rw-r--r--  doc/build/core/metadata.rst | 5
-rw-r--r--  doc/build/core/pooling.rst | 97
-rw-r--r--  doc/build/core/sqlelement.rst | 12
-rw-r--r--  doc/build/core/tutorial.rst | 282
-rw-r--r--  doc/build/core/type_api.rst | 4
-rw-r--r--  doc/build/core/type_basics.rst | 3
-rw-r--r--  doc/build/dialects/index.rst | 2
-rw-r--r--  doc/build/dialects/postgresql.rst | 7
-rw-r--r--  doc/build/faq/connections.rst | 81
-rw-r--r--  doc/build/faq/sessions.rst | 73
-rw-r--r--  doc/build/glossary.rst | 23
-rw-r--r--  doc/build/index.rst | 2
-rw-r--r--  doc/build/intro.rst | 59
-rw-r--r--  doc/build/orm/basic_relationships.rst | 96
-rw-r--r--  doc/build/orm/events.rst | 10
-rw-r--r--  doc/build/orm/examples.rst | 2
-rw-r--r--  doc/build/orm/extensions/associationproxy.rst | 1
-rw-r--r--  doc/build/orm/extensions/baked.rst | 8
-rw-r--r--  doc/build/orm/inheritance.rst | 6
-rw-r--r--  doc/build/orm/loading_relationships.rst | 44
-rw-r--r--  doc/build/orm/mapped_sql_expr.rst | 2
-rw-r--r--  doc/build/orm/persistence_techniques.rst | 106
-rw-r--r--  doc/build/orm/relationship_persistence.rst | 122
-rw-r--r--  doc/build/orm/session.rst | 1
-rw-r--r--  doc/build/orm/session_events.rst | 436
-rw-r--r--  doc/build/orm/session_state_management.rst | 108
-rw-r--r--  doc/build/orm/session_transaction.rst | 9
-rw-r--r--  doc/build/orm/tutorial.rst | 351
-rw-r--r--  doc/build/testdocs.py | 69
-rw-r--r--  examples/versioned_history/history_meta.py | 8
-rw-r--r--  examples/versioned_history/test_versioning.py | 65
-rw-r--r--  lib/sqlalchemy/__init__.py | 5
-rw-r--r--  lib/sqlalchemy/dialects/firebird/base.py | 2
-rw-r--r--  lib/sqlalchemy/dialects/mssql/base.py | 126
-rw-r--r--  lib/sqlalchemy/dialects/mssql/pymssql.py | 3
-rw-r--r--  lib/sqlalchemy/dialects/mysql/base.py | 46
-rw-r--r--  lib/sqlalchemy/dialects/oracle/base.py | 5
-rw-r--r--  lib/sqlalchemy/dialects/oracle/cx_oracle.py | 7
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/__init__.py | 17
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/array.py | 306
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/base.py | 547
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/ext.py (renamed from lib/sqlalchemy/dialects/postgresql/constraints.py) | 78
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/hstore.py | 278
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/json.py | 400
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2
-rw-r--r--  lib/sqlalchemy/dialects/sqlite/base.py | 55
-rw-r--r--  lib/sqlalchemy/dialects/sybase/base.py | 15
-rw-r--r--  lib/sqlalchemy/engine/__init__.py | 25
-rw-r--r--  lib/sqlalchemy/engine/base.py | 6
-rw-r--r--  lib/sqlalchemy/engine/interfaces.py | 6
-rw-r--r--  lib/sqlalchemy/engine/result.py | 25
-rw-r--r--  lib/sqlalchemy/event/attr.py | 14
-rw-r--r--  lib/sqlalchemy/events.py | 5
-rw-r--r--  lib/sqlalchemy/ext/associationproxy.py | 13
-rw-r--r--  lib/sqlalchemy/ext/automap.py | 2
-rw-r--r--  lib/sqlalchemy/ext/baked.py | 32
-rw-r--r--  lib/sqlalchemy/ext/declarative/api.py | 13
-rw-r--r--  lib/sqlalchemy/ext/declarative/base.py | 1
-rw-r--r--  lib/sqlalchemy/ext/declarative/clsregistry.py | 3
-rw-r--r--  lib/sqlalchemy/ext/hybrid.py | 2
-rw-r--r--  lib/sqlalchemy/ext/mutable.py | 10
-rw-r--r--  lib/sqlalchemy/orm/__init__.py | 5
-rw-r--r--  lib/sqlalchemy/orm/attributes.py | 71
-rw-r--r--  lib/sqlalchemy/orm/collections.py | 75
-rw-r--r--  lib/sqlalchemy/orm/dependency.py | 8
-rw-r--r--  lib/sqlalchemy/orm/dynamic.py | 7
-rw-r--r--  lib/sqlalchemy/orm/events.py | 644
-rw-r--r--  lib/sqlalchemy/orm/identity.py | 37
-rw-r--r--  lib/sqlalchemy/orm/interfaces.py | 2
-rw-r--r--  lib/sqlalchemy/orm/loading.py | 24
-rw-r--r--  lib/sqlalchemy/orm/mapper.py | 75
-rw-r--r--  lib/sqlalchemy/orm/persistence.py | 169
-rw-r--r--  lib/sqlalchemy/orm/properties.py | 4
-rw-r--r--  lib/sqlalchemy/orm/query.py | 302
-rw-r--r--  lib/sqlalchemy/orm/relationships.py | 83
-rw-r--r--  lib/sqlalchemy/orm/session.py | 286
-rw-r--r--  lib/sqlalchemy/orm/state.py | 122
-rw-r--r--  lib/sqlalchemy/orm/strategies.py | 24
-rw-r--r--  lib/sqlalchemy/orm/strategy_options.py | 2
-rw-r--r--  lib/sqlalchemy/orm/util.py | 15
-rw-r--r--  lib/sqlalchemy/pool.py | 5
-rw-r--r--  lib/sqlalchemy/sql/__init__.py | 2
-rw-r--r--  lib/sqlalchemy/sql/compiler.py | 118
-rw-r--r--  lib/sqlalchemy/sql/crud.py | 111
-rw-r--r--  lib/sqlalchemy/sql/default_comparator.py | 35
-rw-r--r--  lib/sqlalchemy/sql/dml.py | 162
-rw-r--r--  lib/sqlalchemy/sql/elements.py | 450
-rw-r--r--  lib/sqlalchemy/sql/expression.py | 14
-rw-r--r--  lib/sqlalchemy/sql/functions.py | 196
-rw-r--r--  lib/sqlalchemy/sql/operators.py | 66
-rw-r--r--  lib/sqlalchemy/sql/schema.py | 232
-rw-r--r--  lib/sqlalchemy/sql/selectable.py | 20
-rw-r--r--  lib/sqlalchemy/sql/sqltypes.py | 306
-rw-r--r--  lib/sqlalchemy/sql/type_api.py | 115
-rw-r--r--  lib/sqlalchemy/sql/util.py | 23
-rw-r--r--  lib/sqlalchemy/testing/__init__.py | 3
-rw-r--r--  lib/sqlalchemy/testing/assertions.py | 12
-rw-r--r--  lib/sqlalchemy/testing/assertsql.py | 17
-rw-r--r--  lib/sqlalchemy/testing/distutils_run.py | 11
-rw-r--r--  lib/sqlalchemy/testing/exclusions.py | 13
-rw-r--r--  lib/sqlalchemy/testing/fixtures.py | 10
-rw-r--r--  lib/sqlalchemy/testing/plugin/plugin_base.py | 7
-rw-r--r--  lib/sqlalchemy/testing/provision.py | 34
-rw-r--r--  lib/sqlalchemy/testing/requirements.py | 26
-rw-r--r--  lib/sqlalchemy/testing/schema.py | 5
-rw-r--r--  lib/sqlalchemy/testing/suite/test_reflection.py | 8
-rw-r--r--  lib/sqlalchemy/testing/suite/test_select.py | 124
-rw-r--r--  lib/sqlalchemy/types.py | 6
-rw-r--r--  lib/sqlalchemy/util/__init__.py | 4
-rw-r--r--  lib/sqlalchemy/util/compat.py | 1
-rw-r--r--  lib/sqlalchemy/util/langhelpers.py | 36
-rw-r--r--  setup.cfg | 2
-rw-r--r--  setup.py | 173
-rw-r--r--  test/aaa_profiling/test_compiler.py | 4
-rw-r--r--  test/base/test_tutorials.py | 144
-rw-r--r--  test/base/test_utils.py | 104
-rw-r--r--  test/dialect/mssql/test_compiler.py | 2
-rw-r--r--  test/dialect/mssql/test_query.py | 16
-rw-r--r--  test/dialect/mssql/test_reflection.py | 38
-rw-r--r--  test/dialect/mssql/test_types.py | 126
-rw-r--r--  test/dialect/mysql/test_compiler.py | 8
-rw-r--r--  test/dialect/mysql/test_query.py | 55
-rw-r--r--  test/dialect/mysql/test_reflection.py | 328
-rw-r--r--  test/dialect/postgresql/test_compiler.py | 81
-rw-r--r--  test/dialect/postgresql/test_query.py | 620
-rw-r--r--  test/dialect/postgresql/test_reflection.py | 11
-rw-r--r--  test/dialect/postgresql/test_types.py | 574
-rw-r--r--  test/dialect/test_oracle.py | 26
-rw-r--r--  test/dialect/test_sqlite.py | 85
-rw-r--r--  test/engine/test_pool.py | 95
-rw-r--r--  test/engine/test_reflection.py | 19
-rw-r--r--  test/ext/declarative/test_basic.py | 26
-rw-r--r--  test/ext/declarative/test_inheritance.py | 30
-rw-r--r--  test/ext/declarative/test_mixin.py | 2
-rw-r--r--  test/ext/test_associationproxy.py | 20
-rw-r--r--  test/ext/test_baked.py | 142
-rw-r--r--  test/ext/test_mutable.py | 32
-rw-r--r--  test/orm/inheritance/test_poly_persistence.py | 38
-rw-r--r--  test/orm/inheritance/test_relationship.py | 212
-rw-r--r--  test/orm/inheritance/test_single.py | 15
-rw-r--r--  test/orm/test_bulk.py | 55
-rw-r--r--  test/orm/test_composites.py | 3
-rw-r--r--  test/orm/test_cycles.py | 49
-rw-r--r--  test/orm/test_eager_relations.py | 113
-rw-r--r--  test/orm/test_events.py | 537
-rw-r--r--  test/orm/test_hasparent.py | 4
-rw-r--r--  test/orm/test_lazy_relations.py | 75
-rw-r--r--  test/orm/test_load_on_fks.py | 3
-rw-r--r--  test/orm/test_mapper.py | 1248
-rw-r--r--  test/orm/test_merge.py | 95
-rw-r--r--  test/orm/test_options.py | 12
-rw-r--r--  test/orm/test_query.py | 189
-rw-r--r--  test/orm/test_relationships.py | 11
-rw-r--r--  test/orm/test_session.py | 108
-rw-r--r--  test/orm/test_transaction.py | 36
-rw-r--r--  test/orm/test_unitofwork.py | 2
-rw-r--r--  test/orm/test_unitofworkv2.py | 659
-rw-r--r--  test/orm/test_update_delete.py | 39
-rw-r--r--  test/orm/test_versioning.py | 144
-rw-r--r--  test/profiles.txt | 58
-rw-r--r--  test/requirements.py | 33
-rw-r--r--  test/sql/test_compiler.py | 116
-rw-r--r--  test/sql/test_defaults.py | 91
-rw-r--r--  test/sql/test_functions.py | 145
-rw-r--r--  test/sql/test_insert.py | 157
-rw-r--r--  test/sql/test_insert_exec.py | 445
-rw-r--r--  test/sql/test_metadata.py | 133
-rw-r--r--  test/sql/test_operators.py | 467
-rw-r--r--  test/sql/test_query.py | 1326
-rw-r--r--  test/sql/test_resultset.py | 1136
-rw-r--r--  test/sql/test_returning.py | 27
-rw-r--r--  test/sql/test_selectable.py | 24
-rw-r--r--  test/sql/test_types.py | 192
-rw-r--r--  test/sql/test_update.py | 124
-rw-r--r--  tox.ini | 9
186 files changed, 16501 insertions, 5045 deletions
diff --git a/.gitignore b/.gitignore
index 55066f843..81fd2d9ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,4 @@ coverage.xml
sqlnet.log
/mapping_setup.py
/test.py
+/.cache/
diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst
index 2d2964ba4..be8872975 100644
--- a/doc/build/changelog/changelog_09.rst
+++ b/doc/build/changelog/changelog_09.rst
@@ -12,7 +12,46 @@
:start-line: 5
.. changelog::
+ :version: 0.9.11
+
+ .. change::
+ :tags: bug, oracle, py3k
+ :tickets: 3491
+ :versions: 1.1.0b1, 1.0.9
+
+ Fixed support for cx_Oracle version 5.2, which was tripping
+ up SQLAlchemy's version detection under Python 3 and inadvertently
+ not using the correct unicode mode for Python 3. This would cause
+ issues such as bound variables mis-interpreted as NULL and rows
+ silently not being returned.
+
+ .. change::
+ :tags: bug, engine
+ :tickets: 3497
+ :versions: 1.0.8
+
+ Fixed critical issue whereby the pool "checkout" event handler
+ may be called against a stale connection without the "connect"
+ event handler having been called, in the case where the pool
+ attempted to reconnect after being invalidated and failed; the stale
+ connection would remain present and would be used on a subsequent
+ attempt. This issue has a greater impact in the 1.0 series subsequent
+ to 1.0.2, as it also delivers a blanked-out ``.info`` dictionary to
+ the event handler; prior to 1.0.2 the ``.info`` dictionary is still
+ the previous one.
+
+.. changelog::
:version: 0.9.10
+ :released: July 22, 2015
+
+ .. change::
+ :tags: bug, sqlite
+ :tickets: 3495
+ :versions: 1.0.8
+
+ Fixed bug in the SQLite dialect where UNIQUE constraints that
+ included non-alphabetic characters in their names, like dots or
+ spaces, would not be reflected with their name.
.. change::
:tags: feature, sql
diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst
index 8ac3d5844..b9eae73e6 100644
--- a/doc/build/changelog/changelog_10.rst
+++ b/doc/build/changelog/changelog_10.rst
@@ -16,7 +16,499 @@
:start-line: 5
.. changelog::
+ :version: 1.0.11
+
+ .. change::
+ :tags: bug, ext
+ :tickets: 3612
+ :versions: 1.1.0b1
+
+ Fixed bug in baked loader system where the systemwide monkeypatch
+ for setting up baked lazy loaders would interfere with other
+ loader strategies that rely on lazy loading as a fallback, e.g.
+ joined and subquery eager loaders, leading to ``IndexError``
+ exceptions at mapper configuration time.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3611
+ :versions: 1.1.0b1
+
+ Fixed regression caused in 1.0.10 by the fix for :ticket:`3593` where
+ the check added for a polymorphic joinedload from a
+ poly_subclass->class->poly_baseclass connection would fail for the
+ scenario of class->poly_subclass->class.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3610
+ :versions: 1.1.0b1
+
+ Fixed bug where :meth:`.Session.bulk_update_mappings` and related
+ would not bump a version id counter when in use. The experience
+ here is still a little rough as the original version id is required
+ in the given dictionaries and there's no clean error reporting
+ on that yet.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3609
+ :versions: 1.1.0b1
+
+ Fixed bug in :meth:`.Update.return_defaults` which would cause all
+ insert-default holding columns not otherwise included in the SET
+ clause (such as primary key cols) to get rendered into the RETURNING
+ even though this is an UPDATE.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3609
+ :versions: 1.1.0b1
+
+ Major fixes to the :paramref:`.Mapper.eager_defaults` flag; this
+ flag would not be honored correctly in the case that multiple
+ UPDATE statements were to be emitted, either as part of a flush
+ or a bulk update operation. Additionally, RETURNING
+ would be emitted unnecessarily within update statements.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3606
+ :versions: 1.1.0b1
+
+ Fixed bug where use of the :meth:`.Query.select_from` method would
+ cause a subsequent call to the :meth:`.Query.with_parent` method to
+ fail.
+
+.. changelog::
+ :version: 1.0.10
+ :released: December 11, 2015
+
+ .. change::
+ :tags: bug, ext
+ :tickets: 3605
+ :versions: 1.1.0b1
+
+ Added support for the ``dict.pop()`` and ``dict.popitem()`` methods
+ to the :class:`.mutable.MutableDict` class.
+
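+ A minimal sketch (the ``data`` attribute and its ``PickleType`` column
+ are illustrative only)::
+
+ data = Column(MutableDict.as_mutable(PickleType))
+
+ # later, against a loaded object:
+ obj.data.pop('some_key', None)   # now flags the parent as changed
+ obj.data.popitem()               # likewise tracked for the next flush
+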
+ .. change::
+ :tags: change, tests
+ :versions: 1.1.0b1
+
+ The ORM and Core tutorials, which have always been in doctest format,
+ are now exercised within the normal unit test suite in both Python
+ 2 and Python 3.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3603
+ :versions: 1.1.0b1
+
+ Fixed issue within the :meth:`.Insert.from_select` construct whereby
+ the :class:`.Select` construct would have its ``._raw_columns``
+ collection mutated in-place when compiling the :class:`.Insert`
+ construct, when the target :class:`.Table` has Python-side defaults.
+ The :class:`.Select` construct would compile standalone with the
+ erroneous column present subsequent to compilation of the
+ :class:`.Insert`, and the :class:`.Insert` statement itself would
+ fail on a second compile attempt due to duplicate bound parameters.
+
+ .. change::
+ :tags: bug, mysql
+ :tickets: 3602
+ :versions: 1.1.0b1
+
+ Fixed bug in MySQL reflection where the "fractional seconds portion"
+ of the :class:`.mysql.DATETIME`, :class:`.mysql.TIMESTAMP` and
+ :class:`.mysql.TIME` types would be incorrectly placed into the
+ ``timezone`` attribute, which is unused by MySQL, instead of the
+ ``fsp`` attribute.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3599
+ :versions: 1.1.0b1
+
+ Fixed issue where post_update on a many-to-one relationship would
+ fail to emit an UPDATE in the case where the attribute was set to
+ None and not previously loaded.
+
+ .. change::
+ :tags: bug, sql, postgresql
+ :tickets: 3598
+ :versions: 1.1.0b1
+
+ Fixed bug where CREATE TABLE for a table with no columns, but with a
+ constraint such as a CHECK constraint, would render an erroneous comma
+ in the definition; this scenario can occur, for example, with a
+ Postgresql INHERITS table that has no columns of its own.
+
+ .. change::
+ :tags: bug, mssql
+ :tickets: 3585
+ :versions: 1.1.0b1
+
+ Added the error "20006: Write to the server failed" to the list
+ of disconnect errors for the pymssql driver, as this has been observed
+ to render a connection unusable.
+
+ .. change::
+ :tags: bug, postgresql
+ :pullreq: github:216
+ :tickets: 3573
+ :versions: 1.1.0b1
+
+ Fixed issue where the "FOR UPDATE OF" Postgresql-specific SELECT
+ modifier would fail if the referred table had a schema qualifier;
+ PG needs the schema name to be omitted. Pull request courtesy
+ Diana Clarke.
+
+ .. change::
+ :tags: bug, postgresql
+ :pullreq: github:215
+ :versions: 1.1.0b1
+
+ Fixed bug where some varieties of SQL expression passed to the
+ "where" clause of :class:`.postgresql.ExcludeConstraint` would fail
+ to be accepted correctly. Pull request courtesy aisch.
+
+ .. change::
+ :tags: bug, orm, declarative
+ :pullreq: github:212
+ :versions: 1.1.0b1
+
+ Fixed bug where in Py2K a unicode literal would not be accepted as the
+ string name of a class or other argument within declarative using
+ :func:`.backref` on :func:`.relationship`. Pull request courtesy
+ Nils Philippsen.
+
+ .. change::
+ :tags: bug, mssql
+ :versions: 1.1.0b1
+ :pullreq: github:206
+
+ A descriptive ValueError is now raised in the event that SQL Server
+ returns an invalid date or time format from a DATE or TIME
+ column, rather than failing with a NoneType error. Pull request
+ courtesy Ed Avis.
+
+ .. change::
+ :tags: bug, py3k
+ :versions: 1.1.0b1
+ :pullreq: github:210, github:218, github:211
+
+ Updates to internal getargspec() calls, some py36-related
+ fixture updates, and alterations to two iterators to "return" instead
+ of raising StopIteration, to allow tests to pass without
+ errors or warnings on Py3.5 and Py3.6. Pull requests courtesy
+ Jacob MacDonald, Luri de Silvio, and Phil Jones.
+
+ .. change::
+ :tags: bug, ext
+ :versions: 1.1.0b1
+ :tickets: 3597
+
+ Fixed an issue in baked queries where the .get() method, used either
+ directly or within lazy loads, didn't consider the mapper's "get clause"
+ as part of the cache key, causing bound parameter mismatches if the
+ clause got re-generated. This clause is cached by mappers
+ on the fly but in highly concurrent scenarios may be generated more
+ than once when first accessed.
+
+ .. change::
+ :tags: feature, sql
+ :versions: 1.1.0b1
+ :pullreq: github:200
+
+ Added support for parameter-ordered SET clauses in an UPDATE
+ statement. This feature is available by passing the
+ :paramref:`~.sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag either to the core :class:`.Update` construct or alternatively
+ adding it to the :paramref:`.Query.update.update_args` dictionary at
+ the ORM-level, also passing the parameters themselves as a list of 2-tuples.
+ Thanks to Gorka Eguileor for implementation and tests.
+
+ .. seealso::
+
+ :ref:`updates_order_parameters`
+
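+ A minimal sketch of the Core form (the ``stuff`` table and its columns
+ are illustrative only)::
+
+ stmt = stuff.update(preserve_parameter_order=True).\
+ values([('name', 'some name'), ('status', 'some status')])
+
+ Here the SET clause renders ``name`` before ``status``, in the order
+ the 2-tuples were given.
+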
+ .. change::
+ :tags: bug, orm
+ :versions: 1.1.0b1
+ :tickets: 3593
+
+ Fixed bug which is actually a regression that occurred between
+ versions 0.8.0 and 0.8.1, due to :ticket:`2714`. The case where
+ joined eager loading needs to join out over a subclass-bound
+ relationship when "with_polymorphic" was also used would fail
+ to join from the correct entity.
+
+ .. change::
+ :tags: bug, orm
+ :versions: 1.1.0b1
+ :tickets: 3592
+
+ Fixed joinedload bug which would occur when (a) the query includes
+ limit/offset criteria that forces a subquery, (b) the relationship
+ uses "secondary", (c) the primaryjoin of the relationship refers to
+ a column that is either not part of the primary key, or is a PK
+ col in a joined-inheritance subclass table that is under a different
+ attribute name than the parent table's primary key column, and (d) the
+ query defers the columns that are present in the primaryjoin, typically
+ via not being included in load_only(); the necessary column(s) would
+ not be present in the subquery, producing invalid SQL.
+
+ .. change::
+ :tags: bug, orm
+ :versions: 1.1.0b1
+ :tickets: 2696
+
+ A rare case which occurs when a :meth:`.Session.rollback` fails in the
+ scope of a :meth:`.Session.flush` operation that's raising an
+ exception, as has been observed in some MySQL SAVEPOINT cases, prevents
+ the original database exception from being observed when it was
+ emitted during flush, but only on Py2K because Py2K does not support
+ exception chaining; on Py3K the originating exception is chained. As
+ a workaround, a warning is emitted in this specific case showing at
+ least the string message of the original database error before we
+ proceed to raise the rollback-originating exception.
+
+ .. change::
+ :tags: bug, postgresql
+ :versions: 1.1.0b1
+ :tickets: 3571
+
+ Fixed the ``.python_type`` attribute of :class:`.postgresql.INTERVAL`
+ to return ``datetime.timedelta`` in the same way as that of
+ :obj:`.types.Interval.python_type`, rather than raising
+ ``NotImplementedError``.
+
+ .. change::
+ :tags: bug, mssql
+ :pullreq: github:213
+ :versions: 1.1.0b1
+
+ Fixed issue where DDL generated for the MSSQL types DATETIME2,
+ TIME and DATETIMEOFFSET with a precision of "zero" would not generate
+ the precision field. Pull request courtesy Jacobo de Vera.
+
+
+.. changelog::
+ :version: 1.0.9
+ :released: October 20, 2015
+
+ .. change::
+ :tags: bug, orm, postgresql
+ :versions: 1.1.0b1
+ :tickets: 3556
+
+ Fixed regression in 1.0 where the new feature of using "executemany"
+ for UPDATE statements in the ORM (e.g. :ref:`feature_updatemany`)
+ would break on Postgresql and other RETURNING backends
+ when using server-side version generation
+ schemes, as the server side value is retrieved via RETURNING which
+ is not supported with executemany.
+
+ .. change::
+ :tags: feature, ext
+ :versions: 1.1.0b1
+ :tickets: 3551
+
+ Added the :paramref:`.AssociationProxy.info` parameter to the
+ :class:`.AssociationProxy` constructor, to suit the
+ :attr:`.AssociationProxy.info` accessor that was added in
+ :ticket:`2971`. This is possible because :class:`.AssociationProxy`
+ is constructed explicitly, unlike a hybrid which is constructed
+ implicitly via the decorator syntax.
+
+ .. change::
+ :tags: bug, oracle
+ :versions: 1.1.0b1
+ :tickets: 3548
+
+ Fixed bug in Oracle dialect where reflection of tables and other
+ symbols with names quoted to force all-lower-case would not be
+ identified properly in reflection queries. The :class:`.quoted_name`
+ construct is now applied to incoming symbol names that detect as
+ forced into all-lower-case within the "name normalize" process.
+
+ .. change::
+ :tags: feature, orm
+ :versions: 1.1.0b1
+ :pullreq: github:201
+
+ Added new method :meth:`.Query.one_or_none`; same as
+ :meth:`.Query.one` but returns None if no row found. Pull request
+ courtesy esiegerman.
+
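+ A short sketch (the ``User`` model is illustrative only)::
+
+ user = session.query(User).filter(User.name == 'ed').one_or_none()
+ if user is None:
+ # zero rows matched; .one() would raise NoResultFound here
+ ...
+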
+ .. change::
+ :tags: bug, orm
+ :versions: 1.1.0b1
+ :tickets: 3539
+
+ Fixed rare TypeError which could occur when stringifying certain
+ kinds of internal column loader options within internal logging.
+
+ .. change::
+ :tags: bug, orm
+ :versions: 1.1.0b1
+ :tickets: 3525
+
+ Fixed bug in :meth:`.Session.bulk_save_objects` where a mapped
+ column that had some kind of "fetch on update" value and was not
+ locally present in the given object would cause an AttributeError
+ within the operation.
+
+ .. change::
+ :tags: bug, sql
+ :versions: 1.1.0b1
+ :tickets: 3520
+
+ Fixed regression in 1.0-released default-processor for multi-VALUES
+ insert statement, :ticket:`3288`, where the column type for the
+ default-holding column would not be propagated to the compiled
+ statement in the case where the default was being used,
+ leading to bind-level type handlers not being invoked.
+
+ .. change::
+ :tags: bug, examples
+ :versions: 1.1.0b1
+
+ Fixed two issues in the "history_meta" example where history tracking
+ could encounter empty history, and where a column keyed to an alternate
+ attribute name would fail to track properly. Fixes courtesy
+ Alex Fraser.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3510
+ :versions: 1.1.0b1
+
+ Fixed 1.0 regression where the "noload" loader strategy would fail
+ to function for a many-to-one relationship. The loader used an
+ API to place "None" into the dictionary which no longer actually
+ writes a value; this is a side effect of :ticket:`3061`.
+
+ .. change::
+ :tags: bug, sybase
+ :tickets: 3508, 3509
+ :versions: 1.1.0b1
+
+ Fixed two issues regarding Sybase reflection, allowing tables
+ without primary keys to be reflected, as well as ensuring that
+ a SQL statement involved in foreign key detection is pre-fetched up
+ front to avoid driver issues upon nested queries. Fixes here
+ courtesy Eugene Zapolsky; note that we cannot currently test
+ Sybase to locally verify these changes.
+
+ .. change::
+ :tags: bug, postgresql
+ :pullreq: github:190
+ :versions: 1.1.0b1
+
+ An adjustment to the new Postgresql feature of reflecting storage
+ options and USING of :ticket:`3455` released in 1.0.6,
+ to disable the feature for Postgresql versions < 8.2 where the
+ ``reloptions`` column is not provided; this allows Amazon Redshift
+ to again work as it is based on an 8.0.x version of Postgresql.
+ Fix courtesy Pete Hollobon.
+
+
+.. changelog::
+ :version: 1.0.8
+ :released: July 22, 2015
+
+ .. change::
+ :tags: bug, misc
+ :tickets: 3494
+
+ Fixed an issue where a particular base class within utils
+ didn't implement ``__slots__``, which meant that all subclasses
+ of that class didn't either, negating the rationale for ``__slots__``
+ to be in use. This didn't cause any issue except on IronPython,
+ which apparently does not implement ``__slots__`` behavior compatibly
+ with cPython.
+
+
+.. changelog::
:version: 1.0.7
+ :released: July 20, 2015
+
+ .. change::
+ :tags: feature, sql
+ :tickets: 3459
+ :pullreq: bitbucket:56
+
+ Added a :meth:`.ColumnElement.cast` method which performs the same
+ purpose as the standalone :func:`.cast` function. Pull request
+ courtesy Sebastian Bank.
+
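+ A brief sketch (the ``mytable`` object and its column are illustrative
+ only)::
+
+ stmt = select([mytable.c.amount.cast(Numeric(10, 4))])
+ # equivalent to the standalone form:
+ stmt = select([cast(mytable.c.amount, Numeric(10, 4))])
+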
+ .. change::
+ :tags: bug, engine
+ :tickets: 3481
+
+ Fixed regression where new methods on :class:`.ResultProxy` used
+ by the ORM :class:`.Query` object (part of the performance
+ enhancements of :ticket:`3175`) would not raise the "this result
+ does not return rows" exception in the case where the driver
+ (typically MySQL) fails to generate cursor.description correctly;
+ an AttributeError against NoneType would be raised instead.
+
+ .. change::
+ :tags: bug, engine
+ :tickets: 3483
+
+ Fixed regression where :meth:`.ResultProxy.keys` would return
+ un-adjusted internal symbol names for "anonymous" labels, which
+ are the "foo_1" types of labels we see generated for SQL functions
+ without labels and similar. This was a side effect of the
+ performance enhancements implemented as part of #918.
+
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3490
+
+ Fixed bug where coercion of literal ``True`` or ``False`` constant
+ in conjunction with :func:`.and_` or :func:`.or_` would fail
+ with an AttributeError.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3485
+
+ Fixed potential issue where a custom subclass
+ of :class:`.FunctionElement` or other column element that incorrectly
+ states 'None' or any other invalid object as the ``.type``
+ attribute now reports an informative exception instead of a recursion overflow.
+
+ .. change::
+ :tags: bug, sql
+ :pullreq: github:188
+
+ Fixed bug where the modulus SQL operator wouldn't work in reverse
+ due to a missing ``__rmod__`` method. Pull request courtesy
+ dan-gittik.
+
+ .. change::
+ :tags: feature, schema
+ :pullreq: github:186
+
+ Added support for the MINVALUE, MAXVALUE, NO MINVALUE, NO MAXVALUE,
+ and CYCLE arguments for CREATE SEQUENCE as supported by Postgresql
+ and Oracle. Pull request courtesy jakeogh.
+
+ .. change::
+ :tags: bug, orm, declarative
+ :tickets: 3480
+
+ Fixed bug in :class:`.AbstractConcreteBase` extension where
+ a column set up on the ABC base which had a different attribute
+ name vs. column name would not be correctly mapped on the final
+ base class. The failure on 0.9 would be silent whereas on
+ 1.0 it raised an ArgumentError, so may not have been noticed
+ prior to 1.0.
.. change::
:tags: bug, orm
diff --git a/doc/build/changelog/changelog_11.rst b/doc/build/changelog/changelog_11.rst
new file mode 100644
index 000000000..0d9f997f9
--- /dev/null
+++ b/doc/build/changelog/changelog_11.rst
@@ -0,0 +1,449 @@
+
+
+==============
+1.1 Changelog
+==============
+
+.. changelog_imports::
+
+ .. include:: changelog_10.rst
+ :start-line: 5
+
+ .. include:: changelog_09.rst
+ :start-line: 5
+
+ .. include:: changelog_08.rst
+ :start-line: 5
+
+ .. include:: changelog_07.rst
+ :start-line: 5
+
+.. changelog::
+ :version: 1.1.0b1
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3601
+
+ The :meth:`.Session.merge` method now tracks pending objects by
+ primary key before emitting an INSERT, and merges distinct objects with
+ duplicate primary keys together as they are encountered, which is
+ essentially semi-deterministic at best. This behavior
+ matches what happens already with persistent objects.
+
+ .. seealso::
+
+ :ref:`change_3601`
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3587
+
+ Added support for reflecting the source of materialized views
+ to the Postgresql version of the :meth:`.Inspector.get_view_definition`
+ method.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3582
+
+ Fixed bug where the "single table inheritance" criteria would be
+ added onto the end of a query in some inappropriate situations, such
+ as when querying from an exists() of a single-inheritance subclass.
+
+ .. seealso::
+
+ :ref:`change_3582`
+
+ .. change::
+ :tags: enhancement, schema
+ :pullreq: github:204
+
+ The default generation functions passed to :class:`.Column` objects
+ are now run through "update_wrapper", or an equivalent function
+ if a callable non-function is passed, so that introspection tools
+ preserve the name and docstring of the wrapped function. Pull
+ request courtesy hsum.
+
+ .. change::
+ :tags: change, sql, mysql
+ :tickets: 3216
+
+ The system by which a :class:`.Column` considers itself to be an
+ "auto increment" column has been changed, such that autoincrement
+ is no longer implicitly enabled for a :class:`.Table` that has a
+ composite primary key. In order to accommodate being able to enable
+ autoincrement for a composite PK member column while at the same time
+ maintaining SQLAlchemy's long standing behavior of enabling
+ implicit autoincrement for a single integer primary key, a third
+ state has been added to the :paramref:`.Column.autoincrement` parameter
+ ``"auto"``, which is now the default.
+
+ .. seealso::
+
+ :ref:`change_3216`
+
+ :ref:`change_mysql_3216`
+
+ .. change::
+ :tags: change, mysql
+ :tickets: 3216
+
+ The MySQL dialect no longer generates an extra "KEY" directive when
+ generating CREATE TABLE DDL for a table using InnoDB with a
+ composite primary key with AUTO_INCREMENT on a column that isn't the
+ first column; to overcome InnoDB's limitation here, the PRIMARY KEY
+ constraint is now generated with the AUTO_INCREMENT column placed
+ first in the list of columns.
+
+ .. seealso::
+
+ :ref:`change_mysql_3216`
+
+ :ref:`change_3216`
+
+ .. change::
+ :tags: change, sqlite
+ :pullreq: github:198
+
+ Added support in the SQLite dialect for the
+ :meth:`.Inspector.get_schema_names` method;
+ pull request courtesy Brian Van Klaveren. Also repaired support
+ for creation of indexes with schemas as well as reflection of
+ foreign key constraints in schema-bound tables.
+
+ .. seealso::
+
+ :ref:`change_sqlite_schemas`
+
+ .. change::
+ :tags: change, mssql
+ :tickets: 3434
+
+ The ``legacy_schema_aliasing`` flag, introduced in version 1.0.5
+ as part of :ticket:`3424` to allow disabling of the MSSQL dialect's
+ attempts to create aliases for schema-qualified tables, now defaults
+ to False; the old behavior is now disabled unless explicitly turned on.
+
+ .. seealso::
+
+ :ref:`change_3434`
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3250
+
+ Added a new type-level modifier :meth:`.TypeEngine.evaluates_none`
+ which indicates to the ORM that a positive set of None should be
+ persisted as the value NULL, instead of omitting the column from
+ the INSERT statement. This feature is used both as part of the
+ implementation for :ticket:`3514` as well as a standalone feature
+ available on any type.
+
+ .. seealso::
+
+ :ref:`change_3250`
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 2729
+
+ The use of a :class:`.postgresql.ARRAY` object that refers
+ to a :class:`.types.Enum` or :class:`.postgresql.ENUM` subtype
+ will now emit the expected "CREATE TYPE" and "DROP TYPE" DDL when
+ the type is used within a "CREATE TABLE" or "DROP TABLE".
+
+ .. seealso::
+
+ :ref:`change_2729`
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3531
+
+ The :func:`.type_coerce` construct is now a fully fledged Core
+ expression element which is late-evaluated at compile time. Previously,
+ the function was only a conversion function which would handle different
+ expression inputs by returning either a :class:`.Label` of a column-oriented
+ expression or a copy of a given :class:`.BindParameter` object,
+ which in particular prevented the operation from being logically
+ maintained when an ORM-level expression transformation would convert
+ a column to a bound parameter (e.g. for lazy loading).
+
+ .. seealso::
+
+ :ref:`change_3531`
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3526
+
+ Internal calls to "bookkeeping" functions within
+ :meth:`.Session.bulk_save_objects` and related bulk methods have
+ been scaled back to the extent that this functionality is not
+ currently used, e.g. checks for column default values to be
+ fetched after an INSERT or UPDATE statement.
+
+ .. change::
+ :tags: feature, orm
+ :tickets: 2677
+
+ The :class:`.SessionEvents` suite now includes events to allow
+ unambiguous tracking of all object lifecycle state transitions
+ in terms of the :class:`.Session` itself, e.g. pending,
+ transient, persistent, detached. The state of the object
+ within each event is also defined.
+
+ .. seealso::
+
+ :ref:`change_2677`
+
+ .. change::
+ :tags: feature, orm
+ :tickets: 2677
+
+ Added a new session lifecycle state :term:`deleted`. This new state
+ represents an object that has been deleted from the :term:`persistent`
+ state and will move to the :term:`detached` state once the transaction
+ is committed. This resolves the long-standing issue that objects
+ which were deleted existed in a gray area between persistent and
+ detached. The :attr:`.InstanceState.persistent` accessor will
+ **no longer** report on a deleted object as persistent; the
+ :attr:`.InstanceState.deleted` accessor will instead be True for
+ these objects, until they become detached.
+
+ .. seealso::
+
+ :ref:`change_2677`
+
+ .. change::
+ :tags: change, orm
+ :tickets: 2677
+
+ The :paramref:`.Session.weak_identity_map` parameter is deprecated.
+ See the new recipe at :ref:`session_referencing_behavior` for
+ an event-based approach to maintaining strong identity map behavior.
+
+ .. seealso::
+
+ :ref:`change_2677`
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 2919
+
+ The :class:`.TypeDecorator` type extender will now work in conjunction
+ with a :class:`.SchemaType` implementation, typically :class:`.Enum`
+ or :class:`.Boolean` with regards to ensuring that the per-table
+ events are propagated from the implementation type to the outer type.
+ These events are used
+ to ensure that the constraints or Postgresql types (e.g. ENUM)
+ are correctly created (and possibly dropped) along with the parent
+ table.
+
+ .. seealso::
+
+ :ref:`change_2919`
+
+ .. change::
+ :tags: feature, sql
+ :tickets: 1370
+
+ Added support for "set-aggregate" functions of the form
+ ``<function> WITHIN GROUP (ORDER BY <criteria>)``, using the
+ method :meth:`.FunctionElement.within_group`. A series of common
+ set-aggregate functions with return types derived from the set have
+ been added. This includes functions like :class:`.percentile_cont`,
+ :class:`.dense_rank` and others.
+
+ .. seealso::
+
+ :ref:`change_3132`
+
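+ A rough sketch (the ``department`` table is illustrative only)::
+
+ from sqlalchemy import func, select
+
+ stmt = select([
+ func.percentile_cont(0.5).within_group(department.c.salary.desc())
+ ])
+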
+ .. change::
+ :tags: feature, sql, postgresql
+ :tickets: 3132
+
+ Added support for the SQL-standard function :class:`.array_agg`,
+ which automatically returns an :class:`.Array` of the correct type
+ and supports index / slice operations, as well as
+ :func:`.postgresql.array_agg`, which returns a :class:`.postgresql.ARRAY`
+ with additional comparison features. As arrays are only
+ supported on Postgresql at the moment, this function only actually
+ works on Postgresql. Also added a new construct
+ :class:`.postgresql.aggregate_order_by` in support of PG's
+ "ORDER BY" extension.
+
+ .. seealso::
+
+ :ref:`change_3132`
+
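+ A rough sketch (Postgresql only; the ``accounts`` table is illustrative)::
+
+ from sqlalchemy.dialects.postgresql import aggregate_order_by, array_agg
+
+ stmt = select([
+ array_agg(aggregate_order_by(accounts.c.name, accounts.c.name.desc()))
+ ])
+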
+ .. change::
+ :tags: feature, sql
+ :tickets: 3516
+
+ Added a new type to core :class:`.types.Array`. This is the
+ base of the PostgreSQL :class:`.ARRAY` type, and is now part of Core
+ to begin supporting various SQL-standard array-supporting features
+ including some functions and eventual support for native arrays
+ on other databases that have an "array" concept, such as DB2 or Oracle.
+ Additionally, new operators :func:`.expression.any_` and
+ :func:`.expression.all_` have been added. These support not just
+ array constructs on Postgresql, but also subqueries that are usable
+ on MySQL (but sadly not on Postgresql).
+
+ .. seealso::
+
+ :ref:`change_3516`
+
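+ A short sketch of the new operators (names are illustrative; the array
+ form requires Postgresql)::
+
+ from sqlalchemy import any_
+
+ stmt = select([mytable]).where(mytable.c.value == any_(mytable.c.somearray))
+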
+ .. change::
+ :tags: feature, orm
+ :tickets: 3321
+
+ Added new checks for the common error case of passing mapped classes
+ or mapped instances into contexts where they are interpreted as
+ SQL bound parameters; a new exception is raised for this.
+
+ .. seealso::
+
+ :ref:`change_3321`
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3499
+
+ The "hashable" flag on special datatypes such as :class:`.postgresql.ARRAY`,
+ :class:`.postgresql.JSON` and :class:`.postgresql.HSTORE` is now
+ set to False, which allows these types to be fetchable in ORM
+ queries that include entities within the row.
+
+ .. seealso::
+
+ :ref:`change_3499`
+
+ :ref:`change_3499_postgresql`
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3487
+
+ The Postgresql :class:`.postgresql.ARRAY` type now supports multidimensional
+ indexed access, e.g. expressions such as ``somecol[5][6]`` without
+ any need for explicit casts or type coercions, provided
+ that the :paramref:`.postgresql.ARRAY.dimensions` parameter is set to the
+ desired number of dimensions.
+
+ .. seealso::
+
+ :ref:`change_3503`
+
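+ A brief sketch (table and column names are illustrative only)::
+
+ Column('matrix', postgresql.ARRAY(Integer, dimensions=2))
+
+ # later, in a query:
+ stmt = select([tbl.c.matrix[1][3]])
+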
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3503
+
+ The return type for the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB`
+ when using indexed access has been fixed to work like Postgresql itself,
+ and returns an expression that itself is of type :class:`.postgresql.JSON`
+ or :class:`.postgresql.JSONB`. Previously, the accessor would return
+ :class:`.NullType` which disallowed subsequent JSON-like operators to be
+ used.
+
+ .. seealso::
+
+ :ref:`change_3503`
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3503
+
+ The :class:`.postgresql.JSON`, :class:`.postgresql.JSONB` and
+ :class:`.postgresql.HSTORE` datatypes now allow full control over the
+ return type from an indexed textual access operation, either ``column[someindex].astext``
+ for a JSON type or ``column[someindex]`` for an HSTORE type,
+ via the :paramref:`.postgresql.JSON.astext_type` and
+ :paramref:`.postgresql.HSTORE.text_type` parameters.
+
+ .. seealso::
+
+ :ref:`change_3503`
+
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3503
+
+ The :attr:`.postgresql.JSON.Comparator.astext` modifier no longer
+ calls upon :meth:`.ColumnElement.cast` implicitly, as PG's JSON/JSONB
+ types allow cross-casting between each other as well. Code that
+ makes use of :meth:`.ColumnElement.cast` on JSON indexed access,
+ e.g. ``col[someindex].cast(Integer)``, will need to be changed
+ to call :attr:`.postgresql.JSON.Comparator.astext` explicitly.
+
+ .. seealso::
+
+ :ref:`change_3503_cast`
+
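+ A short sketch of the adjusted idiom (the ``data_table`` name is
+ illustrative only)::
+
+ # indexed access now returns a JSON-typed expression; text needs .astext
+ expr = data_table.c.data['some_key'].astext
+
+ # a further cast must now be spelled out explicitly:
+ expr = data_table.c.data['some_key'].astext.cast(Integer)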
+
+ .. change::
+ :tags: bug, orm, postgresql
+ :tickets: 3514
+
+ Additional fixes have been made regarding the value of ``None``
+ in conjunction with the Postgresql :class:`.JSON` type. When
+ the :paramref:`.JSON.none_as_null` flag is left at its default
+ value of ``False``, the ORM will now correctly insert the JSON
+ "'null'" string into the column whenever the value on the ORM
+ object is set to the value ``None`` or when the value ``None``
+ is used with :meth:`.Session.bulk_insert_mappings`,
+ **including** if the column has a default or server default on it.
+
+ .. seealso::
+
+ :ref:`change_3514`
+
+ :ref:`change_3250`
+
+ .. change::
+ :tags: feature, postgresql
+ :tickets: 3514
+
+ Added a new constant :attr:`.postgresql.JSON.NULL`, indicating
+ that the JSON NULL value should be used for a value
+ regardless of other settings.
+
+ .. seealso::
+
+ :ref:`change_3514_jsonnull`
+
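+ For example (a sketch; the mapped attribute is illustrative only)::
+
+ from sqlalchemy.dialects.postgresql import JSON
+
+ obj.json_value = JSON.NULL   # persists the JSON 'null' value unconditionally
+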
+ .. change::
+ :tags: bug, sql
+ :tickets: 2528
+
+ The behavior of the :func:`.union` construct and related constructs
+ such as :meth:`.Query.union` now handle the case where the embedded
+ SELECT statements need to be parenthesized due to the fact that they
+ include LIMIT, OFFSET and/or ORDER BY. These queries **do not work
+ on SQLite**, and will fail on that backend as they did before, but
+ should now work on all other backends.
+
+ .. seealso::
+
+ :ref:`change_2528`
+
+ .. change::
+ :tags: bug, mssql
+ :tickets: 3504
+
+ Fixed issue where the SQL Server dialect would reflect a string-
+ or other variable-length column type with unbounded length
+ by assigning the token ``"max"`` to the
+ length attribute of the string. While using the ``"max"`` token
+ explicitly is supported by the SQL Server dialect, it isn't part
+ of the normal contract of the base string types, and instead the
+ length should just be left as None. The dialect now assigns the
+ length to None on reflection of the type so that the type behaves
+ normally in other contexts.
+
+ .. seealso::
+
+ :ref:`change_3504`
diff --git a/doc/build/changelog/index.rst b/doc/build/changelog/index.rst
index 8c5be99b8..a9f294e87 100644
--- a/doc/build/changelog/index.rst
+++ b/doc/build/changelog/index.rst
@@ -12,7 +12,7 @@ Current Migration Guide
.. toctree::
:titlesonly:
- migration_10
+ migration_11
Change logs
-----------
@@ -20,6 +20,7 @@ Change logs
.. toctree::
:titlesonly:
+ changelog_11
changelog_10
changelog_09
changelog_08
@@ -38,6 +39,7 @@ Older Migration Guides
.. toctree::
:titlesonly:
+ migration_10
migration_09
migration_08
migration_07
diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst
index 08e26fd4b..a4fbf117d 100644
--- a/doc/build/changelog/migration_10.rst
+++ b/doc/build/changelog/migration_10.rst
@@ -2305,7 +2305,7 @@ Postgresql Dialect reflects Materialized Views, Foreign Tables
Changes are as follows:
* the :class:`Table` construct with ``autoload=True`` will now match a name
- that exists in the database as a materialized view or foriegn table.
+ that exists in the database as a materialized view or foreign table.
* :meth:`.Inspector.get_view_names` will return plain and materialized view
names.
diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst
new file mode 100644
index 000000000..b5889c763
--- /dev/null
+++ b/doc/build/changelog/migration_11.rst
@@ -0,0 +1,1124 @@
+==============================
+What's New in SQLAlchemy 1.1?
+==============================
+
+.. admonition:: About this Document
+
+ This document describes changes between SQLAlchemy version 1.0,
+ at the moment the current release series of SQLAlchemy,
+ and SQLAlchemy version 1.1, which is the current development
+ series of SQLAlchemy.
+
+ As the 1.1 series is under development, issues that are targeted
+ at this series can be seen under the
+ `1.1 milestone <https://bitbucket.org/zzzeek/sqlalchemy/issues?milestone=1.1>`_.
+ Please note that the set of issues within the milestone is not fixed;
+ some issues may be moved to later milestones in order to allow
+ for a timely release.
+
+ Document last updated: December 4, 2015
+
+Introduction
+============
+
+This guide introduces what's new in SQLAlchemy version 1.1,
+and also documents changes which affect users migrating
+their applications from the 1.0 series of SQLAlchemy to 1.1.
+
+Please carefully review the sections on behavioral changes for
+potentially backwards-incompatible changes in behavior.
+
+Platform / Installer Changes
+============================
+
+Setuptools is now required for install
+--------------------------------------
+
+SQLAlchemy's ``setup.py`` file has for many years supported operation
+both with Setuptools installed and without; supporting a "fallback" mode
+that uses straight Distutils. As a Setuptools-less Python environment is
+now unheard of, and in order to support the featureset of Setuptools
+more fully, in particular to support py.test's integration with it,
+``setup.py`` now depends on Setuptools fully.
+
+.. seealso::
+
+ :ref:`installation`
+
+:ticket:`3489`
+
+Enabling / Disabling C Extension builds is only via environment variable
+------------------------------------------------------------------------
+
+The C extensions are built by default during installation whenever possible.
+To disable C extension builds, the ``DISABLE_SQLALCHEMY_CEXT`` environment
+variable was made available as of SQLAlchemy 0.8.6 / 0.9.4. The previous
+approach of using the ``--without-cextensions`` argument has been removed,
+as it relies on deprecated features of setuptools.
+
+.. seealso::
+
+ :ref:`c_extensions`
+
+:ticket:`3500`
+
+
+New Features and Improvements - ORM
+===================================
+
+.. _change_2677:
+
+New Session lifecycle events
+----------------------------
+
+The :class:`.Session` has long supported events that allow some degree
+of tracking of state changes to objects, including
+:meth:`.SessionEvents.before_attach`, :meth:`.SessionEvents.after_attach`,
+and :meth:`.SessionEvents.before_flush`. The Session documentation also
+documents major object states at :ref:`session_object_states`. However,
+there has never been system of tracking objects specifically as they
+pass through these transitions. Additionally, the status of "deleted" objects
+has historically been murky as the objects act somewhere between
+the "persistent" and "detached" states.
+
+To clean up this area and allow the realm of session state transition
+to be fully transparent, a new series of events have been added that
+are intended to cover every possible way that an object might transition
+between states, and additionally the "deleted" status has been given
+its own official state name within the realm of session object states.
+
+New State Transition Events
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Transitions between all states of an object such as :term:`persistent`,
+:term:`pending` and others can now be intercepted in terms of a
+session-level event intended to cover a specific transition.
+Transitions as objects move into a :class:`.Session`, move out of a
+:class:`.Session`, and even all the transitions which occur when the
+transaction is rolled back using :meth:`.Session.rollback`
+are explicitly present in the interface of :class:`.SessionEvents`.
+
+In total, there are **ten new events**. A summary of these events is in a
+newly written documentation section :ref:`session_lifecycle_events`.
+
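+A minimal sketch of hooking one of these transitions (the handler name is
+illustrative only)::
+
+ from sqlalchemy import event
+ from sqlalchemy.orm import Session
+
+ @event.listens_for(Session, 'persistent_to_deleted')
+ def on_persistent_to_deleted(session, instance):
+ # invoked as the flush moves the object into the "deleted" state
+ ...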
+
+New Object State "deleted" is added, deleted objects no longer "persistent"
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :term:`persistent` state of an object in the :class:`.Session` has
+always been documented as an object that has a valid database identity;
+however in the case of objects that were deleted within a flush, they
+have always been in a grey area where they are not really "detached"
+from the :class:`.Session` yet, because they can still be restored
+within a rollback, but are not really "persistent" because their database
+identity has been deleted and they aren't present in the identity map.
+
+To resolve this grey area given the new events, a new object state
+:term:`deleted` is introduced. This state exists between the "persistent" and
+"detached" states. An object that is marked for deletion via
+:meth:`.Session.delete` remains in the "persistent" state until a flush
+proceeds; at that point, it is removed from the identity map, moves
+to the "deleted" state, and the :meth:`.SessionEvents.persistent_to_deleted`
+hook is invoked. If the :class:`.Session` object's transaction is rolled
+back, the object is restored as persistent; the
+:meth:`.SessionEvents.deleted_to_persistent` transition is called. Otherwise
+if the :class:`.Session` object's transaction is committed,
+the :meth:`.SessionEvents.deleted_to_detached` transition is invoked.
+
+Additionally, the :attr:`.InstanceState.persistent` accessor **no longer returns
+True** for an object that is in the new "deleted" state; instead, the
+:attr:`.InstanceState.deleted` accessor has been enhanced to reliably
+report on this new state. When the object is detached, the :attr:`.InstanceState.deleted`
+accessor returns False and the :attr:`.InstanceState.detached` accessor is True
+instead. To determine if an object was deleted either in the current
+transaction or in a previous transaction, use the
+:attr:`.InstanceState.was_deleted` accessor.
+
+Strong Identity Map is Deprecated
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One of the inspirations for the new series of transition events was to enable
+leak-proof tracking of objects as they move in and out of the identity map,
+so that a "strong reference" may be maintained mirroring the object
+moving in and out of this map. With this new capability, there is no longer
+any need for the :paramref:`.Session.weak_identity_map` parameter and the
+corresponding :class:`.StrongIdentityMap` object. This option has remained
+in SQLAlchemy for many years as the "strong-referencing" behavior used to be
+the only behavior available, and many applications were written to assume
+this behavior. It has long been recommended that strong-reference tracking
+of objects not be an intrinsic job of the :class:`.Session` and instead
+be an application-level construct built as needed by the application; the
+new event model allows even the exact behavior of the strong identity map
+to be replicated. See :ref:`session_referencing_behavior` for a new
+recipe illustrating how to replace the strong identity map.
+
+:ticket:`2677`
+
+.. _change_3499:
+
+Changes regarding "unhashable" types
+------------------------------------
+
+The :class:`.Query` object has a well-known behavior of "deduping"
+returned rows that contain at least one ORM-mapped entity (e.g., a
+full mapped object, as opposed to individual column values). The
+primary purpose of this is so that the handling of entities works
+smoothly in conjunction with the identity map, including to
+accommodate for the duplicate entities normally represented within
+joined eager loading, as well as when joins are used for the purposes
+of filtering on additional columns.
+
+This deduplication relies upon the hashability of the elements within
+the row. With the introduction of Postgresql's special types like
+:class:`.postgresql.ARRAY`, :class:`.postgresql.HSTORE` and
+:class:`.postgresql.JSON`, the experience of types within rows being
+unhashable and encountering problems here is more prevalent than
+it was previously.
+
+In fact, SQLAlchemy has since version 0.8 included a flag on datatypes that
+are noted as "unhashable", however this flag was not used consistently
+on built in types. As described in :ref:`change_3499_postgresql`, this
+flag is now set consistently for all of Postgresql's "structural" types.
+
+The "unhashable" flag is also set on the :class:`.NullType` type,
+as :class:`.NullType` is used to refer to any expression of unknown
+type.
+
+Additionally, the treatment of a so-called "unhashable" type is slightly
+different than it has been in previous releases; internally we are using
+the ``id()`` function to get a "hash value" from these structures, just
+as we would any ordinary mapped object. This replaces the previous
+approach which applied a counter to the object.
+
+:ticket:`3499`
+
+.. _change_3321:
+
+Specific checks added for passing mapped classes, instances as SQL literals
+---------------------------------------------------------------------------
+
+The typing system now has specific checks for passing of SQLAlchemy
+"inspectable" objects in contexts where they would otherwise be handled as
+literal values. Any SQLAlchemy built-in object that is legal to pass as a
+SQL value includes a method ``__clause_element__()`` which provides a
+valid SQL expression for that object. For SQLAlchemy objects that
+don't provide this, such as mapped classes, mappers, and mapped
+instances, a more informative error message is emitted rather than
+allowing the DBAPI to receive the object and fail later. An example
+is illustrated below, where a string-based attribute ``User.name`` is
+compared to a full instance of ``User()``, rather than against a
+string value::
+
+ >>> some_user = User()
+ >>> q = s.query(User).filter(User.name == some_user)
+ ...
+ sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value
+
+The exception is now immediate when the comparison is made between
+``User.name == some_user``. Previously, a comparison like the above
+would produce a SQL expression that would only fail once resolved
+into a DBAPI execution call; the mapped ``User`` object would
+ultimately become a bound parameter that would be rejected by the
+DBAPI.
+
+Note that in the above example, the expression fails because
+``User.name`` is a string-based (e.g. column oriented) attribute.
+The change does *not* impact the usual case of comparing a many-to-one
+relationship attribute to an object, which is handled distinctly::
+
+ >>> # Address.user refers to the User mapper, so
+ >>> # this is of course still OK!
+ >>> q = s.query(Address).filter(Address.user == some_user)
+
+
+:ticket:`3321`
+
+.. _change_3250:
+
+New options allowing explicit persistence of NULL over a default
+----------------------------------------------------------------
+
+Related to the new JSON-NULL support added to Postgresql as part of
+:ref:`change_3514`, the base :class:`.TypeEngine` class now supports
+a method :meth:`.TypeEngine.evaluates_none` which allows a positive set
+of the ``None`` value on an attribute to be persisted as NULL, rather than
+omitting the column from the INSERT statement, which has the effect of using
+the column-level default. This allows a mapper-level
+configuration of the existing object-level technique of assigning
+:func:`.sql.null` to the attribute.
+
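+A minimal sketch of the mapper-level approach (table and column names are
+illustrative only)::
+
+ Table(
+ 'my_table', metadata,
+ Column('data', String(50).evaluates_none(), nullable=True)
+ )
+
+With this in place, an attribute (or bulk-mapping dictionary value) that is
+positively set to ``None`` renders an explicit NULL in the INSERT rather than
+being omitted so that the column-level default would take effect.
+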
+.. seealso::
+
+ :ref:`session_forcing_null`
+
+:ticket:`3250`
+
+
+.. _change_3582:
+
+Further Fixes to single-table inheritance querying
+--------------------------------------------------
+
+Continuing from 1.0's :ref:`migration_3177`, the :class:`.Query` should
+no longer inappropriately add the "single inheritance" criteria when the
+query is against a subquery expression such as an exists::
+
+ class Widget(Base):
+ __tablename__ = 'widget'
+ id = Column(Integer, primary_key=True)
+ type = Column(String)
+ data = Column(String)
+ __mapper_args__ = {'polymorphic_on': type}
+
+
+ class FooWidget(Widget):
+ __mapper_args__ = {'polymorphic_identity': 'foo'}
+
+ q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists()
+
+ session.query(q).all()
+
+Produces::
+
+ SELECT EXISTS (SELECT 1
+ FROM widget
+ WHERE widget.data = :data_1 AND widget.type IN (:type_1)) AS anon_1
+
+The IN clause on the inside is appropriate, in order to limit to FooWidget
+objects; however, previously the IN clause would also be generated a second
+time on the outside of the subquery.
+
+:ticket:`3582`
+
+
+.. _change_3601:
+
+Session.merge resolves pending conflicts the same as persistent
+---------------------------------------------------------------
+
+The :meth:`.Session.merge` method will now track the identities of objects given
+within a graph to maintain primary key uniqueness before emitting an INSERT.
+When duplicate objects of the same identity are encountered, non-primary-key
+attributes are **overwritten** as the objects are encountered, which is
+essentially non-deterministic. This behavior matches how persistent
+objects, that is, objects that are already located in the database via
+primary key, are already treated, so the behavior is more internally
+consistent.
+
+Given::
+
+ u1 = User(id=7, name='x')
+ u1.orders = [
+ Order(description='o1', address=Address(id=1, email_address='a')),
+ Order(description='o2', address=Address(id=1, email_address='b')),
+ Order(description='o3', address=Address(id=1, email_address='c'))
+ ]
+
+ sess = Session()
+ sess.merge(u1)
+
+Above, we merge a ``User`` object with three new ``Order`` objects, each referring to
+a distinct ``Address`` object, however each is given the same primary key.
+The current behavior of :meth:`.Session.merge` is to look in the identity
+map for this ``Address`` object, and use that as the target. If the object
+is present, meaning that the database already has a row for ``Address`` with
+primary key "1", we can see that the ``email_address`` field of the ``Address``
+will be overwritten three times, in this case with the values a, b and finally
+c.
+
+However, if the ``Address`` row for primary key "1" were not present, :meth:`.Session.merge`
+would instead create three separate ``Address`` instances, and we'd then get
+a primary key conflict upon INSERT. The new behavior is that the proposed
+primary key for these ``Address`` objects is tracked in a separate dictionary
+so that we merge the state of the three proposed ``Address`` objects onto
+one ``Address`` object to be inserted.
+
+It may have been preferable if the original case emitted some kind of warning
+that conflicting data were present in a single merge-tree, however the
+non-deterministic merging of values has been the behavior for many
+years for the persistent case; it now matches for the pending case. A
+feature that warns for conflicting values could still be feasible for both
+cases but would add considerable performance overhead as each column value
+would have to be compared during the merge.
+
+
+:ticket:`3601`
+
+New Features and Improvements - Core
+====================================
+
+.. _change_3216:
+
+The ``.autoincrement`` directive is no longer implicitly enabled for a composite primary key column
+---------------------------------------------------------------------------------------------------
+
+SQLAlchemy has always had the convenience feature of enabling the backend database's
+"autoincrement" feature for a single-column integer primary key; by "autoincrement"
+we mean that the database column will include whatever DDL directives the
+database provides in order to indicate an auto-incrementing integer identifier,
+such as the SERIAL keyword on Postgresql or AUTO_INCREMENT on MySQL, and additionally
+that the dialect will receive these generated values from the execution
+of a :meth:`.Table.insert` construct using techniques appropriate to that
+backend.
+
+What's changed is that this feature no longer turns on automatically for a
+*composite* primary key; previously, a table definition such as::
+
+ Table(
+ 'some_table', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True)
+ )
+
+Would have "autoincrement" semantics applied to the ``'x'`` column, only
+because it's first in the list of primary key columns. In order to
+disable this, one would have to turn off ``autoincrement`` on all columns::
+
+ # old way
+ Table(
+ 'some_table', metadata,
+ Column('x', Integer, primary_key=True, autoincrement=False),
+ Column('y', Integer, primary_key=True, autoincrement=False)
+ )
+
+With the new behavior, the composite primary key will not have autoincrement
+semantics unless a column is marked explicitly with ``autoincrement=True``::
+
+    # column 'y' will be SERIAL / AUTO_INCREMENT / auto-generating
+ Table(
+ 'some_table', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True, autoincrement=True)
+ )
+
+In order to anticipate some potential backwards-incompatible scenarios,
+the :meth:`.Table.insert` construct will perform more thorough checks
+for missing primary key values on composite primary key columns that don't
+have autoincrement set up; given a table such as::
+
+ Table(
+ 'b', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True)
+ )
+
+An INSERT emitted with no values for this table will produce the exception::
+
+ CompileError: Column 'b.x' is marked as a member of the primary
+ key for table 'b', but has no Python-side or server-side default
+ generator indicated, nor does it indicate 'autoincrement=True',
+ and no explicit value is passed. Primary key columns may not
+ store NULL. Note that as of SQLAlchemy 1.1, 'autoincrement=True'
+ must be indicated explicitly for composite (e.g. multicolumn)
+ primary keys if AUTO_INCREMENT/SERIAL/IDENTITY behavior is
+ expected for one of the columns in the primary key. CREATE TABLE
+ statements are impacted by this change as well on most backends.
+
+For a column that is receiving primary key values from a server-side
+default or something less common such as a trigger, the presence of a
+value generator can be indicated using :class:`.FetchedValue`::
+
+ Table(
+ 'b', metadata,
+ Column('x', Integer, primary_key=True, server_default=FetchedValue()),
+ Column('y', Integer, primary_key=True, server_default=FetchedValue())
+ )
+
+For the very unlikely case where a composite primary key is actually intended
+to store NULL in one or more of its columns (only supported on SQLite and MySQL),
+specify the column with ``nullable=True``::
+
+ Table(
+ 'b', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True, nullable=True)
+ )
+
+In a related change, the ``autoincrement`` flag may be set to True
+on a column that has a client-side or server-side default. This typically
+will not have much impact on the behavior of the column during an INSERT.
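+
+A brief sketch of this combination, using a hypothetical table name::
+
+    Table(
+        'some_table', metadata,
+        # autoincrement=True may now coexist with a server-side default
+        Column('id', Integer, primary_key=True,
+               autoincrement=True, server_default=FetchedValue())
+    )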
+
+
+.. seealso::
+
+ :ref:`change_mysql_3216`
+
+:ticket:`3216`
+
+.. _change_2528:
+
+A UNION or similar of SELECTs with LIMIT/OFFSET/ORDER BY now parenthesizes the embedded selects
+-----------------------------------------------------------------------------------------------
+
+A behavior that, like others, was long constrained by SQLite's lack of
+capabilities has now been enhanced to work on all supporting backends.  We
+refer to a query that is a UNION of SELECT statements that themselves contain
+row-limiting or ordering features, namely LIMIT, OFFSET, and/or ORDER BY::
+
+ (SELECT x FROM table1 ORDER BY y LIMIT 1) UNION
+ (SELECT x FROM table2 ORDER BY y LIMIT 2)
+
+The above query requires parentheses within each sub-select in order to
+group the sub-results correctly. Production of the above statement in
+SQLAlchemy Core looks like::
+
+ stmt1 = select([table1.c.x]).order_by(table1.c.y).limit(1)
+    stmt2 = select([table2.c.x]).order_by(table2.c.y).limit(2)
+
+ stmt = union(stmt1, stmt2)
+
+Previously, the above construct would not produce parenthesization for the
+inner SELECT statements, producing a query that fails on all backends.
+
+The above formats will **continue to fail on SQLite**; additionally, the format
+that includes ORDER BY but no LIMIT/OFFSET will **continue to fail on Oracle**.
+This is not a backwards-incompatible change, because the queries fail without
+the parentheses as well; with the fix, the queries at least work on all other
+databases.
+
+In order to produce a UNION of limited SELECT statements that also works on
+SQLite, as well as in all cases on Oracle, the
+subqueries must be a SELECT of an ALIAS::
+
+ stmt1 = select([table1.c.x]).order_by(table1.c.y).limit(1).alias().select()
+ stmt2 = select([table2.c.x]).order_by(table2.c.y).limit(2).alias().select()
+
+ stmt = union(stmt1, stmt2)
+
+This workaround works on all SQLAlchemy versions. In the ORM, it looks like::
+
+ stmt1 = session.query(Model1).order_by(Model1.y).limit(1).subquery().select()
+ stmt2 = session.query(Model2).order_by(Model2.y).limit(1).subquery().select()
+
+ stmt = session.query(Model1).from_statement(stmt1.union(stmt2))
+
+The behavior here has many parallels to the "join rewriting" behavior
+introduced in SQLAlchemy 0.9 in :ref:`feature_joins_09`; however in this case
+we have opted not to add new rewriting behavior to accommodate this
+case for SQLite.
+The existing rewriting behavior is very complicated already, and the case of
+UNIONs with parenthesized SELECT statements is much less common than the
+"right-nested-join" use case of that feature.
+
+:ticket:`2528`
+
+.. _change_3516:
+
+Array support added to Core; new ANY and ALL operators
+------------------------------------------------------
+
+Along with the enhancements made to the Postgresql :class:`.ARRAY`
+type described in :ref:`change_3503`, the base class of :class:`.ARRAY`
+itself has been moved to Core in a new class :class:`.types.Array`.
+
+Arrays are part of the SQL standard, as are several array-oriented functions
+such as ``array_agg()`` and ``unnest()``. In support of these constructs
+for not just PostgreSQL but also potentially for other array-capable backends
+in the future such as DB2, the majority of array logic for SQL expressions
+is now in Core. The :class:`.Array` type still **only works on
+Postgresql**, however it can be used directly, supporting special array
+use cases such as indexed access, as well as the ANY and ALL operators::
+
+ mytable = Table("mytable", metadata,
+ Column("data", Array(Integer, dimensions=2))
+ )
+
+ expr = mytable.c.data[5][6]
+
+ expr = mytable.c.data[5].any(12)
+
+In support of ANY and ALL, the :class:`.Array` type retains the same
+:meth:`.Array.Comparator.any` and :meth:`.Array.Comparator.all` methods
+from the PostgreSQL type, but also exports these operations to new
+standalone operator functions :func:`.sql.expression.any_` and
+:func:`.sql.expression.all_`. These two functions work in more
+of the traditional SQL way, allowing a right-side expression form such
+as::
+
+ from sqlalchemy import any_, all_
+
+ select([mytable]).where(12 == any_(mytable.c.data[5]))
+
+For the PostgreSQL-specific operators "contains", "contained_by", and
+"overlaps", one should continue to use the :class:`.postgresql.ARRAY`
+type directly, which provides all functionality of the :class:`.Array`
+type as well.
+
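+For example, a short sketch using a hypothetical table with the
+Postgresql-specific type in order to get at the "contains" operator::
+
+    from sqlalchemy.dialects import postgresql
+
+    pg_table = Table("pg_table", metadata,
+        Column("tags", postgresql.ARRAY(Integer))
+    )
+
+    # uses the Postgresql @> (contains) operator
+    expr = pg_table.c.tags.contains([1, 2, 3])
+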
+The :func:`.sql.expression.any_` and :func:`.sql.expression.all_` operators
+are open-ended at the Core level; however, their interpretation by backend
+databases is limited.  On the Postgresql backend, the two operators
+**only accept array values**, whereas on the MySQL backend they
+**only accept subquery values**. On MySQL, one can use an expression
+such as::
+
+ from sqlalchemy import any_, all_
+
+ subq = select([mytable.c.value])
+ select([mytable]).where(12 > any_(subq))
+
+
+:ticket:`3516`
+
+.. _change_3132:
+
+New Function features, "WITHIN GROUP", array_agg and set aggregate functions
+----------------------------------------------------------------------------
+
+With the new :class:`.Array` type we can also implement a pre-typed
+function for the ``array_agg()`` SQL function that returns an array,
+which is now available using :class:`.array_agg`::
+
+ from sqlalchemy import func
+ stmt = select([func.array_agg(table.c.value)])
+
+A Postgresql element for an aggregate ORDER BY is also added via
+:class:`.postgresql.aggregate_order_by`::
+
+ from sqlalchemy.dialects.postgresql import aggregate_order_by
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+Producing::
+
+ SELECT array_agg(table1.a ORDER BY table1.b DESC) AS array_agg_1 FROM table1
+
+The PG dialect itself also provides an :func:`.postgresql.array_agg` wrapper to
+ensure the :class:`.postgresql.ARRAY` type::
+
+ from sqlalchemy.dialects.postgresql import array_agg
+ stmt = select([array_agg(table.c.value).contains('foo')])
+
+
+Additionally, functions like ``percentile_cont()``, ``percentile_disc()``,
+``rank()``, ``dense_rank()`` and others that require an ordering via
+``WITHIN GROUP (ORDER BY <expr>)`` are now available via the
+:meth:`.FunctionElement.within_group` modifier::
+
+ from sqlalchemy import func
+ stmt = select([
+ department.c.id,
+ func.percentile_cont(0.5).within_group(
+ department.c.salary.desc()
+ )
+ ])
+
+The above statement would produce SQL similar to::
+
+ SELECT department.id, percentile_cont(0.5)
+ WITHIN GROUP (ORDER BY department.salary DESC)
+
+Placeholders with correct return types are now provided for these functions,
+and include :class:`.percentile_cont`, :class:`.percentile_disc`,
+:class:`.rank`, :class:`.dense_rank`, :class:`.mode`, :class:`.percent_rank`,
+and :class:`.cume_dist`.
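+
+As a brief illustration, reusing the ``department`` table from the example
+above, the placeholder's return type carries through the WITHIN GROUP
+modifier::
+
+    from sqlalchemy import func
+
+    expr = func.dense_rank().within_group(department.c.salary.desc())
+    # expr.type is Integer, as established by the dense_rank placeholder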
+
+:ticket:`3132` :ticket:`1370`
+
+.. _change_2919:
+
+TypeDecorator now works with Enum, Boolean, "schema" types automatically
+------------------------------------------------------------------------
+
+:class:`.SchemaType` types such as :class:`.Enum`
+and :class:`.Boolean`, which in addition to corresponding to a database
+type also generate either a CHECK constraint or, in the case of the Postgresql
+ENUM, a new CREATE TYPE statement, will now work automatically with
+:class:`.TypeDecorator` recipes. Previously, a :class:`.TypeDecorator` for
+an :class:`.postgresql.ENUM` had to look like this::
+
+ # old way
+ class MyEnum(TypeDecorator, SchemaType):
+ impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
+
+ def _set_table(self, table):
+ self.impl._set_table(table)
+
+The :class:`.TypeDecorator` now propagates those additional events so it
+can be done like any other type::
+
+ # new way
+ class MyEnum(TypeDecorator):
+ impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
+
+
+:ticket:`2919`
+
+.. _change_3531:
+
+The type_coerce function is now a persistent SQL element
+--------------------------------------------------------
+
+The :func:`.expression.type_coerce` function previously would return
+an object either of type :class:`.BindParameter` or :class:`.Label`, depending
+on the input.  An effect of this was that when expression
+transformations were applied, such as the conversion of an element from a
+:class:`.Column` to a :class:`.BindParameter` that's critical to ORM-level
+lazy loading, the type coercion information would not be used, since it
+would already have been lost.
+
+To improve this behavior, the function now returns a persistent
+:class:`.TypeCoerce` container around the given expression, which itself
+remains unaffected; this construct is evaluated explicitly by the
+SQL compiler. This allows for the coercion of the inner expression
+to be maintained no matter how the statement is modified, including if
+the contained element is replaced with a different one, as is common
+within the ORM's lazy loading feature.
+
+The test case illustrating the effect makes use of a heterogeneous
+primaryjoin condition in conjunction with custom types and lazy loading.
+Given a custom type that applies CAST on both the bind and column expression sides::
+
+ class StringAsInt(TypeDecorator):
+ impl = String
+
+ def column_expression(self, col):
+ return cast(col, Integer)
+
+ def bind_expression(self, value):
+ return cast(value, String)
+
+Then, a mapping where we are equating a string "id" column on one
+table to an integer "id" column on the other::
+
+ class Person(Base):
+ __tablename__ = 'person'
+ id = Column(StringAsInt, primary_key=True)
+
+ pets = relationship(
+ 'Pets',
+ primaryjoin=(
+ 'foreign(Pets.person_id)'
+ '==cast(type_coerce(Person.id, Integer), Integer)'
+ )
+ )
+
+ class Pets(Base):
+ __tablename__ = 'pets'
+ id = Column('id', Integer, primary_key=True)
+ person_id = Column('person_id', Integer)
+
+Above, in the :paramref:`.relationship.primaryjoin` expression, we are
+using :func:`.type_coerce` to handle bound parameters passed via
+lazyloading as integers, since we already know these will come from
+our ``StringAsInt`` type which maintains the value as an integer in
+Python. We are then using :func:`.cast` so that as a SQL expression,
+the VARCHAR "id" column will be CAST to an integer for a regular non-
+converted join as with :meth:`.Query.join` or :func:`.orm.joinedload`.
+That is, a joinedload of ``.pets`` looks like::
+
+ SELECT person.id AS person_id, pets_1.id AS pets_1_id,
+ pets_1.person_id AS pets_1_person_id
+ FROM person
+ LEFT OUTER JOIN pets AS pets_1
+ ON pets_1.person_id = CAST(person.id AS INTEGER)
+
+Without the CAST in the ON clause of the join, strongly-typed databases
+such as Postgresql will refuse to implicitly compare the integer and fail.
+
+The lazyload case of ``.pets`` relies upon replacing
+the ``Person.id`` column at load time with a bound parameter, which receives
+a Python-loaded value. This replacement is specifically where the intent
+of our :func:`.type_coerce` function would be lost. Prior to the change,
+this lazy load comes out as::
+
+ SELECT pets.id AS pets_id, pets.person_id AS pets_person_id
+ FROM pets
+ WHERE pets.person_id = CAST(CAST(%(param_1)s AS VARCHAR) AS INTEGER)
+ {'param_1': 5}
+
+Where above, we see that our in-Python value of ``5`` is CAST first
+to a VARCHAR, then back to an INTEGER in SQL; a double CAST which works,
+but is nevertheless not what we asked for.
+
+With the change, the :func:`.type_coerce` function maintains a wrapper
+even after the column is swapped out for a bound parameter, and the query now
+looks like::
+
+ SELECT pets.id AS pets_id, pets.person_id AS pets_person_id
+ FROM pets
+ WHERE pets.person_id = CAST(%(param_1)s AS INTEGER)
+ {'param_1': 5}
+
+Here, the outer CAST from our primaryjoin still takes effect, but the
+needless CAST that's part of the ``StringAsInt`` custom type is removed
+as intended by the :func:`.type_coerce` function.
+
+
+:ticket:`3531`
+
+
+Key Behavioral Changes - ORM
+============================
+
+
+Key Behavioral Changes - Core
+=============================
+
+
+Dialect Improvements and Changes - Postgresql
+=============================================
+
+.. _change_3499_postgresql:
+
+ARRAY and JSON types now correctly specify "unhashable"
+-------------------------------------------------------
+
+As described in :ref:`change_3499`, the ORM relies upon being able to
+produce a hash function for column values when a query's selected entities
+mix full ORM entities with column expressions.  The ``hashable=False``
+flag is now correctly set on all of PG's "data structure" types, including
+:class:`.ARRAY` and :class:`.JSON`. The :class:`.JSONB` and :class:`.HSTORE`
+types already included this flag. For :class:`.ARRAY`,
+this is conditional based on the :paramref:`.postgresql.ARRAY.as_tuple`
+flag, however it should no longer be necessary to set this flag
+in order to have an array value present in a composed ORM row.
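+
+As a short sketch with a hypothetical mapping, a query such as the following,
+which mixes the full entity with its ARRAY-typed column, should no longer
+require the ``as_tuple=True`` flag in order to succeed::
+
+    rows = session.query(SomeEntity, SomeEntity.array_data).all()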
+
+.. seealso::
+
+ :ref:`change_3499`
+
+ :ref:`change_3503`
+
+:ticket:`3499`
+
+.. _change_3503:
+
+Correct SQL Types are Established from Indexed Access of ARRAY, JSON, HSTORE
+-----------------------------------------------------------------------------
+
+For all three of :class:`~.postgresql.ARRAY`, :class:`~.postgresql.JSON` and :class:`.HSTORE`,
+the SQL type assigned to the expression returned by indexed access, e.g.
+``col[someindex]``, should be correct in all cases.
+
+This includes:
+
+* The SQL type assigned to indexed access of an :class:`~.postgresql.ARRAY` takes into
+ account the number of dimensions configured. An :class:`~.postgresql.ARRAY` with three
+ dimensions will return a SQL expression with a type of :class:`~.postgresql.ARRAY` of
+ one less dimension. Given a column with type ``ARRAY(Integer, dimensions=3)``,
+ we can now perform this expression::
+
+ int_expr = col[5][6][7] # returns an Integer expression object
+
+ Previously, the indexed access to ``col[5]`` would return an expression of
+ type :class:`.Integer` where we could no longer perform indexed access
+ for the remaining dimensions, unless we used :func:`.cast` or :func:`.type_coerce`.
+
+* The :class:`~.postgresql.JSON` and :class:`~.postgresql.JSONB` types now mirror what Postgresql
+ itself does for indexed access. This means that all indexed access for
+  a :class:`~.postgresql.JSON` or :class:`~.postgresql.JSONB` type returns an expression that
+  is itself *always* of type :class:`~.postgresql.JSON` or :class:`~.postgresql.JSONB`, unless the
+ :attr:`~.postgresql.JSON.Comparator.astext` modifier is used. This means that whether
+ the indexed access of the JSON structure ultimately refers to a string,
+ list, number, or other JSON structure, Postgresql always considers it
+ to be JSON itself unless it is explicitly cast differently. Like
+ the :class:`~.postgresql.ARRAY` type, this means that it is now straightforward
+ to produce JSON expressions with multiple levels of indexed access::
+
+ json_expr = json_col['key1']['attr1'][5]
+
+* The "textual" type that is returned by indexed access of :class:`.HSTORE`
+ as well as the "textual" type that is returned by indexed access of
+ :class:`~.postgresql.JSON` and :class:`~.postgresql.JSONB` in conjunction with the
+ :attr:`~.postgresql.JSON.Comparator.astext` modifier is now configurable; it defaults
+ to :class:`.Text` in both cases but can be set to a user-defined
+ type using the :paramref:`.postgresql.JSON.astext_type` or
+  :paramref:`.postgresql.HSTORE.text_type` parameters; a short sketch follows
+  this list.
+
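+A short sketch of the configurable "textual" type, using a hypothetical
+table definition::
+
+    from sqlalchemy import Table, MetaData, Column, Integer, UnicodeText
+    from sqlalchemy.dialects.postgresql import JSON, HSTORE
+
+    metadata = MetaData()
+
+    doc = Table(
+        'doc', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('json_data', JSON(astext_type=UnicodeText())),
+        Column('hstore_data', HSTORE(text_type=UnicodeText()))
+    )
+
+    # both expressions are typed as UnicodeText rather than the default Text
+    expr = doc.c.json_data['some_key'].astext
+    expr = doc.c.hstore_data['some_key']
+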
+.. seealso::
+
+ :ref:`change_3503_cast`
+
+:ticket:`3499`
+:ticket:`3487`
+
+.. _change_3503_cast:
+
+The JSON cast() operation now requires ``.astext`` be called explicitly
+------------------------------------------------------------------------
+
+As part of the changes in :ref:`change_3503`, the workings of the
+:meth:`.ColumnElement.cast` operator on :class:`.postgresql.JSON` and
+:class:`.postgresql.JSONB` no longer implicitly invoke the
+:attr:`.JSON.Comparator.astext` modifier; Postgresql's JSON/JSONB types
+support CAST operations to each other without the "astext" aspect.
+
+This means that in most cases, an application that was doing this::
+
+ expr = json_col['somekey'].cast(Integer)
+
+Will now need to change to this::
+
+ expr = json_col['somekey'].astext.cast(Integer)
+
+
+
+.. _change_3514:
+
+Postgresql JSON "null" is inserted as expected with ORM operations, regardless of column default present
+-----------------------------------------------------------------------------------------------------------
+
+The :class:`.JSON` type has a flag :paramref:`.JSON.none_as_null` which
+when set to True indicates that the Python value ``None`` should translate
+into a SQL NULL rather than a JSON NULL value. This flag defaults to False,
+which means that the column should *never* insert SQL NULL or fall back
+to a default unless the :func:`.null` constant were used. However, this would
+fail in the ORM under two circumstances.  One is that, when the column also
+contained a default or server_default value, a positive value of ``None`` on
+the mapped attribute would still result in the column-level default being
+triggered, replacing the ``None`` value::
+
+ obj = MyObject(json_value=None)
+ session.add(obj)
+    session.commit()  # would fire off default / server_default, not encode JSON "null"
+
+The other is that, when the :meth:`.Session.bulk_insert_mappings`
+method were used, ``None`` would be ignored in all cases::
+
+ session.bulk_insert_mappings(
+ MyObject,
+ [{"json_value": None}]) # would insert SQL NULL and/or trigger defaults
+
+The :class:`.JSON` type now implements the
+:attr:`.TypeEngine.should_evaluate_none` flag,
+indicating that ``None`` should not be ignored here; it is configured
+automatically based on the value of :paramref:`.JSON.none_as_null`.
+Thanks to :ticket:`3061`, we can differentiate when the value ``None`` is actively
+set by the user versus when it was never set at all.
+
+If the attribute is not set at all, then column level defaults *will*
+fire off and/or SQL NULL will be inserted as expected, as was the behavior
+previously. Below, the two variants are illustrated::
+
+ obj = MyObject(json_value=None)
+ session.add(obj)
+ session.commit() # *will not* fire off column defaults, will insert JSON 'null'
+
+ obj = MyObject()
+ session.add(obj)
+ session.commit() # *will* fire off column defaults, and/or insert SQL NULL
+
+:ticket:`3514`
+
+.. seealso::
+
+ :ref:`change_3250`
+
+ :ref:`change_3514_jsonnull`
+
+.. _change_3514_jsonnull:
+
+New JSON.NULL Constant Added
+----------------------------
+
+To ensure that an application can always have full control at the value level
+of whether a :class:`.postgresql.JSON` or :class:`.postgresql.JSONB` column
+should receive a SQL NULL or JSON ``"null"`` value, the constant
+:attr:`.postgresql.JSON.NULL` has been added, which in conjunction with
+:func:`.null` can be used to determine fully between SQL NULL and
+JSON ``"null"``, regardless of what :paramref:`.JSON.none_as_null` is set
+to::
+
+ from sqlalchemy import null
+ from sqlalchemy.dialects.postgresql import JSON
+
+ obj1 = MyObject(json_value=null()) # will *always* insert SQL NULL
+ obj2 = MyObject(json_value=JSON.NULL) # will *always* insert JSON string "null"
+
+ session.add_all([obj1, obj2])
+ session.commit()
+
+.. seealso::
+
+ :ref:`change_3514`
+
+:ticket:`3514`
+
+.. _change_2729:
+
+ARRAY with ENUM will now emit CREATE TYPE for the ENUM
+------------------------------------------------------
+
+A table definition like the following will now emit CREATE TYPE
+as expected::
+
+ enum = Enum(
+ 'manager', 'place_admin', 'carwash_admin',
+ 'parking_admin', 'service_admin', 'tire_admin',
+ 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles")
+
+ class WorkPlacement(Base):
+ __tablename__ = 'work_placement'
+ id = Column(Integer, primary_key=True)
+ roles = Column(ARRAY(enum))
+
+
+ e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
+ Base.metadata.create_all(e)
+
+emits::
+
+ CREATE TYPE work_place_roles AS ENUM (
+ 'manager', 'place_admin', 'carwash_admin', 'parking_admin',
+ 'service_admin', 'tire_admin', 'mechanic', 'carwasher',
+ 'tire_mechanic')
+
+ CREATE TABLE work_placement (
+ id SERIAL NOT NULL,
+ roles work_place_roles[],
+ PRIMARY KEY (id)
+ )
+
+
+:ticket:`2729`
+
+Dialect Improvements and Changes - MySQL
+=============================================
+
+.. _change_mysql_3216:
+
+No more generation of an implicit KEY for composite primary key w/ AUTO_INCREMENT
+---------------------------------------------------------------------------------
+
+The MySQL dialect had the behavior such that if a composite primary key
+on an InnoDB table featured AUTO_INCREMENT on one of its columns which was
+not the first column, e.g.::
+
+ t = Table(
+ 'some_table', metadata,
+ Column('x', Integer, primary_key=True, autoincrement=False),
+ Column('y', Integer, primary_key=True, autoincrement=True),
+ mysql_engine='InnoDB'
+ )
+
+DDL such as the following would be generated::
+
+ CREATE TABLE some_table (
+ x INTEGER NOT NULL,
+ y INTEGER NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (x, y),
+ KEY idx_autoinc_y (y)
+ )ENGINE=InnoDB
+
+Note the above "KEY" with an auto-generated name; this is a change that
+found its way into the dialect many years ago in response to the issue that
+the AUTO_INCREMENT would otherwise fail on InnoDB without this additional KEY.
+
+This workaround has been removed and replaced with the much better system
+of just stating the AUTO_INCREMENT column *first* within the primary key::
+
+ CREATE TABLE some_table (
+ x INTEGER NOT NULL,
+ y INTEGER NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (y, x)
+ )ENGINE=InnoDB
+
+Along with the change :ref:`change_3216`, composite primary keys with
+or without auto increment are now easier to specify;
+:paramref:`.Column.autoincrement`
+now defaults to the value ``"auto"`` and the ``autoincrement=False``
+directives are no longer needed::
+
+ t = Table(
+ 'some_table', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True, autoincrement=True),
+ mysql_engine='InnoDB'
+ )
+
+
+
+Dialect Improvements and Changes - SQLite
+=============================================
+
+.. _change_sqlite_schemas:
+
+Improved Support for Remote Schemas
+------------------------------------
+
+The SQLite dialect now implements :meth:`.Inspector.get_schema_names`
+and additionally has improved support for tables and indexes that are
+created and reflected from a remote schema, which in SQLite is a
+database that is assigned a name via the ``ATTACH`` statement; previously,
+the ``CREATE INDEX`` DDL didn't work correctly for a schema-bound table.
+Additionally, the :meth:`.Inspector.get_foreign_keys` method will now indicate
+the given schema in the results.  Cross-schema foreign keys aren't supported.
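+
+As a short sketch (database file and table names are hypothetical), a remote
+schema becomes visible to the inspector once attached on the connection in
+use::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("sqlite:///main.db")
+
+    with engine.connect() as conn:
+        # "other" becomes a schema name for the duration of this connection
+        conn.execute("ATTACH DATABASE 'other.db' AS other")
+
+        insp = inspect(conn)
+        print(insp.get_schema_names())   # now includes 'other'
+        print(insp.get_foreign_keys('some_table', schema='other'))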
+
+
+Dialect Improvements and Changes - SQL Server
+=============================================
+
+.. _change_3504:
+
+String / varlength types no longer represent "max" explicitly on reflection
+---------------------------------------------------------------------------
+
+When reflecting a type such as :class:`.String`, :class:`.Text`, etc.
+which includes a length, an "un-lengthed" type under SQL Server would
+copy the "length" parameter as the value ``"max"``::
+
+ >>> from sqlalchemy import create_engine, inspect
+ >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True)
+ >>> engine.execute("create table s (x varchar(max), y varbinary(max))")
+ >>> insp = inspect(engine)
+ >>> for col in insp.get_columns("s"):
+ ... print col['type'].__class__, col['type'].length
+ ...
+ <class 'sqlalchemy.sql.sqltypes.VARCHAR'> max
+ <class 'sqlalchemy.dialects.mssql.base.VARBINARY'> max
+
+The "length" parameter in the base types is expected to be an integer value
+or None only; None indicates unbounded length, which the SQL Server dialect
+interprets as "max".  The fix is that these lengths now come
+out as None, so that the type objects work in non-SQL Server contexts::
+
+ >>> for col in insp.get_columns("s"):
+ ... print col['type'].__class__, col['type'].length
+ ...
+ <class 'sqlalchemy.sql.sqltypes.VARCHAR'> None
+ <class 'sqlalchemy.dialects.mssql.base.VARBINARY'> None
+
+Applications which may have been relying on a direct comparison of the "length"
+value to the string "max" should consider the value of ``None`` to mean
+the same thing.
+
+:ticket:`3504`
+
+.. _change_3434:
+
+The legacy_schema_aliasing flag is now set to False
+---------------------------------------------------
+
+SQLAlchemy 1.0.5 introduced the ``legacy_schema_aliasing`` flag to the
+MSSQL dialect, allowing so-called "legacy mode" aliasing to be turned off.
+This aliasing attempts to turn schema-qualified tables into aliases;
+given a table such as::
+
+ account_table = Table(
+ 'account', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('info', String(100)),
+ schema="customer_schema"
+ )
+
+The legacy mode of behavior will attempt to turn a schema-qualified table
+name into an alias::
+
+ >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
+ >>> print(account_table.select().compile(eng))
+ SELECT account_1.id, account_1.info
+ FROM customer_schema.account AS account_1
+
+However, this aliasing has been shown to be unnecessary and in many cases
+produces incorrect SQL.
+
+In SQLAlchemy 1.1, the ``legacy_schema_aliasing`` flag now defaults to
+False, disabling this mode of behavior and allowing the MSSQL dialect to behave
+normally with schema-qualified tables. For applications which may rely
+on this behavior, set the flag back to True.
+
+
+:ticket:`3434`
+
+Dialect Improvements and Changes - Oracle
+=============================================
diff --git a/doc/build/conf.py b/doc/build/conf.py
index fa9be2d25..e19078a87 100644
--- a/doc/build/conf.py
+++ b/doc/build/conf.py
@@ -136,11 +136,11 @@ copyright = u'2007-2015, the SQLAlchemy authors and contributors'
# built documents.
#
# The short X.Y version.
-version = "1.0"
+version = "1.1"
# The full version, including alpha/beta/rc tags.
-release = "1.0.6"
+release = "1.1.0b1"
-release_date = "June 25, 2015"
+release_date = "not released"
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
diff --git a/doc/build/core/ddl.rst b/doc/build/core/ddl.rst
index 0ba2f2806..820ba7b84 100644
--- a/doc/build/core/ddl.rst
+++ b/doc/build/core/ddl.rst
@@ -20,85 +20,100 @@ required, SQLAlchemy offers two techniques which can be used to add any DDL
based on any condition, either accompanying the standard generation of tables
or by itself.
-.. _schema_ddl_sequences:
-
-Controlling DDL Sequences
--------------------------
+Custom DDL
+----------
-The ``sqlalchemy.schema`` package contains SQL expression constructs that
-provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement:
+Custom DDL phrases are most easily achieved using the
+:class:`~sqlalchemy.schema.DDL` construct. This construct works like all the
+other DDL elements except it accepts a string which is the text to be emitted:
.. sourcecode:: python+sql
- from sqlalchemy.schema import CreateTable
- {sql}engine.execute(CreateTable(mytable))
- CREATE TABLE mytable (
- col1 INTEGER,
- col2 INTEGER,
- col3 INTEGER,
- col4 INTEGER,
- col5 INTEGER,
- col6 INTEGER
- ){stop}
+ event.listen(
+ metadata,
+ "after_create",
+ DDL("ALTER TABLE users ADD CONSTRAINT "
+ "cst_user_name_length "
+ " CHECK (length(user_name) >= 8)")
+ )
-Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any
-other expression construct (such as ``select()``, ``table.insert()``, etc.). A
-full reference of available constructs is in :ref:`schema_api_ddl`.
+A more comprehensive method of creating libraries of DDL constructs is to use
+custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for
+details.
-The DDL constructs all extend a common base class which provides the
-capability to be associated with an individual
-:class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData`
-object, to be invoked upon create/drop events. Consider the example of a table
-which contains a CHECK constraint:
-.. sourcecode:: python+sql
+.. _schema_ddl_sequences:
+
+Controlling DDL Sequences
+-------------------------
- users = Table('users', metadata,
- Column('user_id', Integer, primary_key=True),
- Column('user_name', String(40), nullable=False),
- CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
- )
+The :class:`~.schema.DDL` construct introduced previously also has the
+ability to be invoked conditionally based on inspection of the
+database. This feature is available using the :meth:`.DDLElement.execute_if`
+method. For example, if we wanted to create a trigger but only on
+the Postgresql backend, we could invoke this as::
- {sql}users.create(engine)
- CREATE TABLE users (
- user_id SERIAL NOT NULL,
- user_name VARCHAR(40) NOT NULL,
- PRIMARY KEY (user_id),
- CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8)
- ){stop}
+ mytable = Table(
+ 'mytable', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', String(50))
+ )
-The above table contains a column "user_name" which is subject to a CHECK
-constraint that validates that the length of the string is at least eight
-characters. When a ``create()`` is issued for this table, DDL for the
-:class:`~sqlalchemy.schema.CheckConstraint` will also be issued inline within
-the table definition.
+ trigger = DDL(
+ "CREATE TRIGGER dt_ins BEFORE INSERT ON mytable "
+ "FOR EACH ROW BEGIN SET NEW.data='ins'; END"
+ )
-The :class:`~sqlalchemy.schema.CheckConstraint` construct can also be
-constructed externally and associated with the
-:class:`~sqlalchemy.schema.Table` afterwards::
+ event.listen(
+ mytable,
+ 'after_create',
+ trigger.execute_if(dialect='postgresql')
+ )
+
+The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple
+of string dialect names::
- constraint = CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
- users.append_constraint(constraint)
+ event.listen(
+ mytable,
+ "after_create",
+ trigger.execute_if(dialect=('postgresql', 'mysql'))
+ )
+ event.listen(
+ mytable,
+ "before_drop",
+ trigger.execute_if(dialect=('postgresql', 'mysql'))
+ )
-So far, the effect is the same. However, if we create DDL elements
-corresponding to the creation and removal of this constraint, and associate
-them with the :class:`.Table` as events, these new events
-will take over the job of issuing DDL for the constraint. Additionally, the
-constraint will be added via ALTER:
+The :meth:`.DDLElement.execute_if` method can also work against a callable
+function that will receive the database connection in use. In the
+example below, we use this to conditionally create a CHECK constraint,
+first looking within the Postgresql catalogs to see if it exists:
.. sourcecode:: python+sql
- from sqlalchemy import event
+ def should_create(ddl, target, connection, **kw):
+ row = connection.execute(
+ "select conname from pg_constraint where conname='%s'" %
+ ddl.element.name).scalar()
+ return not bool(row)
+
+ def should_drop(ddl, target, connection, **kw):
+ return not should_create(ddl, target, connection, **kw)
event.listen(
users,
"after_create",
- AddConstraint(constraint)
+ DDL(
+ "ALTER TABLE users ADD CONSTRAINT "
+ "cst_user_name_length CHECK (length(user_name) >= 8)"
+ ).execute_if(callable_=should_create)
)
event.listen(
users,
"before_drop",
- DropConstraint(constraint)
+ DDL(
+ "ALTER TABLE users DROP CONSTRAINT cst_user_name_length"
+ ).execute_if(callable_=should_drop)
)
{sql}users.create(engine)
@@ -108,61 +123,67 @@ constraint will be added via ALTER:
PRIMARY KEY (user_id)
)
+ select conname from pg_constraint where conname='cst_user_name_length'
ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop}
{sql}users.drop(engine)
+ select conname from pg_constraint where conname='cst_user_name_length'
ALTER TABLE users DROP CONSTRAINT cst_user_name_length
DROP TABLE users{stop}
-The real usefulness of the above becomes clearer once we illustrate the
-:meth:`.DDLElement.execute_if` method. This method returns a modified form of
-the DDL callable which will filter on criteria before responding to a
-received event. It accepts a parameter ``dialect``, which is the string
-name of a dialect or a tuple of such, which will limit the execution of the
-item to just those dialects. It also accepts a ``callable_`` parameter which
-may reference a Python callable which will be invoked upon event reception,
-returning ``True`` or ``False`` indicating if the event should proceed.
-
-If our :class:`~sqlalchemy.schema.CheckConstraint` was only supported by
-Postgresql and not other databases, we could limit its usage to just that dialect::
+Using the built-in DDLElement Classes
+--------------------------------------
- event.listen(
- users,
- 'after_create',
- AddConstraint(constraint).execute_if(dialect='postgresql')
- )
- event.listen(
- users,
- 'before_drop',
- DropConstraint(constraint).execute_if(dialect='postgresql')
- )
+The ``sqlalchemy.schema`` package contains SQL expression constructs that
+provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement:
-Or to any set of dialects::
+.. sourcecode:: python+sql
- event.listen(
- users,
- "after_create",
- AddConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
- )
- event.listen(
- users,
- "before_drop",
- DropConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
- )
+ from sqlalchemy.schema import CreateTable
+ {sql}engine.execute(CreateTable(mytable))
+ CREATE TABLE mytable (
+ col1 INTEGER,
+ col2 INTEGER,
+ col3 INTEGER,
+ col4 INTEGER,
+ col5 INTEGER,
+ col6 INTEGER
+ ){stop}
-When using a callable, the callable is passed the ddl element, the
-:class:`.Table` or :class:`.MetaData`
-object whose "create" or "drop" event is in progress, and the
-:class:`.Connection` object being used for the
-operation, as well as additional information as keyword arguments. The
-callable can perform checks, such as whether or not a given item already
-exists. Below we define ``should_create()`` and ``should_drop()`` callables
-that check for the presence of our named constraint:
+Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any
+other expression construct (such as ``select()``, ``table.insert()``, etc.).
+All of SQLAlchemy's DDL oriented constructs are subclasses of
+the :class:`.DDLElement` base class; this is the base of all the
+objects corresponding to CREATE and DROP as well as ALTER,
+not only in SQLAlchemy but in Alembic Migrations as well.
+A full reference of available constructs is in :ref:`schema_api_ddl`.
+
+User-defined DDL constructs may also be created as subclasses of
+:class:`.DDLElement` itself. The documentation in
+:ref:`sqlalchemy.ext.compiler_toplevel` has several examples of this.
+
+The event-driven DDL system described in the previous section
+:ref:`schema_ddl_sequences` is available with other :class:`.DDLElement`
+objects as well. However, when dealing with the built-in constructs
+such as :class:`.CreateIndex`, :class:`.CreateSequence`, etc, the event
+system is of **limited** use, as methods like :meth:`.Table.create` and
+:meth:`.MetaData.create_all` will invoke these constructs unconditionally.
+In a future SQLAlchemy release, the DDL event system including conditional
+execution will be taken into account for built-in constructs that currently
+invoke in all cases.
+
+We can illustrate an event-driven
+example with the :class:`.AddConstraint` and :class:`.DropConstraint`
+constructs, as the event-driven system will work for CHECK and UNIQUE
+constraints, using these as we did in our previous example of
+:meth:`.DDLElement.execute_if`:
.. sourcecode:: python+sql
def should_create(ddl, target, connection, **kw):
- row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar()
+ row = connection.execute(
+ "select conname from pg_constraint where conname='%s'" %
+ ddl.element.name).scalar()
return not bool(row)
def should_drop(ddl, target, connection, **kw):
@@ -194,26 +215,12 @@ that check for the presence of our named constraint:
ALTER TABLE users DROP CONSTRAINT cst_user_name_length
DROP TABLE users{stop}
-Custom DDL
-----------
-
-Custom DDL phrases are most easily achieved using the
-:class:`~sqlalchemy.schema.DDL` construct. This construct works like all the
-other DDL elements except it accepts a string which is the text to be emitted:
-
-.. sourcecode:: python+sql
-
- event.listen(
- metadata,
- "after_create",
- DDL("ALTER TABLE users ADD CONSTRAINT "
- "cst_user_name_length "
- " CHECK (length(user_name) >= 8)")
- )
-
-A more comprehensive method of creating libraries of DDL constructs is to use
-custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for
-details.
+While the above example is against the built-in :class:`.AddConstraint`
+and :class:`.DropConstraint` objects, the main usefulness of DDL events
+for now remains focused on the use of the :class:`.DDL` construct itself,
+as well as with user-defined subclasses of :class:`.DDLElement` that aren't
+already part of the :meth:`.MetaData.create_all`, :meth:`.Table.create`,
+and corresponding "drop" processes.
.. _schema_api_ddl:
@@ -233,6 +240,7 @@ DDL Expression Constructs API
:members:
:undoc-members:
+.. autoclass:: _CreateDropBase
.. autoclass:: CreateTable
:members:
diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst
index 4166ac449..a7287a360 100644
--- a/doc/build/core/defaults.rst
+++ b/doc/build/core/defaults.rst
@@ -45,7 +45,7 @@ defaults)::
Python-Executed Functions
-------------------------
-The ``default`` and ``onupdate`` keyword arguments also accept Python
+The :paramref:`.Column.default` and :paramref:`.Column.onupdate` keyword arguments also accept Python
functions. These functions are invoked at the time of insert or update if no
other value for that column is supplied, and the value returned is used for
the column's value. Below illustrates a crude "sequence" that assigns an
@@ -67,12 +67,12 @@ built-in capabilities of the database should normally be used, which may
include sequence objects or other autoincrementing capabilities. For primary
key columns, SQLAlchemy will in most cases use these capabilities
automatically. See the API documentation for
-:class:`~sqlalchemy.schema.Column` including the ``autoincrement`` flag, as
+:class:`~sqlalchemy.schema.Column` including the :paramref:`.Column.autoincrement` flag, as
well as the section on :class:`~sqlalchemy.schema.Sequence` later in this
chapter for background on standard primary key generation techniques.
To illustrate onupdate, we assign the Python ``datetime`` function ``now`` to
-the ``onupdate`` attribute::
+the :paramref:`.Column.onupdate` attribute::
import datetime
@@ -93,7 +93,7 @@ executes.
Context-Sensitive Default Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The Python functions used by ``default`` and ``onupdate`` may also make use of
+The Python functions used by :paramref:`.Column.default` and :paramref:`.Column.onupdate` may also make use of
the current statement's context in order to determine a value. The `context`
of a statement is an internal SQLAlchemy object which contains all information
about the statement being executed, including its source expression, the
@@ -185,21 +185,23 @@ performance reasons.
When the statement is executed with a single set of parameters (that is, it is
not an "executemany" style execution), the returned
:class:`~sqlalchemy.engine.ResultProxy` will contain a collection
-accessible via ``result.postfetch_cols()`` which contains a list of all
+accessible via :meth:`.ResultProxy.postfetch_cols` which contains a list of all
:class:`~sqlalchemy.schema.Column` objects which had an inline-executed
default. Similarly, all parameters which were bound to the statement,
including all Python and SQL expressions which were pre-executed, are present
-in the ``last_inserted_params()`` or ``last_updated_params()`` collections on
-:class:`~sqlalchemy.engine.ResultProxy`. The ``inserted_primary_key``
+in the :meth:`.ResultProxy.last_inserted_params` or :meth:`.ResultProxy.last_updated_params` collections on
+:class:`~sqlalchemy.engine.ResultProxy`. The :attr:`.ResultProxy.inserted_primary_key`
collection contains a list of primary key values for the row inserted (a list
so that single-column and composite-column primary keys are represented in the
same format).
+.. _server_defaults:
+
Server Side Defaults
--------------------
-A variant on the SQL expression default is the ``server_default``, which gets
-placed in the CREATE TABLE statement during a ``create()`` operation:
+A variant on the SQL expression default is the :paramref:`.Column.server_default`, which gets
+placed in the CREATE TABLE statement during a :meth:`.Table.create` operation:
.. sourcecode:: python+sql
@@ -215,7 +217,7 @@ A create call for the above table will produce::
created_at datetime default sysdate
)
-The behavior of ``server_default`` is similar to that of a regular SQL
+The behavior of :paramref:`.Column.server_default` is similar to that of a regular SQL
default; if it's placed on a primary key column for a database which doesn't
have a way to "postfetch" the ID, and the statement is not "inlined", the SQL
expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on
diff --git a/doc/build/core/events.rst b/doc/build/core/events.rst
index d19b910b1..451cb9460 100644
--- a/doc/build/core/events.rst
+++ b/doc/build/core/events.rst
@@ -11,10 +11,6 @@ ORM events are described in :ref:`orm_event_toplevel`.
.. autoclass:: sqlalchemy.event.base.Events
:members:
-.. versionadded:: 0.7
- The event system supersedes the previous system of "extension", "listener",
- and "proxy" classes.
-
Connection Pool Events
-----------------------
diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst
index c04de158b..1eade1c1c 100644
--- a/doc/build/core/metadata.rst
+++ b/doc/build/core/metadata.rst
@@ -306,26 +306,21 @@ Column, Table, MetaData API
.. autoclass:: Column
:members:
:inherited-members:
- :undoc-members:
.. autoclass:: MetaData
:members:
- :undoc-members:
.. autoclass:: SchemaItem
:members:
- :undoc-members:
.. autoclass:: Table
:members:
:inherited-members:
- :undoc-members:
.. autoclass:: ThreadLocalMetaData
:members:
- :undoc-members:
diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst
index ce6d443f9..f9384fd60 100644
--- a/doc/build/core/pooling.rst
+++ b/doc/build/core/pooling.rst
@@ -209,6 +209,8 @@ correspond to a single request failing with a 500 error, then the web applicatio
continuing normally beyond that. Hence the approach is "optimistic" in that frequent
database restarts are not anticipated.
+.. _pool_setting_recycle:
+
Setting Pool Recycle
~~~~~~~~~~~~~~~~~~~~~~~
@@ -231,9 +233,55 @@ of the :class:`.Pool` itself, independent of whether or not an :class:`.Engine`
Disconnect Handling - Pessimistic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-At the expense of some extra SQL emitted for each connection checked out from the pool,
-a "ping" operation established by a checkout event handler
-can detect an invalid connection before it is used::
+At the expense of some extra SQL emitted for each connection checked out from
+the pool, a "ping" operation established by a checkout event handler can
+detect an invalid connection before it is used. In modern SQLAlchemy, the
+best way to do this is to make use of the
+:meth:`.ConnectionEvents.engine_connect` event, assuming the use of a
+:class:`.Engine` and not just a raw :class:`.Pool` object::
+
+ from sqlalchemy import exc
+ from sqlalchemy import event
+ from sqlalchemy import select
+
+ some_engine = create_engine(...)
+
+ @event.listens_for(some_engine, "engine_connect")
+ def ping_connection(connection, branch):
+ if branch:
+ # "branch" refers to a sub-connection of a connection,
+ # we don't want to bother pinging on these.
+ return
+
+ try:
+ # run a SELECT 1. use a core select() so that
+ # the SELECT of a scalar value without a table is
+ # appropriately formatted for the backend
+ connection.scalar(select([1]))
+ except exc.DBAPIError as err:
+ # catch SQLAlchemy's DBAPIError, which is a wrapper
+ # for the DBAPI's exception. It includes a .connection_invalidated
+ # attribute which specifies if this connection is a "disconnect"
+ # condition, which is based on inspection of the original exception
+ # by the dialect in use.
+ if err.connection_invalidated:
+ # run the same SELECT again - the connection will re-validate
+ # itself and establish a new connection. The disconnect detection
+ # here also causes the whole connection pool to be invalidated
+ # so that all stale connections are discarded.
+ connection.scalar(select([1]))
+ else:
+ raise
+
+The above recipe has the advantage that we are making use of SQLAlchemy's
+facilities for detecting those DBAPI exceptions that are known to indicate
+a "disconnect" situation, as well as the :class:`.Engine` object's ability
+to correctly invalidate the current connection pool when this condition
+occurs and allowing the current :class:`.Connection` to re-validate onto
+a new DBAPI connection.
+
+For the much less common case of where a :class:`.Pool` is being used without
+an :class:`.Engine`, an older approach may be used as below::
from sqlalchemy import exc
from sqlalchemy import event
@@ -245,46 +293,19 @@ can detect an invalid connection before it is used::
try:
cursor.execute("SELECT 1")
except:
- # optional - dispose the whole pool
- # instead of invalidating one at a time
- # connection_proxy._pool.dispose()
-
# raise DisconnectionError - pool will try
# connecting again up to three times before raising.
raise exc.DisconnectionError()
cursor.close()
-Above, the :class:`.Pool` object specifically catches :class:`~sqlalchemy.exc.DisconnectionError` and attempts
-to create a new DBAPI connection, up to three times, before giving up and then raising
-:class:`~sqlalchemy.exc.InvalidRequestError`, failing the connection. This recipe will ensure
-that a new :class:`.Connection` will succeed even if connections
-in the pool have gone stale, provided that the database server is actually running. The expense
-is that of an additional execution performed per checkout. When using the ORM :class:`.Session`,
-there is one connection checkout per transaction, so the expense is fairly low. The ping approach
-above also works with straight connection pool usage, that is, even if no :class:`.Engine` were
-involved.
-
-The event handler can be tested using a script like the following, restarting the database
-server at the point at which the script pauses for input::
-
- from sqlalchemy import create_engine
- e = create_engine("mysql://scott:tiger@localhost/test", echo_pool=True)
- c1 = e.connect()
- c2 = e.connect()
- c3 = e.connect()
- c1.close()
- c2.close()
- c3.close()
-
- # pool size is now three.
-
- print "Restart the server"
- raw_input()
-
- for i in xrange(10):
- c = e.connect()
- print c.execute("select 1").fetchall()
- c.close()
+Above, the :class:`.Pool` object specifically catches
+:class:`~sqlalchemy.exc.DisconnectionError` and attempts to create a new DBAPI
+connection, up to three times, before giving up and then raising
+:class:`~sqlalchemy.exc.InvalidRequestError`, failing the connection. The
+disadvantage of the above approach is that we don't have any easy way of
+determining if the exception raised is in fact a "disconnect" situation, since
+there is no :class:`.Engine` or :class:`.Dialect` in play, and also the above
+error would occur individually for all stale connections still in the pool.
.. _pool_connection_invalidation:
diff --git a/doc/build/core/sqlelement.rst b/doc/build/core/sqlelement.rst
index 44a969dbb..cf52a0166 100644
--- a/doc/build/core/sqlelement.rst
+++ b/doc/build/core/sqlelement.rst
@@ -9,8 +9,12 @@ constructs is the :class:`.ClauseElement`, which is the base for several
sub-branches. The :class:`.ColumnElement` class is the fundamental unit
used to construct any kind of typed SQL expression.
+.. autofunction:: all_
+
.. autofunction:: and_
+.. autofunction:: any_
+
.. autofunction:: asc
.. autofunction:: between
@@ -65,6 +69,8 @@ used to construct any kind of typed SQL expression.
.. autofunction:: type_coerce
+.. autofunction:: within_group
+
.. autoclass:: BinaryExpression
:members:
@@ -129,9 +135,15 @@ used to construct any kind of typed SQL expression.
.. autoclass:: Tuple
:members:
+.. autoclass:: WithinGroup
+ :members:
+
.. autoclass:: sqlalchemy.sql.elements.True_
:members:
+.. autoclass:: TypeCoerce
+ :members:
+
.. autoclass:: sqlalchemy.sql.operators.custom_op
:members:
diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst
index cc2a97625..5773cab40 100644
--- a/doc/build/core/tutorial.rst
+++ b/doc/build/core/tutorial.rst
@@ -50,13 +50,13 @@ Version Check
=============
-A quick check to verify that we are on at least **version 1.0** of SQLAlchemy:
+A quick check to verify that we are on at least **version 1.1** of SQLAlchemy:
.. sourcecode:: pycon+sql
>>> import sqlalchemy
- >>> sqlalchemy.__version__ # doctest:+SKIP
- 1.0.0
+ >>> sqlalchemy.__version__ # doctest: +SKIP
+ 1.1.0
Connecting
==========
@@ -149,11 +149,8 @@ each table first before creating, so it's safe to call multiple times:
.. sourcecode:: pycon+sql
- {sql}>>> metadata.create_all(engine) #doctest: +NORMALIZE_WHITESPACE
- PRAGMA table_info("users")
- ()
- PRAGMA table_info("addresses")
- ()
+ {sql}>>> metadata.create_all(engine)
+ SE...
CREATE TABLE users (
id INTEGER NOT NULL,
name VARCHAR,
@@ -243,7 +240,7 @@ data consists of literal values, SQLAlchemy automatically generates bind
parameters for them. We can peek at this data for now by looking at the
compiled form of the statement::
- >>> ins.compile().params #doctest: +NORMALIZE_WHITESPACE
+ >>> ins.compile().params # doctest: +SKIP
{'fullname': 'Jack Jones', 'name': 'jack'}
Executing
@@ -257,7 +254,7 @@ connections capable of issuing SQL to the database. To acquire a connection,
we use the ``connect()`` method::
>>> conn = engine.connect()
- >>> conn #doctest: +ELLIPSIS
+ >>> conn
<sqlalchemy.engine.base.Connection object at 0x...>
The :class:`~sqlalchemy.engine.Connection` object represents an actively
@@ -290,7 +287,8 @@ the SQLAlchemy :class:`~sqlalchemy.engine.Connection` object references a
DBAPI connection, the result, known as a
:class:`~sqlalchemy.engine.ResultProxy` object, is analogous to the DBAPI
cursor object. In the case of an INSERT, we can get important information from
-it, such as the primary key values which were generated from our statement:
+it, such as the primary key values which were generated from our statement
+using :attr:`.ResultProxy.inserted_primary_key`:
.. sourcecode:: pycon+sql
@@ -304,8 +302,11 @@ value would have been used. In either case, SQLAlchemy always knows how to get
at a newly generated primary key value, even though the method of generating
them is different across different databases; each database's
:class:`~sqlalchemy.engine.interfaces.Dialect` knows the specific steps needed to
-determine the correct value (or values; note that ``inserted_primary_key``
-returns a list so that it supports composite primary keys).
+determine the correct value (or values; note that
+:attr:`.ResultProxy.inserted_primary_key`
+returns a list so that it supports composite primary keys). Methods here
+range from using ``cursor.lastrowid``, to selecting from a database-specific
+function, to using ``INSERT..RETURNING`` syntax; this all occurs transparently.
.. _execute_multiple:
@@ -324,7 +325,7 @@ and use it in the "normal" way:
.. sourcecode:: pycon+sql
>>> ins = users.insert()
- >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams') # doctest: +ELLIPSIS
+ >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
{opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?)
(2, 'wendy', 'Wendy Williams')
COMMIT
@@ -343,7 +344,7 @@ inserted, as we do here to add some email addresses:
.. sourcecode:: pycon+sql
- >>> conn.execute(addresses.insert(), [ # doctest: +ELLIPSIS
+ >>> conn.execute(addresses.insert(), [
... {'user_id': 1, 'email_address' : 'jack@yahoo.com'},
... {'user_id': 1, 'email_address' : 'jack@msn.com'},
... {'user_id': 2, 'email_address' : 'www@www.org'},
@@ -382,7 +383,7 @@ statements is the :func:`.select` function:
>>> from sqlalchemy.sql import select
>>> s = select([users])
- >>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE
+ >>> result = conn.execute(s)
{opensql}SELECT users.id, users.name, users.fullname
FROM users
()
@@ -400,7 +401,7 @@ rows from it is to just iterate:
.. sourcecode:: pycon+sql
>>> for row in result:
- ... print row
+ ... print(row)
(1, u'jack', u'Jack Jones')
(2, u'wendy', u'Wendy Williams')
@@ -410,13 +411,13 @@ through dictionary access, using the string names of columns:
.. sourcecode:: pycon+sql
- {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> result = conn.execute(s)
SELECT users.id, users.name, users.fullname
FROM users
()
{stop}>>> row = result.fetchone()
- >>> print "name:", row['name'], "; fullname:", row['fullname']
+ >>> print("name:", row['name'], "; fullname:", row['fullname'])
name: jack ; fullname: Jack Jones
Integer indexes work as well:
@@ -424,7 +425,7 @@ Integer indexes work as well:
.. sourcecode:: pycon+sql
>>> row = result.fetchone()
- >>> print "name:", row[1], "; fullname:", row[2]
+ >>> print("name:", row[1], "; fullname:", row[2])
name: wendy ; fullname: Wendy Williams
But another way, whose usefulness will become apparent later on, is to use the
@@ -432,8 +433,8 @@ But another way, whose usefulness will become apparent later on, is to use the
.. sourcecode:: pycon+sql
- {sql}>>> for row in conn.execute(s): # doctest: +NORMALIZE_WHITESPACE
- ... print "name:", row[users.c.name], "; fullname:", row[users.c.fullname]
+ {sql}>>> for row in conn.execute(s):
+ ... print("name:", row[users.c.name], "; fullname:", row[users.c.fullname])
SELECT users.id, users.name, users.fullname
FROM users
()
@@ -460,12 +461,12 @@ the ``c`` attribute of the :class:`~sqlalchemy.schema.Table` object:
.. sourcecode:: pycon+sql
>>> s = select([users.c.name, users.c.fullname])
- {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> result = conn.execute(s)
SELECT users.name, users.fullname
FROM users
()
- {stop}>>> for row in result: #doctest: +NORMALIZE_WHITESPACE
- ... print row
+ {stop}>>> for row in result:
+ ... print(row)
(u'jack', u'Jack Jones')
(u'wendy', u'Wendy Williams')
@@ -478,7 +479,7 @@ our :func:`.select` statement:
.. sourcecode:: pycon+sql
{sql}>>> for row in conn.execute(select([users, addresses])):
- ... print row # doctest: +NORMALIZE_WHITESPACE
+ ... print(row)
SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address
FROM users, addresses
()
@@ -501,7 +502,7 @@ WHERE clause. We do that using :meth:`.Select.where`:
>>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id)
{sql}>>> for row in conn.execute(s):
- ... print row # doctest: +NORMALIZE_WHITESPACE
+ ... print(row)
SELECT users.id, users.name, users.fullname, addresses.id,
addresses.user_id, addresses.email_address
FROM users, addresses
@@ -523,8 +524,8 @@ a WHERE clause. So lets see exactly what that expression is doing:
.. sourcecode:: pycon+sql
- >>> users.c.id == addresses.c.user_id #doctest: +ELLIPSIS
- <sqlalchemy.sql.expression.BinaryExpression object at 0x...>
+ >>> users.c.id == addresses.c.user_id
+ <sqlalchemy.sql.elements.BinaryExpression object at 0x...>
Wow, surprise ! This is neither a ``True`` nor a ``False``. Well what is it ?
@@ -548,7 +549,7 @@ some of its capabilities. We've seen how to equate two columns to each other:
.. sourcecode:: pycon+sql
- >>> print users.c.id == addresses.c.user_id
+ >>> print(users.c.id == addresses.c.user_id)
users.id = addresses.user_id
If we use a literal value (a literal meaning, not a SQLAlchemy clause object),
@@ -556,7 +557,7 @@ we get a bind parameter:
.. sourcecode:: pycon+sql
- >>> print users.c.id == 7
+ >>> print(users.c.id == 7)
users.id = :id_1
The ``7`` literal is embedded in the resulting
@@ -573,22 +574,22 @@ equals, not equals, etc.:
.. sourcecode:: pycon+sql
- >>> print users.c.id != 7
+ >>> print(users.c.id != 7)
users.id != :id_1
>>> # None converts to IS NULL
- >>> print users.c.name == None
+ >>> print(users.c.name == None)
users.name IS NULL
>>> # reverse works too
- >>> print 'fred' > users.c.name
+ >>> print('fred' > users.c.name)
users.name < :name_1
If we add two integer columns together, we get an addition expression:
.. sourcecode:: pycon+sql
- >>> print users.c.id + addresses.c.id
+ >>> print(users.c.id + addresses.c.id)
users.id + addresses.id
Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important!
@@ -599,7 +600,7 @@ something different:
.. sourcecode:: pycon+sql
- >>> print users.c.name + users.c.fullname
+ >>> print(users.c.name + users.c.fullname)
users.name || users.fullname
Where ``||`` is the string concatenation operator used on most databases. But
@@ -607,8 +608,8 @@ not all of them. MySQL users, fear not:
.. sourcecode:: pycon+sql
- >>> print (users.c.name + users.c.fullname).\
- ... compile(bind=create_engine('mysql://'))
+ >>> print((users.c.name + users.c.fullname).
+ ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP
concat(users.name, users.fullname)
The above illustrates the SQL that's generated for an
@@ -620,7 +621,7 @@ always use the :meth:`.ColumnOperators.op` method; this generates whatever opera
.. sourcecode:: pycon+sql
- >>> print users.c.name.op('tiddlywinks')('foo')
+ >>> print(users.c.name.op('tiddlywinks')('foo'))
users.name tiddlywinks :name_1
This function can also be used to make bitwise operators explicit. For example::
@@ -656,15 +657,16 @@ a :meth:`~.ColumnOperators.like`):
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import and_, or_, not_
- >>> print and_(
+ >>> print(and_(
... users.c.name.like('j%'),
- ... users.c.id == addresses.c.user_id, #doctest: +NORMALIZE_WHITESPACE
+ ... users.c.id == addresses.c.user_id,
... or_(
... addresses.c.email_address == 'wendy@aol.com',
... addresses.c.email_address == 'jack@yahoo.com'
... ),
... not_(users.c.id > 5)
... )
+ ... )
users.name LIKE :name_1 AND users.id = addresses.user_id AND
(addresses.email_address = :email_address_1
OR addresses.email_address = :email_address_2)
@@ -676,12 +678,13 @@ parenthesis:
.. sourcecode:: pycon+sql
- >>> print users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \
+ >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) &
... (
... (addresses.c.email_address == 'wendy@aol.com') | \
... (addresses.c.email_address == 'jack@yahoo.com')
... ) \
- ... & ~(users.c.id>5) # doctest: +NORMALIZE_WHITESPACE
+ ... & ~(users.c.id>5)
+ ... )
users.name LIKE :name_1 AND users.id = addresses.user_id AND
(addresses.email_address = :email_address_1
OR addresses.email_address = :email_address_2)
@@ -712,7 +715,7 @@ not have a name:
... )
... )
... )
- >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(s).fetchall()
SELECT users.fullname || ? || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
@@ -741,7 +744,7 @@ A shortcut to using :func:`.and_` is to chain together multiple
... addresses.c.email_address.like('%@msn.com')
... )
... )
- >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(s).fetchall()
SELECT users.fullname || ? || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
@@ -754,8 +757,8 @@ method calls is called :term:`method chaining`.
.. _sqlexpression_text:
-Using Text
-===========
+Using Textual SQL
+=================
Our last example really became a handful to type. Going from what one
understands to be a textual SQL expression into a Python construct which
@@ -776,7 +779,7 @@ unchanged. Below, we create a :func:`~.expression.text` object and execute it:
... "AND users.name BETWEEN :x AND :y "
... "AND (addresses.email_address LIKE :e1 "
... "OR addresses.email_address LIKE :e2)")
- {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() # doctest:+NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall()
SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
@@ -794,7 +797,27 @@ construct using the :meth:`~.TextClause.bindparams` method; if we are
using datatypes that need special handling as they are received in Python,
or we'd like to compose our :func:`~.expression.text` object into a larger
expression, we may also wish to use the :meth:`~.TextClause.columns` method
-in order to specify column return types and names.
+in order to specify column return types and names:
+
+.. sourcecode:: pycon+sql
+
+ >>> s = text(
+ ... "SELECT users.fullname || ', ' || addresses.email_address AS title "
+ ... "FROM users, addresses "
+ ... "WHERE users.id = addresses.user_id "
+ ... "AND users.name BETWEEN :x AND :y "
+ ... "AND (addresses.email_address LIKE :e1 "
+ ... "OR addresses.email_address LIKE :e2)")
+ >>> s = s.columns(title=String)
+ >>> s = s.bindparams(x='m', y='z', e1='%@aol.com', e2='%@msn.com')
+ >>> conn.execute(s).fetchall()
+ SELECT users.fullname || ', ' || addresses.email_address AS title
+ FROM users, addresses
+ WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
+ (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
+ ('m', 'z', '%@aol.com', '%@msn.com')
+ {stop}[(u'Wendy Williams, wendy@aol.com',)]
+
:func:`~.expression.text` can also be used freely within a
:func:`~.expression.select` object, which accepts :func:`~.expression.text`
@@ -819,7 +842,7 @@ need to refer to any pre-established :class:`.Table` metadata:
... "OR addresses.email_address LIKE :y)")
... )
... ).select_from(text('users, addresses'))
- {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall()
SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z'
@@ -841,7 +864,12 @@ need to refer to any pre-established :class:`.Table` metadata:
the less flexibility and ability for manipulation/transformation
the statement will have.
-.. versionchanged:: 1.0.0
+.. seealso::
+
+ :ref:`orm_tutorial_literal_sql` - integrating ORM-level queries with
+ :func:`.text`
+
+.. versionchanged:: 1.0.0
The :func:`.select` construct emits warnings when string SQL
fragments are coerced to :func:`.text`, and :func:`.text` should
be used explicitly. See :ref:`migration_2992` for background.
@@ -872,7 +900,7 @@ be quoted:
>>> from sqlalchemy.sql import table, literal_column
>>> s = select([
... literal_column("users.fullname", String) +
- ... ' , ' +
+ ... ', ' +
... literal_column("addresses.email_address").label("title")
... ]).\
... where(
@@ -885,13 +913,13 @@ be quoted:
... )
... ).select_from(table('users')).select_from(table('addresses'))
- {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE
- SELECT "users.fullname" || ? || "addresses.email_address" AS anon_1
+ {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall()
+ SELECT users.fullname || ? || addresses.email_address AS anon_1
FROM users, addresses
- WHERE "users.id" = "addresses.user_id"
+ WHERE users.id = addresses.user_id
AND users.name BETWEEN 'm' AND 'z'
AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- (' , ', '%@aol.com', '%@msn.com')
+ (', ', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
Ordering or Grouping by a Label
@@ -914,7 +942,7 @@ expression from being rendered twice:
... func.count(addresses.c.id).label('num_addresses')]).\
... order_by("num_addresses")
- {sql}>>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
FROM addresses ORDER BY num_addresses
()
@@ -931,7 +959,7 @@ name:
... func.count(addresses.c.id).label('num_addresses')]).\
... order_by(desc("num_addresses"))
- {sql}>>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
FROM addresses ORDER BY num_addresses DESC
()
@@ -952,7 +980,7 @@ by a column name that appears more than once:
... where(u1a.c.name > u1b.c.name).\
... order_by(u1a.c.name) # using "name" here would be ambiguous
- {sql}>>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(stmt).fetchall()
SELECT users_1.id, users_1.name, users_1.fullname, users_2.id,
users_2.name, users_2.fullname
FROM users AS users_1, users AS users_2
@@ -994,7 +1022,7 @@ once for each address. We create two :class:`.Alias` constructs against
... a1.c.email_address == 'jack@msn.com',
... a2.c.email_address == 'jack@yahoo.com'
... ))
- {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s).fetchall()
SELECT users.id, users.name, users.fullname
FROM users, addresses AS addresses_1, addresses AS addresses_2
WHERE users.id = addresses_1.user_id
@@ -1029,7 +1057,7 @@ to "correlate" the inner ``users`` table with the outer one:
>>> a1 = s.correlate(None).alias()
>>> s = select([users.c.name]).where(users.c.id == a1.c.id)
- {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s).fetchall()
SELECT users.name
FROM users,
(SELECT users.id AS id, users.name AS name, users.fullname AS fullname
@@ -1054,7 +1082,7 @@ join:
.. sourcecode:: pycon+sql
- >>> print users.join(addresses)
+ >>> print(users.join(addresses))
users JOIN addresses ON users.id = addresses.user_id
The alert reader will see more surprises; SQLAlchemy figured out how to JOIN
@@ -1070,9 +1098,10 @@ username:
.. sourcecode:: pycon+sql
- >>> print users.join(addresses,
+ >>> print(users.join(addresses,
... addresses.c.email_address.like(users.c.name + '%')
... )
+ ... )
users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1)
When we create a :func:`.select` construct, SQLAlchemy looks around at the
@@ -1086,7 +1115,7 @@ here we make use of the :meth:`~.Select.select_from` method:
... users.join(addresses,
... addresses.c.email_address.like(users.c.name + '%'))
... )
- {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s).fetchall()
SELECT users.fullname
FROM users JOIN addresses ON addresses.email_address LIKE (users.name || ?)
('%',)
@@ -1098,7 +1127,7 @@ and is used in the same way as :meth:`~.FromClause.join`:
.. sourcecode:: pycon+sql
>>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses))
- >>> print s # doctest: +NORMALIZE_WHITESPACE
+ >>> print(s)
SELECT users.fullname
FROM users
LEFT OUTER JOIN addresses ON users.id = addresses.user_id
@@ -1110,7 +1139,7 @@ would be using ``OracleDialect``) to use Oracle-specific SQL:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.oracle import dialect as OracleDialect
- >>> print s.compile(dialect=OracleDialect(use_ansi=False)) # doctest: +NORMALIZE_WHITESPACE
+ >>> print(s.compile(dialect=OracleDialect(use_ansi=False)))
SELECT users.fullname
FROM users, addresses
WHERE users.id = addresses.user_id(+)
@@ -1151,7 +1180,7 @@ at execution time, as here where it converts to positional for SQLite:
>>> from sqlalchemy.sql import bindparam
>>> s = users.select(users.c.name == bindparam('username'))
- {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s, username='wendy').fetchall()
SELECT users.id, users.name, users.fullname
FROM users
WHERE users.name = ?
@@ -1166,7 +1195,7 @@ off to the database:
.. sourcecode:: pycon+sql
>>> s = users.select(users.c.name.like(bindparam('username', type_=String) + text("'%'")))
- {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s, username='wendy').fetchall()
SELECT users.id, users.name, users.fullname
FROM users
WHERE users.name LIKE (? || '%')
@@ -1190,7 +1219,7 @@ single named value is needed in the execute parameters:
... ).\
... select_from(users.outerjoin(addresses)).\
... order_by(addresses.c.id)
- {sql}>>> conn.execute(s, name='jack').fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(s, name='jack').fetchall()
SELECT users.id, users.name, users.fullname, addresses.id,
addresses.user_id, addresses.email_address
FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id
@@ -1212,16 +1241,16 @@ generates functions using attribute access:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import func
- >>> print func.now()
+ >>> print(func.now())
now()
- >>> print func.concat('x', 'y')
- concat(:param_1, :param_2)
+ >>> print(func.concat('x', 'y'))
+ concat(:concat_1, :concat_2)
By "generates", we mean that **any** SQL function is created based on the word
you choose::
- >>> print func.xyz_my_goofy_function() # doctest: +NORMALIZE_WHITESPACE
+ >>> print(func.xyz_my_goofy_function())
xyz_my_goofy_function()
Certain function names are known by SQLAlchemy, allowing special behavioral
@@ -1230,7 +1259,7 @@ don't get the parenthesis added after them, such as CURRENT_TIMESTAMP:
.. sourcecode:: pycon+sql
- >>> print func.current_timestamp()
+ >>> print(func.current_timestamp())
CURRENT_TIMESTAMP
Functions are most typically used in the columns clause of a select statement,
@@ -1249,7 +1278,7 @@ not important in this case:
... func.max(addresses.c.email_address, type_=String).
... label('maxemail')
... ])
- ... ).scalar() # doctest: +NORMALIZE_WHITESPACE
+ ... ).scalar()
{opensql}SELECT max(addresses.email_address) AS maxemail
FROM addresses
()
@@ -1273,7 +1302,7 @@ well as bind parameters:
... )
... )
>>> calc = calculate.alias()
- >>> print select([users]).where(users.c.id > calc.c.z) # doctest: +NORMALIZE_WHITESPACE
+ >>> print(select([users]).where(users.c.id > calc.c.z))
SELECT users.id, users.name, users.fullname
FROM users, (SELECT q, z, r
FROM calculate(:x, :y)) AS anon_1
@@ -1291,14 +1320,14 @@ of our selectable:
>>> calc2 = calculate.alias('c2').unique_params(x=5, y=12)
>>> s = select([users]).\
... where(users.c.id.between(calc1.c.z, calc2.c.z))
- >>> print s # doctest: +NORMALIZE_WHITESPACE
+ >>> print(s)
SELECT users.id, users.name, users.fullname
FROM users,
(SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1,
(SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2
WHERE users.id BETWEEN c1.z AND c2.z
- >>> s.compile().params
+ >>> s.compile().params # doctest: +SKIP
{u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17}
.. seealso::
@@ -1318,7 +1347,7 @@ OVER clause, using the :meth:`.FunctionElement.over` method:
... users.c.id,
... func.row_number().over(order_by=users.c.name)
... ])
- >>> print s # doctest: +NORMALIZE_WHITESPACE
+ >>> print(s)
SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1
FROM users
@@ -1345,7 +1374,7 @@ module level functions :func:`~.expression.union` and
... where(addresses.c.email_address.like('%@yahoo.com')),
... ).order_by(addresses.c.email_address)
- {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall()
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address = ?
@@ -1371,7 +1400,7 @@ Also available, though not supported on all databases, are
... where(addresses.c.email_address.like('%@msn.com'))
... )
- {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall()
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ?
@@ -1401,7 +1430,7 @@ want the "union" to be stated as a subquery:
... ).alias().select(), # apply subquery here
... addresses.select(addresses.c.email_address.like('%@msn.com'))
... )
- {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall()
SELECT anon_1.id, anon_1.user_id, anon_1.email_address
FROM (SELECT addresses.id AS id, addresses.user_id AS user_id,
addresses.email_address AS email_address
@@ -1462,7 +1491,7 @@ other column within another :func:`.select`:
.. sourcecode:: pycon+sql
- >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(select([users.c.name, stmt])).fetchall()
{opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1
FROM addresses
WHERE users.id = addresses.user_id) AS anon_1
@@ -1478,7 +1507,7 @@ it using :meth:`.SelectBase.label` instead:
>>> stmt = select([func.count(addresses.c.id)]).\
... where(users.c.id == addresses.c.user_id).\
... label("address_count")
- >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(select([users.c.name, stmt])).fetchall()
{opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1
FROM addresses
WHERE users.id = addresses.user_id) AS address_count
@@ -1509,7 +1538,7 @@ still have at least one FROM clause of its own. For example:
... where(addresses.c.user_id == users.c.id).\
... where(addresses.c.email_address == 'jack@yahoo.com')
>>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt)
- >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name
FROM users
WHERE users.id = (SELECT addresses.user_id
@@ -1535,7 +1564,7 @@ may be correlated:
... [users.c.name, addresses.c.email_address]).\
... select_from(users.join(addresses)).\
... where(users.c.id == stmt)
- >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
WHERE users.id = (SELECT users.id
@@ -1554,7 +1583,7 @@ as the argument:
... correlate(None)
>>> enclosing_stmt = select([users.c.name]).\
... where(users.c.id == stmt)
- >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name
FROM users
WHERE users.id = (SELECT users.id
@@ -1577,7 +1606,7 @@ by telling it to correlate all FROM clauses except for ``users``:
... [users.c.name, addresses.c.email_address]).\
... select_from(users.join(addresses)).\
... where(users.c.id == stmt)
- >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
WHERE users.id = (SELECT users.id
@@ -1595,7 +1624,7 @@ Ordering is done by passing column expressions to the
.. sourcecode:: pycon+sql
>>> stmt = select([users.c.name]).order_by(users.c.name)
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name
FROM users ORDER BY users.name
()
@@ -1607,7 +1636,7 @@ and :meth:`~.ColumnElement.desc` modifiers:
.. sourcecode:: pycon+sql
>>> stmt = select([users.c.name]).order_by(users.c.name.desc())
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name
FROM users ORDER BY users.name DESC
()
@@ -1622,7 +1651,7 @@ This is provided via the :meth:`~.SelectBase.group_by` method:
>>> stmt = select([users.c.name, func.count(addresses.c.id)]).\
... select_from(users.join(addresses)).\
... group_by(users.c.name)
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, count(addresses.id) AS count_1
FROM users JOIN addresses
ON users.id = addresses.user_id
@@ -1640,7 +1669,7 @@ method:
... select_from(users.join(addresses)).\
... group_by(users.c.name).\
... having(func.length(users.c.name) > 4)
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, count(addresses.id) AS count_1
FROM users JOIN addresses
ON users.id = addresses.user_id
@@ -1659,10 +1688,10 @@ is the DISTINCT modifier. A simple DISTINCT clause can be added using the
... where(addresses.c.email_address.
... contains(users.c.name)).\
... distinct()
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT DISTINCT users.name
FROM users, addresses
- WHERE addresses.email_address LIKE '%%' || users.name || '%%'
+ WHERE (addresses.email_address LIKE '%%' || users.name || '%%')
()
{stop}[(u'jack',), (u'wendy',)]
@@ -1680,7 +1709,7 @@ into the current backend's methodology:
>>> stmt = select([users.c.name, addresses.c.email_address]).\
... select_from(users.join(addresses)).\
... limit(1).offset(1)
- >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
LIMIT ? OFFSET ?
@@ -1707,7 +1736,7 @@ as a value:
>>> stmt = users.update().\
... values(fullname="Fullname: " + users.c.name)
- >>> conn.execute(stmt) #doctest: +ELLIPSIS
+ >>> conn.execute(stmt)
{opensql}UPDATE users SET fullname=(? || users.name)
('Fullname: ',)
COMMIT
@@ -1732,7 +1761,7 @@ as in the example below:
>>> stmt = users.insert().\
... values(name=bindparam('_name') + " .. name")
- >>> conn.execute(stmt, [ # doctest: +ELLIPSIS
+ >>> conn.execute(stmt, [
... {'id':4, '_name':'name1'},
... {'id':5, '_name':'name2'},
... {'id':6, '_name':'name3'},
@@ -1752,7 +1781,7 @@ that can be specified:
... where(users.c.name == 'jack').\
... values(name='ed')
- >>> conn.execute(stmt) #doctest: +ELLIPSIS
+ >>> conn.execute(stmt)
{opensql}UPDATE users SET name=? WHERE users.name = ?
('ed', 'jack')
COMMIT
@@ -1772,7 +1801,7 @@ used to achieve this:
... {'oldname':'jack', 'newname':'ed'},
... {'oldname':'wendy', 'newname':'mary'},
... {'oldname':'jim', 'newname':'jake'},
- ... ]) #doctest: +ELLIPSIS
+ ... ])
{opensql}UPDATE users SET name=? WHERE users.name = ?
(('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
COMMIT
@@ -1790,7 +1819,7 @@ table, or the same table:
>>> stmt = select([addresses.c.email_address]).\
... where(addresses.c.user_id == users.c.id).\
... limit(1)
- >>> conn.execute(users.update().values(fullname=stmt)) #doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
+ >>> conn.execute(users.update().values(fullname=stmt))
{opensql}UPDATE users SET fullname=(SELECT addresses.email_address
FROM addresses
WHERE addresses.user_id = users.id
@@ -1848,6 +1877,53 @@ a non-supporting database. The ``UPDATE FROM`` syntax generates by default
when multiple tables are present, and the statement will be rejected
by the database if this syntax is not supported.
+.. _updates_order_parameters:
+
+Parameter-Ordered Updates
+--------------------------
+
+The default behavior of the :func:`.update` construct when rendering the SET
+clauses is to render them using the column ordering given in the
+originating :class:`.Table` object.
+This is an important behavior, since it means that a particular UPDATE
+statement with a particular set of columns
+will be rendered the same way each time, which has an impact on query caching systems
+that rely on the form of the statement, either client side or server side.
+Since the parameters themselves are passed to the :meth:`.Update.values`
+method as Python dictionary keys, there is no other fixed ordering
+available.
+
+However, in some cases the order of parameters rendered in the SET clause of an
+UPDATE statement can be significant. The main example of this is when using
+MySQL and providing updates to column values based on the values of other
+columns. The following statement::
+
+    UPDATE some_table SET x = y + 10, y = 20
+
+will have a different result than::
+
+    UPDATE some_table SET y = 20, x = y + 10
+
+This is because on MySQL, the individual SET clauses are fully evaluated on
+a per-value basis rather than on a per-row basis; as each SET clause
+is evaluated, the values embedded in the row are changing.
+
+To suit this specific use case, the
+:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+flag may be used. When using this flag, we supply a **Python list of 2-tuples**
+as the argument to the :meth:`.Update.values` method::
+
+ stmt = some_table.update(preserve_parameter_order=True).\
+ values([(some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)])
+
+The list of 2-tuples is essentially the same structure as a Python dictionary,
+except that it is ordered. Using the above form, we are assured that the
+"y" column's SET clause will render first, followed by the "x" column's SET clause.
+
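+As a rough sketch, continuing with the hypothetical ``some_table`` above, the
+effect of the flag can be observed by printing the statement; the SET clauses
+render in list order, along the lines of::
+
+    >>> print(stmt)
+    UPDATE some_table SET y=:y, x=(some_table.y + :y_1)
+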
+.. versionadded:: 1.0.10 Added support for explicit ordering of UPDATE
+ parameters using the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag.
+
+
.. _deletes:
Deletes
@@ -1858,13 +1934,13 @@ Finally, a delete. This is accomplished easily enough using the
.. sourcecode:: pycon+sql
- >>> conn.execute(addresses.delete()) #doctest: +ELLIPSIS
+ >>> conn.execute(addresses.delete())
{opensql}DELETE FROM addresses
()
COMMIT
{stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
- >>> conn.execute(users.delete().where(users.c.name > 'm')) #doctest: +ELLIPSIS
+ >>> conn.execute(users.delete().where(users.c.name > 'm'))
{opensql}DELETE FROM users WHERE users.name > ?
('m',)
COMMIT
@@ -1881,7 +1957,7 @@ The value is available as :attr:`~.ResultProxy.rowcount`:
.. sourcecode:: pycon+sql
- >>> result = conn.execute(users.delete()) #doctest: +ELLIPSIS
+ >>> result = conn.execute(users.delete())
{opensql}DELETE FROM users
()
COMMIT
diff --git a/doc/build/core/type_api.rst b/doc/build/core/type_api.rst
index 88da4939e..7f0b68b64 100644
--- a/doc/build/core/type_api.rst
+++ b/doc/build/core/type_api.rst
@@ -11,9 +11,11 @@ Base Type API
.. autoclass:: Concatenable
:members:
- :inherited-members:
+.. autoclass:: Indexable
+ :members:
+
.. autoclass:: NullType
diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst
index 1ff1baac2..ec3c14dd6 100644
--- a/doc/build/core/type_basics.rst
+++ b/doc/build/core/type_basics.rst
@@ -38,6 +38,9 @@ database column type available on the target database when issuing a
type is emitted in ``CREATE TABLE``, such as ``VARCHAR`` see `SQL
Standard Types`_ and the other sections of this chapter.
+.. autoclass:: Array
+ :members:
+
.. autoclass:: BigInteger
:members:
diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst
index c85fa662b..0cc42bce1 100644
--- a/doc/build/dialects/index.rst
+++ b/doc/build/dialects/index.rst
@@ -48,7 +48,7 @@ Production Ready
* `ibm_db_sa <http://code.google.com/p/ibm-db/wiki/README>`_ - driver for IBM DB2 and Informix,
developed jointly by IBM and SQLAlchemy developers.
-* `redshift-sqlalchemy <https://pypi.python.org/pypi/redshift-sqlalchemy>`_ - driver for Amazon Redshift, adapts
+* `sqlalchemy-redshift <https://pypi.python.org/pypi/sqlalchemy-redshift>`_ - driver for Amazon Redshift, adapts
the existing Postgresql/psycopg2 driver.
* `sqlalchemy_exasol <https://github.com/blue-yonder/sqlalchemy_exasol>`_ - driver for EXASolution.
* `sqlalchemy-sqlany <https://github.com/sqlanywhere/sqlalchemy-sqlany>`_ - driver for SAP Sybase SQL
diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst
index e5d8d51bc..7e2a20ef7 100644
--- a/doc/build/dialects/postgresql.rst
+++ b/doc/build/dialects/postgresql.rst
@@ -24,15 +24,18 @@ construction arguments, are as follows:
.. currentmodule:: sqlalchemy.dialects.postgresql
+.. autoclass:: aggregate_order_by
+
.. autoclass:: array
.. autoclass:: ARRAY
:members: __init__, Comparator
+.. autofunction:: array_agg
-.. autoclass:: Any
+.. autofunction:: Any
-.. autoclass:: All
+.. autofunction:: All
.. autoclass:: BIT
:members: __init__
diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst
index 81a8678b4..658b4f785 100644
--- a/doc/build/faq/connections.rst
+++ b/doc/build/faq/connections.rst
@@ -136,3 +136,84 @@ when :meth:`.Connection.close` is called::
conn.detach() # detaches the DBAPI connection from the connection pool
conn.connection.<go nuts>
conn.close() # connection is closed for real, the pool replaces it with a new connection
+
+How do I use engines / connections / sessions with Python multiprocessing, or os.fork()?
+----------------------------------------------------------------------------------------
+
+The key goal with multiple Python processes is to prevent any database connections
+from being shared across processes. Depending on the specifics of the driver and OS,
+the issues that arise here range from non-working connections to socket connections
+that are used by multiple processes concurrently, leading to broken messaging (the
+latter case is typically the most common).
+
+The SQLAlchemy :class:`.Engine` object refers to a connection pool of existing
+database connections. So when this object is replicated to a child process,
+the goal is to ensure that no database connections are carried over. There
+are three general approaches to this:
+
+1. Disable pooling using :class:`.NullPool`. This is the most simplistic,
+   one-shot approach; it prevents the :class:`.Engine` from using any connection
+   more than once.
+
+2. Call :meth:`.Engine.dispose` on any given :class:`.Engine` as soon as one is
+   within the new process. In Python multiprocessing, constructs such as
+   ``multiprocessing.Pool`` include "initializer" hooks which are a place
+   where this can be performed; otherwise, at the point where ``os.fork()`` is
+   called or where the ``Process`` object begins the child fork, a single call
+   to :meth:`.Engine.dispose` will ensure any remaining connections are flushed.
+   A sketch illustrating these first two approaches appears at the end of this
+   section.
+
+3. An event handler can be applied to the connection pool that tests for connections
+ being shared across process boundaries, and invalidates them. This looks like
+ the following::
+
+ import os
+ import warnings
+
+ from sqlalchemy import event
+ from sqlalchemy import exc
+
+ def add_engine_pidguard(engine):
+ """Add multiprocessing guards.
+
+ Forces a connection to be reconnected if it is detected
+ as having been shared to a sub-process.
+
+ """
+
+ @event.listens_for(engine, "connect")
+ def connect(dbapi_connection, connection_record):
+ connection_record.info['pid'] = os.getpid()
+
+ @event.listens_for(engine, "checkout")
+ def checkout(dbapi_connection, connection_record, connection_proxy):
+ pid = os.getpid()
+ if connection_record.info['pid'] != pid:
+ # substitute log.debug() or similar here as desired
+ warnings.warn(
+ "Parent process %(orig)s forked (%(newproc)s) with an open "
+ "database connection, "
+ "which is being discarded and recreated." %
+ {"newproc": pid, "orig": connection_record.info['pid']})
+ connection_record.connection = connection_proxy.connection = None
+ raise exc.DisconnectionError(
+ "Connection record belongs to pid %s, "
+ "attempting to check out in pid %s" %
+ (connection_record.info['pid'], pid)
+ )
+
+   These events are applied to an :class:`.Engine` as soon as it's created::
+
+ engine = create_engine("...")
+
+ add_engine_pidguard(engine)
+
+The above strategies will accommodate the case of an :class:`.Engine`
+being shared among processes. However, for the case of a transaction-active
+:class:`.Session` or :class:`.Connection` being shared, there's no automatic
+fix for this; an application needs to ensure that a new child process only
+initiates new :class:`.Connection` objects and transactions, as well as ORM
+:class:`.Session` objects. For a :class:`.Session` object, technically
+this is only needed if the session is currently transaction-bound; however,
+the scope of a single :class:`.Session` is in any case intended to be
+kept within a single call stack (e.g. not a global object, not
+shared between processes or threads).
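+
+As a rough sketch of approaches 1 and 2 above (the database URL and the worker
+function here are hypothetical placeholders), disposing of inherited connections
+within a ``multiprocessing`` initializer might look like::
+
+    import multiprocessing
+
+    from sqlalchemy import create_engine
+    from sqlalchemy.pool import NullPool
+
+    # approach 1 would disable pooling entirely:
+    # engine = create_engine("postgresql://user:pass@host/dbname",
+    #                        poolclass=NullPool)
+
+    engine = create_engine("postgresql://user:pass@host/dbname")
+
+    def init_worker():
+        # approach 2: discard any connections inherited from the parent
+        # process; the pool in this child process starts out empty
+        engine.dispose()
+
+    def do_work(n):
+        # placeholder unit of work; each worker only uses connections
+        # created within its own process
+        with engine.connect() as conn:
+            return conn.scalar("SELECT 1")
+
+    if __name__ == '__main__':
+        pool = multiprocessing.Pool(processes=4, initializer=init_worker)
+        print(pool.map(do_work, range(4)))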
diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst
index e3aae00ce..8a47db77a 100644
--- a/doc/build/faq/sessions.rst
+++ b/doc/build/faq/sessions.rst
@@ -268,7 +268,7 @@ The joins generated by joined eager loading are only used to fully load related
collections, and are designed to have no impact on the primary results of the query.
Since they are anonymously aliased, they cannot be referenced directly.
-For detail on this beahvior, see :doc:`orm/loading`.
+For detail on this behavior, see :ref:`zen_of_eager_loading`.
Query has no ``__len__()``, why not?
------------------------------------
@@ -417,6 +417,77 @@ The recipe `ExpireRelationshipOnFKChange <http://www.sqlalchemy.org/trac/wiki/Us
in order to coordinate the setting of foreign key attributes with many-to-one
relationships.
+.. _faq_walk_objects:
+
+How do I walk all objects that are related to a given object?
+-------------------------------------------------------------
+
+An object that has other objects related to it will correspond to the
+:func:`.relationship` constructs set up between mappers. This code fragment will
+iterate all the objects, correcting for cycles as well::
+
+ from sqlalchemy import inspect
+
+
+ def walk(obj):
+ deque = [obj]
+
+ seen = set()
+
+ while deque:
+ obj = deque.pop(0)
+ if obj in seen:
+ continue
+ else:
+ seen.add(obj)
+ yield obj
+ insp = inspect(obj)
+ for relationship in insp.mapper.relationships:
+ related = getattr(obj, relationship.key)
+ if relationship.uselist:
+ deque.extend(related)
+ elif related is not None:
+ deque.append(related)
+
+The function can be demonstrated as follows::
+
+    from sqlalchemy import Column, ForeignKey, Integer
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.orm import relationship
+
+    Base = declarative_base()
+
+
+ class A(Base):
+ __tablename__ = 'a'
+ id = Column(Integer, primary_key=True)
+ bs = relationship("B", backref="a")
+
+
+ class B(Base):
+ __tablename__ = 'b'
+ id = Column(Integer, primary_key=True)
+ a_id = Column(ForeignKey('a.id'))
+ c_id = Column(ForeignKey('c.id'))
+ c = relationship("C", backref="bs")
+
+
+ class C(Base):
+ __tablename__ = 'c'
+ id = Column(Integer, primary_key=True)
+
+
+ a1 = A(bs=[B(), B(c=C())])
+
+
+ for obj in walk(a1):
+        print(obj)
+
+Output::
+
+ <__main__.A object at 0x10303b190>
+ <__main__.B object at 0x103025210>
+ <__main__.B object at 0x10303b0d0>
+ <__main__.C object at 0x103025490>
+
+
+
Is there a way to automagically have only unique keywords (or other kinds of objects) without doing a query for the keyword and getting a reference to the row containing that keyword?
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst
index c0ecee84b..9c1395f14 100644
--- a/doc/build/glossary.rst
+++ b/doc/build/glossary.rst
@@ -1019,7 +1019,7 @@ Glossary
http://en.wikipedia.org/wiki/Unique_key#Defining_unique_keys
transient
- This describes one of the four major object states which
+ This describes one of the major object states which
an object can have within a :term:`session`; a transient object
is a new object that doesn't have any database identity
and has not been associated with a session yet. When the
@@ -1031,7 +1031,7 @@ Glossary
:ref:`session_object_states`
pending
- This describes one of the four major object states which
+ This describes one of the major object states which
an object can have within a :term:`session`; a pending object
is a new object that doesn't have any database identity,
but has been recently associated with a session. When
@@ -1042,8 +1042,23 @@ Glossary
:ref:`session_object_states`
+ deleted
+ This describes one of the major object states which
+ an object can have within a :term:`session`; a deleted object
+ is an object that was formerly persistent and has had a
+ DELETE statement emitted to the database within a flush
+ to delete its row. The object will move to the :term:`detached`
+ state once the session's transaction is committed; alternatively,
+ if the session's transaction is rolled back, the DELETE is
+ reverted and the object moves back to the :term:`persistent`
+ state.
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
persistent
- This describes one of the four major object states which
+ This describes one of the major object states which
an object can have within a :term:`session`; a persistent object
is an object that has a database identity (i.e. a primary key)
and is currently associated with a session. Any object
@@ -1058,7 +1073,7 @@ Glossary
:ref:`session_object_states`
detached
- This describes one of the four major object states which
+ This describes one of the major object states which
an object can have within a :term:`session`; a detached object
is an object that has a database identity (i.e. a primary key)
but is not associated with any session. An object that
diff --git a/doc/build/index.rst b/doc/build/index.rst
index 1990df8e2..a28dfca82 100644
--- a/doc/build/index.rst
+++ b/doc/build/index.rst
@@ -14,7 +14,7 @@ A high level view and getting set up.
:doc:`Overview <intro>` |
:ref:`Installation Guide <installation>` |
:doc:`Frequently Asked Questions <faq/index>` |
-:doc:`Migration from 0.9 <changelog/migration_10>` |
+:doc:`Migration from 1.0 <changelog/migration_11>` |
:doc:`Glossary <glossary>` |
:doc:`Changelog catalog <changelog/index>`
diff --git a/doc/build/intro.rst b/doc/build/intro.rst
index 3231bfe9c..ca5662f03 100644
--- a/doc/build/intro.rst
+++ b/doc/build/intro.rst
@@ -84,18 +84,14 @@ releases as well, depending on the state of Jython itself.
Supported Installation Methods
-------------------------------
-SQLAlchemy supports installation using standard Python "distutils" or
-"setuptools" methodologies. An overview of potential setups is as follows:
-
-* **Plain Python Distutils** - SQLAlchemy can be installed with a clean
- Python install using the services provided via `Python Distutils <http://docs.python.org/distutils/>`_,
- using the ``setup.py`` script. The C extensions as well as Python 3 builds are supported.
-* **Setuptools or Distribute** - When using `setuptools <http://pypi.python.org/pypi/setuptools/>`_,
- SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C
- extensions are supported.
-* **pip** - `pip <http://pypi.python.org/pypi/pip/>`_ is an installer that
- rides on top of ``setuptools`` or ``distribute``, replacing the usage
- of ``easy_install``. It is often preferred for its simpler mode of usage.
+SQLAlchemy installation is via standard Python methodologies that are
+based on `setuptools <http://pypi.python.org/pypi/setuptools/>`_, either
+by referring to ``setup.py`` directly or by using
+`pip <http://pypi.python.org/pypi/pip/>`_ or other setuptools-compatible
+approaches.
+
+.. versionchanged:: 1.1 setuptools is now required by the setup.py file;
+ plain distutils installs are no longer supported.
Install via pip
---------------
@@ -108,7 +104,7 @@ downloaded from Pypi and installed in one step::
This command will download the latest **released** version of SQLAlchemy from the `Python
Cheese Shop <http://pypi.python.org/pypi/SQLAlchemy>`_ and install it to your system.
-In order to install the latest **prerelease** version, such as ``1.0.0b1``,
+In order to install the latest **prerelease** version, such as ``1.1.0b1``,
pip requires that the ``--pre`` flag be used::
pip install --pre SQLAlchemy
@@ -124,6 +120,8 @@ Otherwise, you can install from the distribution using the ``setup.py`` script::
python setup.py install
+.. _c_extensions:
+
Installing the C Extensions
----------------------------------
@@ -131,10 +129,6 @@ SQLAlchemy includes C extensions which provide an extra speed boost for
dealing with result sets. The extensions are supported on both the 2.xx
and 3.xx series of cPython.
-.. versionchanged:: 0.9.0
-
- The C extensions now compile on Python 3 as well as Python 2.
-
``setup.py`` will automatically build the extensions if an appropriate platform is
detected. If the build of the C extensions fails, due to missing compiler or
other issue, the setup process will output a warning message, and re-run the
@@ -146,26 +140,11 @@ use case for this is either for special testing circumstances, or in the rare
case of compatibility/build issues not overcome by the usual "rebuild"
mechanism::
- # *** only in SQLAlchemy 0.9.4 / 0.8.6 or greater ***
export DISABLE_SQLALCHEMY_CEXT=1; python setup.py install
-.. versionadded:: 0.9.4,0.8.6 Support for disabling the build of
- C extensions using the ``DISABLE_SQLALCHEMY_CEXT`` environment variable
- has been added. This allows control of C extension building whether or not
- setuptools is available, and additionally works around the fact that
- setuptools will possibly be **removing support** for command-line switches
- such as ``--without-extensions`` in a future release.
-
- For versions of SQLAlchemy prior to 0.9.4 or 0.8.6, the
- ``--without-cextensions`` option may be used to disable the attempt to build
- C extensions, provided setupools is in use, and provided the ``Feature``
- construct is supported by the installed version of setuptools::
-
- python setup.py --without-cextensions install
-
- Or with pip::
-
- pip install --global-option='--without-cextensions' SQLAlchemy
+.. versionchanged:: 1.1 The legacy ``--without-cextensions`` flag has been
+ removed from the installer as it relies on deprecated features of
+ setuptools.
Installing on Python 3
@@ -174,8 +153,6 @@ Installing on Python 3
SQLAlchemy runs directly on Python 2 or Python 3, and can be installed in
either environment without any adjustments or code conversion.
-.. versionchanged:: 0.9.0 Python 3 is now supported in place with no 2to3 step
- required.
Installing a Database API
@@ -189,7 +166,7 @@ the available DBAPIs for each database, including external links.
Checking the Installed SQLAlchemy Version
------------------------------------------
-This documentation covers SQLAlchemy version 1.0. If you're working on a
+This documentation covers SQLAlchemy version 1.1. If you're working on a
system that already has SQLAlchemy installed, check the version from your
Python prompt like this:
@@ -197,11 +174,11 @@ Python prompt like this:
>>> import sqlalchemy
>>> sqlalchemy.__version__ # doctest: +SKIP
- 1.0.0
+ 1.1.0
.. _migration:
-0.9 to 1.0 Migration
+1.0 to 1.1 Migration
=====================
-Notes on what's changed from 0.9 to 1.0 is available here at :doc:`changelog/migration_10`.
+Notes on what's changed from 1.0 to 1.1 are available at :doc:`changelog/migration_11`.
diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst
index 9a7ad4fa2..acb2dba01 100644
--- a/doc/build/orm/basic_relationships.rst
+++ b/doc/build/orm/basic_relationships.rst
@@ -8,7 +8,7 @@ A quick walkthrough of the basic relational patterns.
The imports used for each of the following sections is as follows::
from sqlalchemy import Table, Column, Integer, ForeignKey
- from sqlalchemy.orm import relationship, backref
+ from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
@@ -32,20 +32,32 @@ a collection of items represented by the child::
parent_id = Column(Integer, ForeignKey('parent.id'))
To establish a bidirectional relationship in one-to-many, where the "reverse"
-side is a many to one, specify the :paramref:`~.relationship.backref` option::
+side is a many to one, specify an additional :func:`.relationship` and connect
+the two using the :paramref:`.relationship.back_populates` parameter::
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
- children = relationship("Child", backref="parent")
+ children = relationship("Child", back_populates="parent")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
+ parent = relationship("Parent", back_populates="children")
``Child`` will get a ``parent`` attribute with many-to-one semantics.
+Alternatively, the :paramref:`~.relationship.backref` option may be used
+on a single :func:`.relationship` instead of using
+:paramref:`~.relationship.back_populates`::
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child", backref="parent")
+
+
Many To One
~~~~~~~~~~~~
@@ -63,9 +75,23 @@ attribute will be created::
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
-Bidirectional behavior is achieved by setting
-:paramref:`~.relationship.backref` to the value ``"parents"``, which
-will place a one-to-many collection on the ``Child`` class::
+Bidirectional behavior is achieved by adding a second :func:`.relationship`
+and applying the :paramref:`.relationship.back_populates` parameter
+in both directions::
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ child_id = Column(Integer, ForeignKey('child.id'))
+ child = relationship("Child", back_populates="parents")
+
+ class Child(Base):
+ __tablename__ = 'child'
+ id = Column(Integer, primary_key=True)
+ parents = relationship("Parent", back_populates="child")
+
+Alternatively, the :paramref:`~.relationship.backref` parameter
+may be applied to a single :func:`.relationship`, such as ``Parent.child``::
class Parent(Base):
__tablename__ = 'parent'
@@ -86,25 +112,39 @@ of the relationship. To convert one-to-many into one-to-one::
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
- child = relationship("Child", uselist=False, backref="parent")
+ child = relationship("Child", uselist=False, back_populates="parent")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
+        parent = relationship("Parent", back_populates="child")
-Or to turn a one-to-many backref into one-to-one, use the :func:`.backref` function
-to provide arguments for the reverse side::
+Or for many-to-one::
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey('child.id'))
- child = relationship("Child", backref=backref("parent", uselist=False))
+ child = relationship("Child", back_populates="parent")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
+        parent = relationship("Parent", back_populates="child", uselist=False)
+
+As always, the :paramref:`.relationship.backref` and :func:`.backref` functions
+may be used in lieu of the :paramref:`.relationship.back_populates` approach;
+to specify ``uselist`` on a backref, use the :func:`.backref` function::
+
+ from sqlalchemy.orm import backref
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ child_id = Column(Integer, ForeignKey('child.id'))
+ child = relationship("Child", backref=backref("parent", uselist=False))
+
.. _relationships_many_to_many:
@@ -133,7 +173,32 @@ directives can locate the remote tables with which to link::
id = Column(Integer, primary_key=True)
For a bidirectional relationship, both sides of the relationship contain a
-collection. The :paramref:`~.relationship.backref` keyword will automatically use
+collection. Specify using :paramref:`.relationship.back_populates`, and
+for each :func:`.relationship` specify the common association table::
+
+ association_table = Table('association', Base.metadata,
+ Column('left_id', Integer, ForeignKey('left.id')),
+ Column('right_id', Integer, ForeignKey('right.id'))
+ )
+
+ class Parent(Base):
+ __tablename__ = 'left'
+ id = Column(Integer, primary_key=True)
+ children = relationship(
+ "Child",
+ secondary=association_table,
+ back_populates="parents")
+
+ class Child(Base):
+ __tablename__ = 'right'
+ id = Column(Integer, primary_key=True)
+ parents = relationship(
+ "Parent",
+ secondary=association_table,
+ back_populates="children")
+
+When using the :paramref:`~.relationship.backref` parameter instead of
+:paramref:`.relationship.back_populates`, the backref will automatically use
the same :paramref:`~.relationship.secondary` argument for the reverse relationship::
association_table = Table('association', Base.metadata,
@@ -259,23 +324,26 @@ is stored along with each association between ``Parent`` and
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
-The bidirectional version adds backrefs to both relationships::
+As always, the bidirectional version makes use of :paramref:`.relationship.back_populates`
+or :paramref:`.relationship.backref`::
class Association(Base):
__tablename__ = 'association'
left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
extra_data = Column(String(50))
- child = relationship("Child", backref="parent_assocs")
+ child = relationship("Child", back_populates="parents")
+ parent = relationship("Parent", back_populates="children")
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
- children = relationship("Association", backref="parent")
+ children = relationship("Association", back_populates="parent")
class Child(Base):
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
+ parents = relationship("Association", back_populates="child")
Working with the association pattern in its direct form requires that child
objects are associated with an association instance before being appended to
diff --git a/doc/build/orm/events.rst b/doc/build/orm/events.rst
index e9673bed0..470a9386b 100644
--- a/doc/build/orm/events.rst
+++ b/doc/build/orm/events.rst
@@ -5,12 +5,10 @@ ORM Events
The ORM includes a wide variety of hooks available for subscription.
-.. versionadded:: 0.7
- The event supersedes the previous system of "extension" classes.
-
-For an introduction to the event API, see :ref:`event_toplevel`. Non-ORM events
-such as those regarding connections and low-level statement execution are described in
-:ref:`core_event_toplevel`.
+For an introduction to the most commonly used ORM events, see the section
+:ref:`session_events_toplevel`. The event system in general is discussed
+at :ref:`event_toplevel`. Non-ORM events such as those regarding connections
+and low-level statement execution are described in :ref:`core_event_toplevel`.
Attribute Events
----------------
diff --git a/doc/build/orm/examples.rst b/doc/build/orm/examples.rst
index 4db7c00dc..25d243022 100644
--- a/doc/build/orm/examples.rst
+++ b/doc/build/orm/examples.rst
@@ -93,6 +93,8 @@ Versioning with a History Table
.. automodule:: examples.versioned_history
+.. _examples_versioned_rows:
+
Versioning using Temporal Rows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst
index 6fc57e30c..7e7b1f9de 100644
--- a/doc/build/orm/extensions/associationproxy.rst
+++ b/doc/build/orm/extensions/associationproxy.rst
@@ -509,5 +509,6 @@ API Documentation
.. autoclass:: AssociationProxy
:members:
:undoc-members:
+ :inherited-members:
.. autodata:: ASSOCIATION_PROXY
diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst
index a282d1298..83cee51da 100644
--- a/doc/build/orm/extensions/baked.rst
+++ b/doc/build/orm/extensions/baked.rst
@@ -355,6 +355,14 @@ this feature is local to the mapper for ``MyClass``.
For per-query use, the :func:`.baked_lazyload` strategy may be used,
which works like any other loader option.
+Opting out with the bake_queries flag
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`.relationship` construct includes a flag
+:paramref:`.relationship.bake_queries` which, when set to False, will cause
+that relationship to opt out of the baked query system, in the case that the
+application-wide :func:`.bake_lazy_loaders` function has been called to enable
+baked query loaders by default.
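+
+A rough sketch, using a hypothetical mapping, of a relationship that opts out::
+
+    from sqlalchemy import Column, ForeignKey, Integer
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.orm import relationship
+
+    Base = declarative_base()
+
+    class Parent(Base):
+        __tablename__ = 'parent'
+        id = Column(Integer, primary_key=True)
+
+        # lazy loads of this collection will not use baked queries, even
+        # if bake_lazy_loaders() has been called application-wide
+        children = relationship("Child", bake_queries=False)
+
+    class Child(Base):
+        __tablename__ = 'child'
+        id = Column(Integer, primary_key=True)
+        parent_id = Column(Integer, ForeignKey('parent.id'))
+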
API Documentation
-----------------
diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst
index 0713634bc..290d8099e 100644
--- a/doc/build/orm/inheritance.rst
+++ b/doc/build/orm/inheritance.rst
@@ -228,9 +228,9 @@ subclasses:
entity = with_polymorphic(Employee, [Engineer, Manager])
# join to all subclass tables
- entity = query.with_polymorphic(Employee, '*')
+ entity = with_polymorphic(Employee, '*')
- # use with Query
+ # use the 'entity' with a Query object
session.query(entity).all()
It also accepts a third argument ``selectable`` which replaces the automatic
@@ -249,7 +249,7 @@ should be used to load polymorphically::
employee.outerjoin(manager).outerjoin(engineer)
)
- # use with Query
+ # use the 'entity' with a Query object
session.query(entity).all()
Note that if you only need to load a single subtype, such as just the
diff --git a/doc/build/orm/loading_relationships.rst b/doc/build/orm/loading_relationships.rst
index 297392f3e..3a0026bbe 100644
--- a/doc/build/orm/loading_relationships.rst
+++ b/doc/build/orm/loading_relationships.rst
@@ -494,6 +494,50 @@ Or using the class-bound descriptor::
query(User).options(contains_eager(User.orders).contains_eager(Order.items))
+Using contains_eager() to load a custom-filtered collection result
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When we use :func:`.contains_eager`, *we* ourselves are constructing the
+SQL that will be used to populate collections. From this, it naturally follows
+that we can opt to **modify** what values the collection is intended to store,
+by writing our SQL to load a subset of elements for collections or
+scalar attributes.
+
+As an example, we can load a ``User`` object and eagerly load only particular
+addresses into its ``.addresses`` collection just by filtering::
+
+ q = session.query(User).join(User.addresses).\
+ filter(Address.email.like('%ed%')).\
+ options(contains_eager(User.addresses))
+
+The above query will load only ``User`` objects which contain at
+least one ``Address`` object that contains the substring ``'ed'`` in its
+``email`` field; the ``User.addresses`` collection will contain **only**
+these ``Address`` entries, and *not* any other ``Address`` entries that are
+in fact associated with the collection.
+
+.. warning::
+
+ Keep in mind that when we load only a subset of objects into a collection,
+ that collection no longer represents what's actually in the database. If
+ we attempted to add entries to this collection, we might find ourselves
+ conflicting with entries that are already in the database but not locally
+ loaded.
+
+ In addition, the **collection will fully reload normally** once the
+ object or attribute is expired. This expiration occurs whenever the
+   :meth:`.Session.commit` or :meth:`.Session.rollback` methods are used,
+   assuming default session settings, or when the :meth:`.Session.expire_all`
+   or :meth:`.Session.expire` methods are used.
+
+ For these reasons, prefer returning separate fields in a tuple rather
+ than artificially altering a collection, when an object plus a custom
+ set of related objects is desired::
+
+ q = session.query(User, Address).join(User.addresses).\
+ filter(Address.email.like('%ed%'))
+
+
Advanced Usage with Arbitrary Statements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst
index 1ae5b1285..e091e33a6 100644
--- a/doc/build/orm/mapped_sql_expr.rst
+++ b/doc/build/orm/mapped_sql_expr.rst
@@ -37,7 +37,7 @@ class level, so that it is available from an instance::
some_user = session.query(User).first()
print some_user.fullname
-as well as usable wtihin queries::
+as well as usable within queries::
some_user = session.query(User).filter(User.fullname == "John Smith").first()
diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst
index aee48121d..a30d486b5 100644
--- a/doc/build/orm/persistence_techniques.rst
+++ b/doc/build/orm/persistence_techniques.rst
@@ -78,6 +78,112 @@ proper context for the desired engine::
connection = session.connection(MyMappedClass)
+.. _session_forcing_null:
+
+Forcing NULL on a column with a default
+=======================================
+
+The ORM considers any attribute that was never set on an object as a
+"default" case; the attribute will be omitted from the INSERT statement::
+
+ class MyObject(Base):
+ __tablename__ = 'my_table'
+ id = Column(Integer, primary_key=True)
+ data = Column(String(50), nullable=True)
+
+ obj = MyObject(id=1)
+ session.add(obj)
+ session.commit() # INSERT with the 'data' column omitted; the database
+ # itself will persist this as the NULL value
+
+Omitting a column from the INSERT means that the column will
+have the NULL value set, *unless* the column has a default set up,
+in which case the default value will be persisted. This holds true
+both from a pure SQL perspective with server-side defaults, as well as for
+SQLAlchemy's own insert behavior with both client-side and server-side
+defaults::
+
+ class MyObject(Base):
+ __tablename__ = 'my_table'
+ id = Column(Integer, primary_key=True)
+ data = Column(String(50), nullable=True, server_default="default")
+
+ obj = MyObject(id=1)
+ session.add(obj)
+ session.commit() # INSERT with the 'data' column omitted; the database
+ # itself will persist this as the value 'default'
+
+However, in the ORM, even if one assigns the Python value ``None`` explicitly
+to the object, this is treated the **same** as though the value were never
+assigned::
+
+ class MyObject(Base):
+ __tablename__ = 'my_table'
+ id = Column(Integer, primary_key=True)
+ data = Column(String(50), nullable=True, server_default="default")
+
+ obj = MyObject(id=1, data=None)
+ session.add(obj)
+ session.commit() # INSERT with the 'data' column explicitly set to None;
+ # the ORM still omits it from the statement and the
+ # database will still persist this as the value 'default'
+
+The above operation will persist into the ``data`` column the
+server default value of ``"default"`` and not SQL NULL, even though ``None``
+was passed; this is a long-standing behavior of the ORM that many applications
+hold as an assumption.
+
+So what if we want to actually put NULL into this column, even though the
+column has a default value? There are two approaches. One is that
+on a per-instance level, we assign the attribute using the
+:obj:`~.expression.null` SQL construct::
+
+ from sqlalchemy import null
+
+ obj = MyObject(id=1, data=null())
+ session.add(obj)
+ session.commit() # INSERT with the 'data' column explicitly set as null();
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
+
+The :obj:`~.expression.null` SQL construct always translates into the SQL
+NULL value being directly present in the target INSERT statement.
+
+If we'd like to be able to use the Python value ``None`` and have this
+also be persisted as NULL despite the presence of column defaults,
+we can configure this for the ORM using a Core-level modifier
+:meth:`.TypeEngine.evaluates_none`, which indicates
+a type where the ORM should treat the value ``None`` the same as any other
+value and pass it through, rather than omitting it as a "missing" value::
+
+ class MyObject(Base):
+ __tablename__ = 'my_table'
+ id = Column(Integer, primary_key=True)
+ data = Column(
+ String(50).evaluates_none(), # indicate that None should always be passed
+ nullable=True, server_default="default")
+
+ obj = MyObject(id=1, data=None)
+ session.add(obj)
+ session.commit() # INSERT with the 'data' column explicitly set to None;
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
+
+.. topic:: Evaluating None
+
+ The :meth:`.TypeEngine.evaluates_none` modifier is primarily intended to
+ signal a type where the Python value "None" is significant, the primary
+ example being a JSON type which may want to persist the JSON ``null`` value
+ rather than SQL NULL. We are slightly repurposing it here in order to
+ signal to the ORM that we'd like ``None`` to be passed into the type whenever
+ present, even though no special type-level behaviors are assigned to it.
+
+.. versionadded:: 1.1 added the :meth:`.TypeEngine.evaluates_none` method
+ in order to indicate that a "None" value should be treated as significant.
+
+
.. _session_partitioning:
Partitioning Strategies
diff --git a/doc/build/orm/relationship_persistence.rst b/doc/build/orm/relationship_persistence.rst
index 8af96cbd6..d4fca2c93 100644
--- a/doc/build/orm/relationship_persistence.rst
+++ b/doc/build/orm/relationship_persistence.rst
@@ -172,56 +172,108 @@ Mutable Primary Keys / Update Cascades
When the primary key of an entity changes, related items
which reference the primary key must also be updated as
well. For databases which enforce referential integrity,
-it's required to use the database's ON UPDATE CASCADE
+the best strategy is to use the database's ON UPDATE CASCADE
functionality in order to propagate primary key changes
to referenced foreign keys - the values cannot be out
-of sync for any moment.
-
-For databases that don't support this, such as SQLite and
-MySQL without their referential integrity options turned
-on, the :paramref:`~.relationship.passive_updates` flag can
-be set to ``False``, most preferably on a one-to-many or
-many-to-many :func:`.relationship`, which instructs
-SQLAlchemy to issue UPDATE statements individually for
-objects referenced in the collection, loading them into
-memory if not already locally present. The
-:paramref:`~.relationship.passive_updates` flag can also be ``False`` in
-conjunction with ON UPDATE CASCADE functionality,
-although in that case the unit of work will be issuing
-extra SELECT and UPDATE statements unnecessarily.
-
-A typical mutable primary key setup might look like::
+of sync for any moment unless the constraints are marked as "deferrable",
+that is, not enforced until the transaction completes.
+
+It is **highly recommended** that an application which seeks to employ
+natural primary keys with mutable values use the ``ON UPDATE CASCADE``
+capabilities of the database. An example mapping which
+illustrates this is::
class User(Base):
__tablename__ = 'user'
+ __table_args__ = {'mysql_engine': 'InnoDB'}
username = Column(String(50), primary_key=True)
fullname = Column(String(100))
- # passive_updates=False *only* needed if the database
- # does not implement ON UPDATE CASCADE
- addresses = relationship("Address", passive_updates=False)
+ addresses = relationship("Address")
+
class Address(Base):
__tablename__ = 'address'
+ __table_args__ = {'mysql_engine': 'InnoDB'}
email = Column(String(50), primary_key=True)
username = Column(String(50),
ForeignKey('user.username', onupdate="cascade")
)
-:paramref:`~.relationship.passive_updates` is set to ``True`` by default,
-indicating that ON UPDATE CASCADE is expected to be in
-place in the usual case for foreign keys that expect
-to have a mutating parent key.
-
-A :paramref:`~.relationship.passive_updates` setting of False may be configured on any
-direction of relationship, i.e. one-to-many, many-to-one,
-and many-to-many, although it is much more effective when
-placed just on the one-to-many or many-to-many side.
-Configuring the :paramref:`~.relationship.passive_updates`
-to False only on the
-many-to-one side will have only a partial effect, as the
-unit of work searches only through the current identity
-map for objects that may be referencing the one with a
-mutating primary key, not throughout the database.
+Above, we illustrate ``onupdate="cascade"`` on the :class:`.ForeignKey`
+object, and we also illustrate the ``mysql_engine='InnoDB'`` setting
+which, on a MySQL backend, ensures that the ``InnoDB`` engine supporting
+referential integrity is used. When using SQLite, referential integrity
+should be enabled, using the configuration described at
+:ref:`sqlite_foreign_keys`.
+
+Simulating limited ON UPDATE CASCADE without foreign key support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In those cases when a database that does not support referential integrity
+is used, and natural primary keys with mutable values are in play,
+SQLAlchemy offers a feature in order to allow propagation of primary key
+values to already-referenced foreign keys to a **limited** extent,
+by emitting an UPDATE statement against foreign key columns that immediately
+reference a primary key column whose value has changed.
+The primary platforms without referential integrity features are
+MySQL when the ``MyISAM`` storage engine is used, and SQLite when the
+``PRAGMA foreign_keys=ON`` pragma is not used. The Oracle database also
+has no support for ``ON UPDATE CASCADE``, but because it still enforces
+referential integrity, the relevant constraints need to be marked as
+deferrable so that SQLAlchemy can emit UPDATE statements.
+
+The feature is enabled by setting the
+:paramref:`~.relationship.passive_updates` flag to ``False``,
+most preferably on a one-to-many or
+many-to-many :func:`.relationship`. When "updates" are no longer
+"passive" this indicates that SQLAlchemy will
+issue UPDATE statements individually for
+objects referenced in the collection referred to by the parent object
+with a changing primary key value. This also implies that collections
+will be fully loaded into memory if not already locally present.
+
+Our previous mapping using ``passive_updates=False`` looks like::
+
+ class User(Base):
+ __tablename__ = 'user'
+
+ username = Column(String(50), primary_key=True)
+ fullname = Column(String(100))
+
+ # passive_updates=False *only* needed if the database
+ # does not implement ON UPDATE CASCADE
+ addresses = relationship("Address", passive_updates=False)
+
+ class Address(Base):
+ __tablename__ = 'address'
+
+ email = Column(String(50), primary_key=True)
+ username = Column(String(50), ForeignKey('user.username'))
+
+Key limitations of ``passive_updates=False`` include:
+
+* it performs much more poorly than direct database ON UPDATE CASCADE,
+ because it needs to fully pre-load affected collections using SELECT
+ and also must emit UPDATE statements against those values, which it
+ will attempt to run in "batches" but still runs on a per-row basis
+ at the DBAPI level.
+
+* the feature cannot "cascade" more than one level. That is,
+ if mapping X has a foreign key which refers to the primary key
+ of mapping Y, but then mapping Y's primary key is itself a foreign key
+ to mapping Z, ``passive_updates=False`` cannot cascade a change in
+ primary key value from ``Z`` to ``X``.
+
+* Configuring ``passive_updates=False`` only on the many-to-one
+ side of a relationship will not have a full effect, as the
+ unit of work searches only through the current identity
+ map for objects that may be referencing the one with a
+ mutating primary key, not throughout the database.
+
+As virtually all databases other than Oracle now support ``ON UPDATE CASCADE``,
+it is highly recommended that traditional ``ON UPDATE CASCADE`` support be used
+in the case that natural and mutable primary key values are in use.
+
diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst
index 624ee9f75..79ea70137 100644
--- a/doc/build/orm/session.rst
+++ b/doc/build/orm/session.rst
@@ -20,6 +20,7 @@ configured, the primary usage interface for persistence operations is the
session_transaction
persistence_techniques
contextual
+ session_events
session_api
diff --git a/doc/build/orm/session_events.rst b/doc/build/orm/session_events.rst
new file mode 100644
index 000000000..ecfc5176f
--- /dev/null
+++ b/doc/build/orm/session_events.rst
@@ -0,0 +1,436 @@
+.. _session_events_toplevel:
+
+Tracking Object and Session Changes with Events
+===============================================
+
+SQLAlchemy features an extensive :ref:`Event Listening <event_toplevel>`
+system used throughout the Core and ORM. Within the ORM, there are a
+wide variety of event listener hooks, which are documented at an API
+level at :ref:`orm_event_toplevel`. This collection of events has
+grown over the years to include lots of very useful new events as well
+as some older events that aren't as relevant as they once were. This
+section will attempt to introduce the major event hooks and when they
+might be used.
+
+.. _session_persistence_events:
+
+Persistence Events
+------------------
+
+Probably the most widely used series of events are the "persistence" events,
+which correspond to the :ref:`flush process<session_flushing>`.
+The flush is where all the decisions are made regarding pending changes to
+objects, which are then emitted out to the database in the form of INSERT,
+UPDATE, and DELETE statements.
+
+``before_flush()``
+^^^^^^^^^^^^^^^^^^
+
+The :meth:`.SessionEvents.before_flush` hook is by far the most generally
+useful event to use when an application wants to ensure that
+additional persistence changes to the database are made when a flush proceeds.
+Use :meth:`.SessionEvents.before_flush` in order to operate
+upon objects to validate their state as well as to compose additional objects
+and references before they are persisted. Within this event,
+it is **safe to manipulate the Session's state**; that is, new objects
+can be attached to it, objects can be deleted, and individual attributes
+on objects can be changed freely, and these changes will be pulled into
+the flush process when the event hook completes.
+
+The typical :meth:`.SessionEvents.before_flush` hook will be tasked with
+scanning the collections :attr:`.Session.new`, :attr:`.Session.dirty` and
+:attr:`.Session.deleted` in order to look for objects
+where something will be happening.
+
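+As a brief sketch, a hook that scans :attr:`.Session.new` and adjusts
+pending objects before they are INSERTed might look like the following;
+the ``data`` attribute here is illustrative only::
+
+    from sqlalchemy import event
+    from sqlalchemy.orm import Session
+
+    @event.listens_for(Session, "before_flush")
+    def adjust_new_objects(session, flush_context, instances):
+        for obj in session.new:
+            # attributes may be changed freely here; the changes are
+            # pulled into the flush that is about to proceed
+            if getattr(obj, 'data', None) is None:
+                obj.data = 'placeholder'
+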
+For illustrations of :meth:`.SessionEvents.before_flush`, see
+examples such as :ref:`examples_versioned_history` and
+:ref:`examples_versioned_rows`.
+
+``after_flush()``
+^^^^^^^^^^^^^^^^^
+
+The :meth:`.SessionEvents.after_flush` hook is called after the SQL has been
+emitted for a flush process, but **before** the state of the objects that
+were flushed has been altered. That is, you can still inspect
+the :attr:`.Session.new`, :attr:`.Session.dirty` and
+:attr:`.Session.deleted` collections to see what was just flushed, and
+you can also use history tracking features like the ones provided
+by :class:`.AttributeState` to see what changes were just persisted.
+In the :meth:`.SessionEvents.after_flush` event, additional SQL can be emitted
+to the database based on what's observed to have changed.
+
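+As a brief sketch, a hook that records how many objects were just
+INSERTed might look like the following, assuming a Core table named
+``flush_log`` exists for this illustration::
+
+    from sqlalchemy import event
+    from sqlalchemy.orm import Session
+
+    @event.listens_for(Session, "after_flush")
+    def log_flush(session, flush_context):
+        if session.new:
+            # the INSERT statements for these objects have already been
+            # emitted; additional SQL may be emitted on the same connection
+            session.connection().execute(
+                flush_log.insert().values(num_inserted=len(session.new))
+            )
+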
+``after_flush_postexec()``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:meth:`.SessionEvents.after_flush_postexec` is called soon after
+:meth:`.SessionEvents.after_flush`, but is invoked **after** the state of
+the objects has been modified to account for the flush that just took place.
+The :attr:`.Session.new`, :attr:`.Session.dirty` and
+:attr:`.Session.deleted` collections are normally completely empty here.
+Use :meth:`.SessionEvents.after_flush_postexec` to inspect the identity map
+for finalized objects and possibly emit additional SQL. In this hook,
+there is the ability to make new changes on objects, which means the
+:class:`.Session` will again go into a "dirty" state; the mechanics of the
+:class:`.Session` here will cause it to flush **again** if new changes
+are detected in this hook and the flush was invoked in the context of
+:meth:`.Session.commit`; otherwise, the pending changes will be bundled
+as part of the next normal flush. When the hook detects new changes within
+a :meth:`.Session.commit`, a counter ensures that an endless loop in this
+regard is stopped after 100 iterations, in the case that an
+:meth:`.SessionEvents.after_flush_postexec`
+hook continually adds new state to be flushed each time it is called.
+
+.. _session_persistence_mapper:
+
+Mapper-level Events
+^^^^^^^^^^^^^^^^^^^
+
+In addition to the flush-level hooks, there is also a suite of hooks
+that are more fine-grained, in that they are called on a per-object
+basis and are broken out based on INSERT, UPDATE or DELETE. These
+are the mapper persistence hooks, and they too are very popular;
+however, these events need to be approached more cautiously, as they
+proceed within the context of the flush process that is already
+ongoing, and many operations are not safe to perform here.
+
+The events are:
+
+* :meth:`.MapperEvents.before_insert`
+* :meth:`.MapperEvents.after_insert`
+* :meth:`.MapperEvents.before_update`
+* :meth:`.MapperEvents.after_update`
+* :meth:`.MapperEvents.before_delete`
+* :meth:`.MapperEvents.after_delete`
+
+Each event is passed the :class:`.Mapper`,
+the mapped object itself, and the :class:`.Connection` which is being
+used to emit an INSERT, UPDATE or DELETE statement. The appeal of these
+events is clear, in that if an application wants to tie some activity to
+when a specific type of object is persisted with an INSERT, the hook is
+very specific; unlike the :meth:`.SessionEvents.before_flush` event,
+there's no need to search through collections like :attr:`.Session.new`
+in order to find targets. However, the flush plan which
+represents the full list of every single INSERT, UPDATE, DELETE statement
+to be emitted has *already been decided* when these events are called,
+and no changes may be made at this stage. Therefore the only changes that are
+even possible to the given objects are upon attributes **local** to the
+object's row. Any other change to the object or other objects will
+impact the state of the :class:`.Session`, which will fail to function
+properly.
+
+Operations that are not supported within these mapper-level persistence
+events include:
+
+* :meth:`.Session.add`
+* :meth:`.Session.delete`
+* Mapped collection append, add, remove, delete, discard, etc.
+* Mapped relationship attribute set/del events,
+ i.e. ``someobject.related = someotherobject``
+
+The reason the :class:`.Connection` is passed is that it is encouraged that
+**simple SQL operations take place here**, directly on the :class:`.Connection`,
+such as incrementing counters or inserting extra rows within log tables.
+When dealing with the :class:`.Connection`, it is expected that Core-level
+SQL operations will be used; e.g. those described in :ref:`sqlexpression_toplevel`.
+
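+A brief sketch of this pattern follows, assuming a Core :class:`.Table`
+named ``log_table`` and an illustrative mapped class ``MyObject``::
+
+    from sqlalchemy import event
+
+    @event.listens_for(MyObject, "after_insert")
+    def receive_after_insert(mapper, connection, target):
+        # emit a simple Core INSERT on the same Connection the flush is
+        # using, so that the extra row joins the ongoing transaction
+        connection.execute(
+            log_table.insert().values(note="inserted: %s" % target.id)
+        )
+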
+There are also many per-object operations that don't need to be handled
+within a flush event at all. The most common alternative is to simply
+establish additional state along with an object inside its ``__init__()``
+method, such as creating additional objects that are to be associated with
+the new object. Using validators as described in :ref:`simple_validators` is
+another approach; these functions can intercept changes to attributes and
+establish additional state changes on the target object in response to the
+attribute change. With both of these approaches, the object is in
+the correct state before it ever gets to the flush step.
+
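+As a brief sketch of the validator approach, the ``User.email`` attribute
+shown here is illustrative only, assuming the usual declarative ``Base``::
+
+    from sqlalchemy import Column, Integer, String
+    from sqlalchemy.orm import validates
+
+    class User(Base):
+        __tablename__ = 'user'
+
+        id = Column(Integer, primary_key=True)
+        email = Column(String(50))
+
+        @validates('email')
+        def normalize_email(self, key, value):
+            # intercept the attribute change and adjust the incoming
+            # value before the object ever reaches a flush
+            return value.lower()
+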
+.. _session_lifecycle_events:
+
+Object Lifecycle Events
+-----------------------
+
+Another use case for events is to track the lifecycle of objects. This
+refers to the states first introduced at :ref:`session_object_states`.
+
+.. versionadded:: 1.1 added a system of events that intercept all possible
+ state transitions of an object within the :class:`.Session`.
+
+All of these object states can be tracked fully with events. Each event
+represents a distinct state transition, meaning that the starting state
+and the destination state are both part of what is tracked. With the
+exception of the initial transient event, all the events are in terms of
+the :class:`.Session` object or class, meaning they can be associated either
+with a specific :class:`.Session` object::
+
+ from sqlalchemy import event
+ from sqlalchemy.orm import Session
+
+ session = Session()
+
+ @event.listens_for(session, 'transient_to_pending')
+ def object_is_pending(session, obj):
+ print("new pending: %s" % obj)
+
+Or with the :class:`.Session` class itself, as well as with a specific
+:class:`.sessionmaker`, which is likely the most useful form::
+
+ from sqlalchemy import event
+ from sqlalchemy.orm import sessionmaker
+
+ maker = sessionmaker()
+
+ @event.listens_for(maker, 'transient_to_pending')
+ def object_is_pending(session, obj):
+ print("new pending: %s" % obj)
+
+The listeners can of course be stacked on top of one function, as is
+likely to be common. For example, to track all objects that are
+entering the persistent state::
+
+ @event.listens_for(maker, "pending_to_persistent")
+ @event.listens_for(maker, "deleted_to_persistent")
+ @event.listens_for(maker, "detached_to_persistent")
+ @event.listens_for(maker, "loaded_as_persistent")
+ def detect_all_persistent(session, instance):
+ print("object is now persistent: %s" % instance)
+
+Transient
+^^^^^^^^^
+
+All mapped objects when first constructed start out as :term:`transient`.
+In this state, the object exists alone and doesn't have an association with
+any :class:`.Session`. For this initial state, there's no specific
+"transition" event since there is no :class:`.Session`, however if one
+wanted to intercept when any transient object is created, the
+:meth:`.InstanceEvents.init` method is probably the best event. This
+event is applied to a specific class or superclass. For example, to
+intercept all new objects for a particular declarative base::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy import event
+
+ Base = declarative_base()
+
+ @event.listens_for(Base, "init", propagate=True)
+ def intercept_init(instance, args, kwargs):
+ print("new transient: %s" % instance)
+
+
+Transient to Pending
+^^^^^^^^^^^^^^^^^^^^
+
+The transient object becomes :term:`pending` when it is first associated
+with a :class:`.Session` via the :meth:`.Session.add` or :meth:`.Session.add_all`
+method. An object may also become part of a :class:`.Session` as a result
+of a :ref:`"cascade" <unitofwork_cascades>` from a referencing object that was
+explicitly added. The transient to pending transition is detectable using
+the :meth:`.SessionEvents.transient_to_pending` event::
+
+ @event.listens_for(sessionmaker, "transient_to_pending")
+ def intercept_transient_to_pending(session, object_):
+ print("transient to pending: %s" % object_)
+
+
+Pending to Persistent
+^^^^^^^^^^^^^^^^^^^^^
+
+The :term:`pending` object becomes :term:`persistent` when a flush
+proceeds and an INSERT statement takes place for the instance. The object
+now has an identity key. Track pending to persistent with the
+:meth:`.SessionEvents.pending_to_persistent` event::
+
+ @event.listens_for(sessionmaker, "pending_to_persistent")
+ def intercept_pending_to_persistent(session, object_):
+ print("pending to persistent: %s" % object_)
+
+Pending to Transient
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The :term:`pending` object can revert back to :term:`transient` if the
+:meth:`.Session.rollback` method is called before the pending object
+has been flushed, or if the :meth:`.Session.expunge` method is called
+for the object before it is flushed. Track pending to transient with the
+:meth:`.SessionEvents.pending_to_transient` event::
+
+ @event.listens_for(sessionmaker, "pending_to_transient")
+ def intercept_pending_to_transient(session, object_):
+ print("transient to pending: %s" % object_)
+
+Loaded as Persistent
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Objects can appear in the :class:`.Session` directly in the :term:`persistent`
+state when they are loaded from the database. Tracking this state transition
+is synonymous with tracking objects as they are loaded, and is equivalent to
+using the :meth:`.InstanceEvents.load` instance-level event. However, the
+:meth:`.SessionEvents.loaded_as_persistent` event is provided as a
+session-centric hook for intercepting objects as they enter the persistent
+state via this particular avenue::
+
+ @event.listens_for(sessionmaker, "loaded_as_persistent")
+ def intercept_loaded_as_persistent(session, object_):
+ print("object loaded into persistent state: %s" % object_)
+
+
+Persistent to Transient
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The persistent object can revert to the transient state if the
+:meth:`.Session.rollback` method is called for a transaction where the
+object was first added as pending. In the case of the ROLLBACK, the
+INSERT statement that made this object persistent is rolled back, and
+the object is evicted from the :class:`.Session` to again become transient.
+Track objects that were reverted to transient from
+persistent using the :meth:`.SessionEvents.persistent_to_transient`
+event hook::
+
+ @event.listens_for(sessionmaker, "persistent_to_transient")
+ def intercept_persistent_to_transient(session, object_):
+ print("persistent to transient: %s" % object_)
+
+Persistent to Deleted
+^^^^^^^^^^^^^^^^^^^^^
+
+The persistent object enters the :term:`deleted` state when an object
+marked for deletion is deleted from the database within the flush
+process. Note that this is **not the same** as when the :meth:`.Session.delete`
+method is called for a target object. The :meth:`.Session.delete`
+method only **marks** the object for deletion; the actual DELETE statement
+is not emitted until the flush proceeds. It is subsequent to the flush
+that the "deleted" state is present for the target object.
+
+Within the "deleted" state, the object is only marginally associated
+with the :class:`.Session`. It is not present in the identity map,
+nor is it present in the :attr:`.Session.deleted` collection, where it
+appeared while it was pending for deletion.
+
+From the "deleted" state, the object can go either to the detached state
+when the transaction is committed, or back to the persistent state
+if the transaction is instead rolled back.
+
+Track the persistent to deleted transition with
+:meth:`.SessionEvents.persistent_to_deleted`::
+
+ @event.listens_for(sessionmaker, "persistent_to_deleted")
+ def intercept_persistent_to_deleted(session, object_):
+ print("object was DELETEd, is now in deleted state: %s" % object_)
+
+
+Deleted to Detached
+^^^^^^^^^^^^^^^^^^^^
+
+The deleted object becomes :term:`detached` when the session's transaction
+is committed. After the :meth:`.Session.commit` method is called, the
+database transaction is final and the :class:`.Session` now fully discards
+the deleted object and removes all associations to it. Track
+the deleted to detached transition using :meth:`.SessionEvents.deleted_to_detached`::
+
+ @event.listens_for(sessionmaker, "deleted_to_detached")
+ def intercept_deleted_to_detached(session, object_):
+ print("deleted to detached: %s" % object_)
+
+
+.. note::
+
+ While the object is in the deleted state, the :attr:`.InstanceState.deleted`
+   attribute, accessible using ``inspect(object).deleted``, returns True. However,
+ when the object is detached, :attr:`.InstanceState.deleted` will again
+ return False. To detect that an object was deleted, regardless of whether
+ or not it is detached, use the :attr:`.InstanceState.was_deleted`
+ accessor.
+
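+As a brief sketch, where ``some_object`` is any mapped instance::
+
+    from sqlalchemy import inspect
+
+    insp = inspect(some_object)
+    if insp.was_deleted:
+        # True once the object has been deleted in a flush, whether it is
+        # still in the "deleted" state or has since become detached
+        print("object was deleted")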
+
+Persistent to Detached
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The persistent object becomes :term:`detached` when the object is de-associated
+with the :class:`.Session`, via the :meth:`.Session.expunge`,
+:meth:`.Session.expunge_all`, or :meth:`.Session.close` methods.
+
+.. note::
+
+ An object may also become **implicitly detached** if its owning
+ :class:`.Session` is dereferenced by the application and discarded due to
+ garbage collection. In this case, **no event is emitted**.
+
+Track objects as they move from persistent to detached using the
+:meth:`.SessionEvents.persistent_to_detached` event::
+
+ @event.listens_for(sessionmaker, "persistent_to_detached")
+    def intercept_persistent_to_detached(session, object_):
+ print("object became detached: %s" % object_)
+
+Detached to Persistent
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The detached object becomes persistent when it is re-associated with a
+session using the :meth:`.Session.add` or equivalent method. Track
+objects moving back to persistent from detached using the
+:meth:`.SessionEvents.detached_to_persistent` event::
+
+ @event.listens_for(sessionmaker, "detached_to_persistent")
+    def intercept_detached_to_persistent(session, object_):
+ print("object became persistent again: %s" % object_)
+
+
+Deleted to Persistent
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The :term:`deleted` object can be reverted to the :term:`persistent`
+state when the transaction in which it was DELETEd is rolled back
+using the :meth:`.Session.rollback` method. Track deleted objects
+moving back to the persistent state using the
+:meth:`.SessionEvents.deleted_to_persistent` event::
+
+ @event.listens_for(sessionmaker, "transient_to_pending")
+ def intercept_transient_to_pending(session, object_):
+ print("transient to pending: %s" % object_)
+
+.. _session_transaction_events:
+
+Transaction Events
+------------------
+
+Transaction events allow an application to be notified when transaction
+boundaries occur at the :class:`.Session` level as well as when the
+:class:`.Session` changes the transactional state on :class:`.Connection`
+objects.
+
+* :meth:`.SessionEvents.after_transaction_create`,
+ :meth:`.SessionEvents.after_transaction_end` - these events track the
+ logical transaction scopes of the :class:`.Session` in a way that is
+ not specific to individual database connections. These events are
+ intended to help with integration of transaction-tracking systems such as
+ ``zope.sqlalchemy``. Use these
+ events when the application needs to align some external scope with the
+ transactional scope of the :class:`.Session`. These hooks mirror
+ the "nested" transactional behavior of the :class:`.Session`, in that they
+ track logical "subtransactions" as well as "nested" (e.g. SAVEPOINT)
+ transactions.
+
+* :meth:`.SessionEvents.before_commit`, :meth:`.SessionEvents.after_commit`,
+ :meth:`.SessionEvents.after_begin`,
+ :meth:`.SessionEvents.after_rollback`, :meth:`.SessionEvents.after_soft_rollback` -
+ These events allow tracking of transaction events from the perspective
+ of database connections. :meth:`.SessionEvents.after_begin` in particular
+ is a per-connection event; a :class:`.Session` that maintains more than
+ one connection will emit this event for each connection individually
+ as those connections become used within the current transaction.
+ The rollback and commit events then refer to when the DBAPI connections
+ themselves have received rollback or commit instructions directly.
+
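+As a brief sketch of the first use case above, an application aligning an
+external scope with the :class:`.Session` transaction might listen as
+follows; the ``begin_external_scope()`` and ``end_external_scope()``
+functions are illustrative only::
+
+    from sqlalchemy import event
+    from sqlalchemy.orm import Session
+
+    @event.listens_for(Session, "after_transaction_create")
+    def on_transaction_create(session, transaction):
+        begin_external_scope()
+
+    @event.listens_for(Session, "after_transaction_end")
+    def on_transaction_end(session, transaction):
+        end_external_scope()
+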
+Attribute Change Events
+-----------------------
+
+The attribute change events allow interception of when specific attributes
+on an object are modified. These events include :meth:`.AttributeEvents.set`,
+:meth:`.AttributeEvents.append`, and :meth:`.AttributeEvents.remove`. These
+events are extremely useful, particularly for per-object validation operations;
+however, it is often much more convenient to use a "validator" hook, which
+uses these hooks behind the scenes; see :ref:`simple_validators` for
+background on this. The attribute events are also behind the mechanics
+of backreferences. An example illustrating use of attribute events
+is in :ref:`examples_instrumentation`.
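+
+As a brief sketch, intercepting changes to an illustrative ``User.name``
+attribute with :meth:`.AttributeEvents.set` might look like::
+
+    from sqlalchemy import event
+
+    @event.listens_for(User.name, "set")
+    def receive_set(target, value, oldvalue, initiator):
+        # invoked each time User.name is assigned a new value
+        print("name changed from %r to %r" % (oldvalue, value))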
+
+
+
+
diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst
index 1ca7ca2e4..090bf7674 100644
--- a/doc/build/orm/session_state_management.rst
+++ b/doc/build/orm/session_state_management.rst
@@ -23,16 +23,28 @@ It's helpful to know the states which an instance can have within a session:
existing instances (or moving persistent instances from other sessions into
your local session).
-* **Detached** - an instance which has a record in the database, but is not in
- any session. There's nothing wrong with this, and you can use objects
- normally when they're detached, **except** they will not be able to issue
- any SQL in order to load collections or attributes which are not yet loaded,
- or were marked as "expired".
-
-Knowing these states is important, since the
-:class:`.Session` tries to be strict about ambiguous
-operations (such as trying to save the same object to two different sessions
-at the same time).
+* **Deleted** - An instance which has been deleted within a flush, but
+ the transaction has not yet completed. Objects in this state are essentially
+ in the opposite of "pending" state; when the session's transaction is committed,
+ the object will move to the detached state. Alternatively, when
+ the session's transaction is rolled back, a deleted object moves
+ *back* to the persistent state.
+
+ .. versionchanged:: 1.1 The 'deleted' state is a newly added session
+ object state distinct from the 'persistent' state.
+
+* **Detached** - an instance which corresponds, or previously corresponded,
+ to a record in the database, but is not currently in any session.
+  The detached object will contain a database identity marker; however,
+ because it is not associated with a session, it is unknown whether or not
+ this database identity actually exists in a target database. Detached
+ objects are safe to use normally, except that they have no ability to
+ load unloaded attributes or attributes that were previously marked
+ as "expired".
+
+For a deeper dive into all possible state transitions, see the
+section :ref:`session_lifecycle_events` which describes each transition
+as well as how to programmatically track each one.
Getting the Current State of an Object
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -53,8 +65,11 @@ the :func:`.inspect` system::
:attr:`.InstanceState.persistent`
+ :attr:`.InstanceState.deleted`
+
:attr:`.InstanceState.detached`
+.. _session_attributes:
Session Attributes
------------------
@@ -92,17 +107,80 @@ all objects which have had changes since they were last loaded or saved (i.e.
(Documentation: :attr:`.Session.new`, :attr:`.Session.dirty`,
:attr:`.Session.deleted`, :attr:`.Session.identity_map`).
-Note that objects within the session are by default *weakly referenced*. This
+
+.. _session_referencing_behavior:
+
+Session Referencing Behavior
+----------------------------
+
+Objects within the session are *weakly referenced*. This
means that when they are dereferenced in the outside application, they fall
out of scope from within the :class:`~sqlalchemy.orm.session.Session` as well
and are subject to garbage collection by the Python interpreter. The
exceptions to this include objects which are pending, objects which are marked
as deleted, or persistent objects which have pending changes on them. After a
full flush, these collections are all empty, and all objects are again weakly
-referenced. To disable the weak referencing behavior and force all objects
-within the session to remain until explicitly expunged, configure
-:class:`.sessionmaker` with the ``weak_identity_map=False``
-setting.
+referenced.
+
+To cause objects in the :class:`.Session` to remain strongly
+referenced, usually a simple approach is all that's needed. Examples
+of externally managed strong-referencing behavior include loading
+objects into a local dictionary keyed to their primary key, or into
+lists or sets for the span of time that they need to remain
+referenced. These collections can be associated with a
+:class:`.Session`, if desired, by placing them into the
+:attr:`.Session.info` dictionary.
+
+An event-based approach is also feasible. A simple recipe that provides
+"strong referencing" behavior for all objects as they remain within
+the :term:`persistent` state is as follows::
+
+ from sqlalchemy import event
+
+ def strong_reference_session(session):
+ @event.listens_for(session, "pending_to_persistent")
+ @event.listens_for(session, "deleted_to_persistent")
+ @event.listens_for(session, "detached_to_persistent")
+ @event.listens_for(session, "loaded_as_persistent")
+ def strong_ref_object(sess, instance):
+ if 'refs' not in sess.info:
+ sess.info['refs'] = refs = set()
+ else:
+ refs = sess.info['refs']
+
+ refs.add(instance)
+
+
+ @event.listens_for(session, "persistent_to_detached")
+ @event.listens_for(session, "persistent_to_deleted")
+ @event.listens_for(session, "persistent_to_transient")
+ def deref_object(sess, instance):
+ sess.info['refs'].discard(instance)
+
+Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`,
+:meth:`.SessionEvents.detached_to_persistent`,
+:meth:`.SessionEvents.deleted_to_persistent` and
+:meth:`.SessionEvents.loaded_as_persistent` event hooks in order to intercept
+objects as they enter the :term:`persistent` state, and the
+:meth:`.SessionEvents.persistent_to_detached` and
+:meth:`.SessionEvents.persistent_to_deleted` hooks to intercept
+objects as they leave the persistent state.
+
+The above function may be called for any :class:`.Session` in order to
+provide strong-referencing behavior on a per-:class:`.Session` basis::
+
+ from sqlalchemy.orm import Session
+
+ my_session = Session()
+ strong_reference_session(my_session)
+
+It may also be called for any :class:`.sessionmaker`::
+
+ from sqlalchemy.orm import sessionmaker
+
+ maker = sessionmaker()
+ strong_reference_session(maker)
+
.. _unitofwork_merging:
diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst
index bca3e944f..e27c15118 100644
--- a/doc/build/orm/session_transaction.rst
+++ b/doc/build/orm/session_transaction.rst
@@ -277,7 +277,7 @@ transactions set the flag ``twophase=True`` on the session::
Setting Transaction Isolation Levels
------------------------------------
-:term:`isolation` refers to the behavior of the transaction at the database
+:term:`Isolation` refers to the behavior of the transaction at the database
level in relation to other transactions occurring concurrently. There
are four well-known modes of isolation, and typically the Python DBAPI
allows these to be set on a per-connection basis, either through explicit
@@ -414,6 +414,12 @@ on the target connection, a warning is emitted::
:paramref:`.Session.connection.execution_options`
parameter to :meth:`.Session.connection`.
+Tracking Transaction State with Events
+--------------------------------------
+
+See the section :ref:`session_transaction_events` for an overview
+of the available event hooks for session transaction state changes.
+
.. _session_external_transaction:
Joining a Session into an External Transaction (such as for test suites)
@@ -513,3 +519,4 @@ everything is rolled back.
session.begin_nested()
# ... the tearDown() method stays the same
+
diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst
index 8871ce765..53f161003 100644
--- a/doc/build/orm/tutorial.rst
+++ b/doc/build/orm/tutorial.rst
@@ -40,11 +40,11 @@ following text represents the expected return value.
Version Check
=============
-A quick check to verify that we are on at least **version 1.0** of SQLAlchemy::
+A quick check to verify that we are on at least **version 1.1** of SQLAlchemy::
>>> import sqlalchemy
>>> sqlalchemy.__version__ # doctest:+SKIP
- 1.0.0
+ 1.1.0
Connecting
==========
@@ -208,12 +208,12 @@ the actual ``CREATE TABLE`` statement:
.. sourcecode:: python+sql
- >>> Base.metadata.create_all(engine) # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
- {opensql}PRAGMA table_info("users")
+ >>> Base.metadata.create_all(engine)
+ SELECT ...
+ PRAGMA table_info("users")
()
CREATE TABLE users (
- id INTEGER NOT NULL,
- name VARCHAR,
+ id INTEGER NOT NULL, name VARCHAR,
fullname VARCHAR,
password VARCHAR,
PRIMARY KEY (id)
@@ -369,7 +369,7 @@ added:
.. sourcecode:: python+sql
- {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
+ {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE
BEGIN (implicit)
INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
('ed', 'Ed Jones', 'edspassword')
@@ -513,7 +513,7 @@ Querying the session, we can see that they're flushed into the current transacti
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
UPDATE users SET name=? WHERE users.id = ?
('Edwardo', 1)
INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
@@ -525,7 +525,7 @@ Querying the session, we can see that they're flushed into the current transacti
FROM users
WHERE users.name IN (?, ?)
('Edwardo', 'fakeuser')
- {stop}[<User(name='Edwardo', fullname='Ed Jones', password='f8s7ccs')>, <User(user='fakeuser', fullname='Invalid', password='12345')>]
+ {stop}[<User(name='Edwardo', fullname='Ed Jones', password='f8s7ccs')>, <User(name='fakeuser', fullname='Invalid', password='12345')>]
Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and
``fake_user`` has been kicked out of the session:
@@ -536,7 +536,7 @@ Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and
ROLLBACK
{stop}
- {sql}>>> ed_user.name #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> ed_user.name
BEGIN (implicit)
SELECT users.id AS users_id,
users.name AS users_name,
@@ -553,7 +553,7 @@ issuing a SELECT illustrates the changes made to the database:
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -579,8 +579,8 @@ returned:
.. sourcecode:: python+sql
- {sql}>>> for instance in session.query(User).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE
- ... print instance.name, instance.fullname
+ {sql}>>> for instance in session.query(User).order_by(User.id):
+ ... print(instance.name, instance.fullname)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -600,8 +600,8 @@ is expressed as tuples:
.. sourcecode:: python+sql
- {sql}>>> for name, fullname in session.query(User.name, User.fullname): # doctest: +NORMALIZE_WHITESPACE
- ... print name, fullname
+ {sql}>>> for name, fullname in session.query(User.name, User.fullname):
+ ... print(name, fullname)
SELECT users.name AS users_name,
users.fullname AS users_fullname
FROM users
@@ -619,8 +619,8 @@ class:
.. sourcecode:: python+sql
- {sql}>>> for row in session.query(User, User.name).all(): #doctest: +NORMALIZE_WHITESPACE
- ... print row.User, row.name
+ {sql}>>> for row in session.query(User, User.name).all():
+ ... print(row.User, row.name)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -639,7 +639,7 @@ is mapped to one (such as ``User.name``):
.. sourcecode:: python+sql
- {sql}>>> for row in session.query(User.name.label('name_label')).all(): #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> for row in session.query(User.name.label('name_label')).all():
... print(row.name_label)
SELECT users.name AS name_label
FROM users
@@ -658,8 +658,8 @@ entities are present in the call to :meth:`~.Session.query`, can be controlled u
>>> from sqlalchemy.orm import aliased
>>> user_alias = aliased(User, name='user_alias')
- {sql}>>> for row in session.query(user_alias, user_alias.name).all(): #doctest: +NORMALIZE_WHITESPACE
- ... print row.user_alias
+ {sql}>>> for row in session.query(user_alias, user_alias.name).all():
+ ... print(row.user_alias)
SELECT user_alias.id AS user_alias_id,
user_alias.name AS user_alias_name,
user_alias.fullname AS user_alias_fullname,
@@ -677,8 +677,8 @@ conjunction with ORDER BY:
.. sourcecode:: python+sql
- {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: #doctest: +NORMALIZE_WHITESPACE
- ... print u
+ {sql}>>> for u in session.query(User).order_by(User.id)[1:3]:
+ ... print(u)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -695,8 +695,8 @@ and filtering results, which is accomplished either with
.. sourcecode:: python+sql
{sql}>>> for name, in session.query(User.name).\
- ... filter_by(fullname='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE
- ... print name
+ ... filter_by(fullname='Ed Jones'):
+ ... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
('Ed Jones',)
@@ -709,8 +709,8 @@ operators with the class-level attributes on your mapped class:
.. sourcecode:: python+sql
{sql}>>> for name, in session.query(User.name).\
- ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE
- ... print name
+ ... filter(User.fullname=='Ed Jones'):
+ ... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
('Ed Jones',)
@@ -727,8 +727,8 @@ users named "ed" with a full name of "Ed Jones", you can call
{sql}>>> for user in session.query(User).\
... filter(User.name=='ed').\
- ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE
- ... print user
+ ... filter(User.fullname=='Ed Jones'):
+ ... print(user)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -795,11 +795,17 @@ Here's a rundown of some of the most common operators used in
# or chain multiple filter()/filter_by() calls
query.filter(User.name == 'ed').filter(User.fullname == 'Ed Jones')
+ .. note:: Make sure you use :func:`.and_` and **not** the
+ Python ``and`` operator!
+
* :func:`OR <.sql.expression.or_>`::
from sqlalchemy import or_
query.filter(or_(User.name == 'ed', User.name == 'wendy'))
+ .. note:: Make sure you use :func:`.or_` and **not** the
+ Python ``or`` operator!
+
* :meth:`MATCH <.ColumnOperators.match>`::
query.filter(User.name.match('wendy'))
@@ -822,7 +828,7 @@ database results. Here's a brief tour:
.. sourcecode:: python+sql
>>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id)
- {sql}>>> query.all() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> query.all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -838,7 +844,7 @@ database results. Here's a brief tour:
.. sourcecode:: python+sql
- {sql}>>> query.first() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> query.first()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -849,17 +855,17 @@ database results. Here's a brief tour:
('%ed', 1, 0)
{stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>
-* :meth:`~.Query.one()`, fully fetches all rows, and if not
+* :meth:`~.Query.one()` fully fetches all rows, and if not
exactly one object identity or composite row is present in the result, raises
an error. With multiple rows found:
.. sourcecode:: python+sql
{sql}>>> from sqlalchemy.orm.exc import MultipleResultsFound
- >>> try: #doctest: +NORMALIZE_WHITESPACE
+ >>> try:
... user = query.one()
- ... except MultipleResultsFound, e:
- ... print e
+ ... except MultipleResultsFound as e:
+ ... print(e)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -874,10 +880,10 @@ database results. Here's a brief tour:
.. sourcecode:: python+sql
{sql}>>> from sqlalchemy.orm.exc import NoResultFound
- >>> try: #doctest: +NORMALIZE_WHITESPACE
+ >>> try:
... user = query.filter(User.id == 99).one()
- ... except NoResultFound, e:
- ... print e
+ ... except NoResultFound as e:
+ ... print(e)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -892,6 +898,11 @@ database results. Here's a brief tour:
web service, which may want to raise a "404 not found" when no results are found,
but raise an application error when multiple results are found.
+* :meth:`~.Query.one_or_none` is like :meth:`~.Query.one`, except that if no
+ results are found, it doesn't raise an error; it just returns ``None``. Like
+ :meth:`~.Query.one`, however, it does raise an error if multiple results are
+ found.
+
* :meth:`~.Query.scalar` invokes the :meth:`~.Query.one` method, and upon
success returns the first column of the row:
@@ -899,17 +910,16 @@ database results. Here's a brief tour:
>>> query = session.query(User.id).filter(User.name == 'ed').\
... order_by(User.id)
- {sql}>>> query.scalar() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> query.scalar()
SELECT users.id AS users_id
FROM users
- WHERE users.name LIKE ? ORDER BY users.id
- LIMIT ? OFFSET ?
- ('%ed', 1, 0)
- {stop}7
+ WHERE users.name = ? ORDER BY users.id
+ ('ed',)
+ {stop}1
.. _orm_tutorial_literal_sql:
-Using Literal SQL
+Using Textual SQL
-----------------
Literal strings can be used flexibly with
@@ -924,8 +934,8 @@ by most applicable methods. For example,
>>> from sqlalchemy import text
{sql}>>> for user in session.query(User).\
... filter(text("id<224")).\
- ... order_by(text("id")).all(): #doctest: +NORMALIZE_WHITESPACE
- ... print user.name
+ ... order_by(text("id")).all():
+ ... print(user.name)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -945,7 +955,7 @@ method:
.. sourcecode:: python+sql
{sql}>>> session.query(User).filter(text("id<:value and name=:name")).\
- ... params(value=224, name='fred').order_by(User.id).one() # doctest: +NORMALIZE_WHITESPACE
+ ... params(value=224, name='fred').order_by(User.id).one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -969,31 +979,40 @@ mapper (below illustrated using an asterisk):
('ed',)
{stop}[<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>]
-You can use :meth:`~sqlalchemy.orm.query.Query.from_statement()` to go
-completely "raw", using string names to identify desired columns:
+Or alternatively, specify how the columns map to the :func:`.text` construct
+explicitly using the :meth:`.TextClause.columns` method:
+
+.. sourcecode:: python+sql
+
+ >>> stmt = text("SELECT name, id FROM users where name=:name")
+ >>> stmt = stmt.columns(User.name, User.id)
+ {sql}>>> session.query(User).from_statement(stmt).params(name='ed').all()
+ SELECT name, id FROM users where name=?
+ ('ed',)
+ {stop}[<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>]
+
+We can choose columns to return individually as well, as in any other case:
.. sourcecode:: python+sql
- {sql}>>> session.query("id", "name", "thenumber12").\
- ... from_statement(text("SELECT id, name, 12 as "
- ... "thenumber12 FROM users where name=:name")).\
- ... params(name='ed').all()
- SELECT id, name, 12 as thenumber12 FROM users where name=?
+ >>> stmt = text("SELECT name, id FROM users where name=:name")
+ >>> stmt = stmt.columns(User.name, User.id)
+ {sql}>>> session.query(User.id, User.name).\
+ ... from_statement(stmt).params(name='ed').all()
+ SELECT name, id FROM users where name=?
('ed',)
- {stop}[(1, u'ed', 12)]
+ {stop}[(1, u'ed')]
+
+.. seealso::
+
+ :ref:`sqlexpression_text` - The :func:`.text` construct explained
+ from the perspective of Core-only queries.
.. versionchanged:: 1.0.0
The :class:`.Query` construct emits warnings when string SQL
fragments are coerced to :func:`.text`, and :func:`.text` should
be used explicitly. See :ref:`migration_2992` for background.
-.. seealso::
-
- :ref:`sqlexpression_text` - Core description of textual segments. The
- behavior of the ORM :class:`.Query` object with regards to
- :func:`.text` and related constructs is very similar to that of the
- Core :func:`.select` object.
-
Counting
--------
@@ -1002,7 +1021,7 @@ counting called :meth:`~sqlalchemy.orm.query.Query.count()`:
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.like('%ed')).count() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(User).filter(User.name.like('%ed')).count()
SELECT count(*) AS count_1
FROM (SELECT users.id AS users_id,
users.name AS users_name,
@@ -1040,7 +1059,7 @@ use it to return the count of each distinct user name:
.. sourcecode:: python+sql
>>> from sqlalchemy import func
- {sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all()
SELECT count(users.name) AS count_1, users.name AS users_name
FROM users GROUP BY users.name
()
@@ -1050,7 +1069,7 @@ To achieve our simple ``SELECT count(*) FROM table``, we can apply it as:
.. sourcecode:: python+sql
- {sql}>>> session.query(func.count('*')).select_from(User).scalar() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(func.count('*')).select_from(User).scalar()
SELECT count(?) AS count_1
FROM users
('*',)
@@ -1061,7 +1080,7 @@ of the ``User`` primary key directly:
.. sourcecode:: python+sql
- {sql}>>> session.query(func.count(User.id)).scalar() #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(func.count(User.id)).scalar()
SELECT count(users.id) AS count_1
FROM users
()
@@ -1082,7 +1101,7 @@ declarative, we define this table along with its mapped class, ``Address``:
.. sourcecode:: python+sql
>>> from sqlalchemy import ForeignKey
- >>> from sqlalchemy.orm import relationship, backref
+ >>> from sqlalchemy.orm import relationship
>>> class Address(Base):
... __tablename__ = 'addresses'
@@ -1090,11 +1109,14 @@ declarative, we define this table along with its mapped class, ``Address``:
... email_address = Column(String, nullable=False)
... user_id = Column(Integer, ForeignKey('users.id'))
...
- ... user = relationship("User", backref=backref('addresses', order_by=id))
+ ... user = relationship("User", back_populates="addresses")
...
... def __repr__(self):
... return "<Address(email_address='%s')>" % self.email_address
+ >>> User.addresses = relationship(
+ ... "Address", order_by=Address.id, back_populates="user")
+
The above class introduces the :class:`.ForeignKey` construct, which is a
directive applied to :class:`.Column` that indicates that values in this
column should be :term:`constrained` to be values present in the named remote
@@ -1110,11 +1132,27 @@ to the ``User`` class, using the attribute ``Address.user``.
:func:`.relationship` uses the foreign key
relationships between the two tables to determine the nature of
this linkage, determining that ``Address.user`` will be :term:`many to one`.
-A subdirective of :func:`.relationship` called :func:`.backref` is
-placed inside of :func:`.relationship`, providing details about
-the relationship as expressed in reverse, that of a collection of ``Address``
-objects on ``User`` referenced by ``User.addresses``. The reverse
-side of a many-to-one relationship is always :term:`one to many`.
+An additional :func:`.relationship` directive is placed on the
+``User`` mapped class under the attribute ``User.addresses``. In both
+:func:`.relationship` directives, the parameter
+:paramref:`.relationship.back_populates` is assigned to refer to the
+complementary attribute names; by doing so, each :func:`.relationship`
+can make intelligent decisions about the same relationship as expressed
+in reverse; on one side, ``Address.user`` refers to a ``User`` instance,
+and on the other side, ``User.addresses`` refers to a list of
+``Address`` instances.
+
+.. note::
+
+ The :paramref:`.relationship.back_populates` parameter is a newer
+ version of a very common SQLAlchemy feature called
+ :paramref:`.relationship.backref`. The :paramref:`.relationship.backref`
+ parameter hasn't gone anywhere and will always remain available!
+ The :paramref:`.relationship.back_populates` parameter is the same thing,
+ except a little more verbose and easier to manipulate. For an overview
+ of the entire topic, see the section :ref:`relationships_backref`.
+
+The reverse side of a many-to-one relationship is always :term:`one to many`.
A full catalog of available :func:`.relationship` configurations
is at :ref:`relationship_patterns`.
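
As a point of comparison, a minimal sketch of the same bidirectional linkage
expressed with the legacy :paramref:`.relationship.backref` shortcut, assuming
the same ``User`` and ``Address`` mapping as above, would look like::

    from sqlalchemy.orm import backref

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        email_address = Column(String, nullable=False)
        user_id = Column(Integer, ForeignKey('users.id'))

        # backref generates the complementary User.addresses collection
        user = relationship("User", backref=backref('addresses', order_by=id))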
@@ -1129,13 +1167,7 @@ use. Once all mappings are complete, these strings are evaluated
as Python expressions in order to produce the actual argument, in the
above case the ``User`` class. The names which are allowed during
this evaluation include, among other things, the names of all classes
-which have been created in terms of the declared base. Below we illustrate creation
-of the same "addresses/user" bidirectional relationship in terms of ``User`` instead of
-``Address``::
-
- class User(Base):
- # ....
- addresses = relationship("Address", order_by="Address.id", backref="user")
+which have been created in terms of the declared base.
See the docstring for :func:`.relationship` for more detail on argument style.
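
For instance, a minimal sketch of the ``User.addresses`` relationship written
entirely with string-based arguments, resolved only once all mappings are
complete, might look like::

    class User(Base):
        # ...

        # "Address" and "Address.id" are looked up later, once the Address
        # class has been declared against the same declarative base
        addresses = relationship(
            "Address", order_by="Address.id", back_populates="user")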
@@ -1159,11 +1191,8 @@ already been created:
.. sourcecode:: python+sql
- {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE
- PRAGMA table_info("users")
- ()
- PRAGMA table_info("addresses")
- ()
+ {sql}>>> Base.metadata.create_all(engine)
+ PRAGMA...
CREATE TABLE addresses (
id INTEGER NOT NULL,
email_address VARCHAR NOT NULL,
@@ -1232,7 +1261,7 @@ Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addre
.. sourcecode:: python+sql
{sql}>>> jack = session.query(User).\
- ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE
+ ... filter_by(name='jack').one()
BEGIN (implicit)
SELECT users.id AS users_id,
users.name AS users_name,
@@ -1249,7 +1278,7 @@ Let's look at the ``addresses`` collection. Watch the SQL:
.. sourcecode:: python+sql
- {sql}>>> jack.addresses #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> jack.addresses
SELECT addresses.id AS addresses_id,
addresses.email_address AS
addresses_email_address,
@@ -1284,9 +1313,9 @@ Below we load the ``User`` and ``Address`` entities at once using this method:
{sql}>>> for u, a in session.query(User, Address).\
... filter(User.id==Address.user_id).\
... filter(Address.email_address=='jack@google.com').\
- ... all(): # doctest: +NORMALIZE_WHITESPACE
- ... print u
- ... print a
+ ... all():
+ ... print(u)
+ ... print(a)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1308,7 +1337,7 @@ using the :meth:`.Query.join` method:
{sql}>>> session.query(User).join(Address).\
... filter(Address.email_address=='jack@google.com').\
- ... all() #doctest: +NORMALIZE_WHITESPACE
+ ... all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1337,6 +1366,16 @@ The reference documentation for :meth:`~.Query.join` contains detailed informati
and examples of the calling styles accepted by this method; :meth:`~.Query.join`
is an important method at the center of usage for any SQL-fluent application.
+.. topic:: What does :class:`.Query` select from if there's multiple entities?
+
+ The :meth:`.Query.join` method will **typically join from the leftmost
+ item** in the list of entities, when the ON clause is omitted, or if the
+ ON clause is a plain SQL expression. To control the first entity in the list
+ of JOINs, use the :meth:`.Query.select_from` method::
+
+ query = Session.query(User, Address).select_from(Address).join(User)
+
+
.. _ormtutorial_aliases:
Using Aliases
@@ -1361,7 +1400,7 @@ same time:
... join(adalias2, User.addresses).\
... filter(adalias1.email_address=='jack@google.com').\
... filter(adalias2.email_address=='j25@yahoo.com'):
- ... print username, email1, email2 # doctest: +NORMALIZE_WHITESPACE
+ ... print(username, email1, email2)
SELECT users.name AS users_name,
addresses_1.email_address AS addresses_1_email_address,
addresses_2.email_address AS addresses_2_email_address
@@ -1413,8 +1452,8 @@ accessible through an attribute called ``c``:
.. sourcecode:: python+sql
{sql}>>> for u, count in session.query(User, stmt.c.address_count).\
- ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE
- ... print u, count
+ ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id):
+ ... print(u, count)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1446,9 +1485,9 @@ to associate an "alias" of a mapped class to a subquery:
... subquery()
>>> adalias = aliased(Address, stmt)
>>> for user, address in session.query(User, adalias).\
- ... join(adalias, User.addresses): # doctest: +NORMALIZE_WHITESPACE
- ... print user
- ... print address
+ ... join(adalias, User.addresses):
+ ... print(user)
+ ... print(address)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1481,8 +1520,8 @@ There is an explicit EXISTS construct, which looks like this:
>>> from sqlalchemy.sql import exists
>>> stmt = exists().where(Address.user_id==User.id)
- {sql}>>> for name, in session.query(User.name).filter(stmt): # doctest: +NORMALIZE_WHITESPACE
- ... print name
+ {sql}>>> for name, in session.query(User.name).filter(stmt):
+ ... print(name)
SELECT users.name AS users_name
FROM users
WHERE EXISTS (SELECT *
@@ -1498,8 +1537,8 @@ usage of EXISTS automatically. Above, the statement can be expressed along the
.. sourcecode:: python+sql
{sql}>>> for name, in session.query(User.name).\
- ... filter(User.addresses.any()): # doctest: +NORMALIZE_WHITESPACE
- ... print name
+ ... filter(User.addresses.any()):
+ ... print(name)
SELECT users.name AS users_name
FROM users
WHERE EXISTS (SELECT 1
@@ -1513,8 +1552,8 @@ usage of EXISTS automatically. Above, the statement can be expressed along the
.. sourcecode:: python+sql
{sql}>>> for name, in session.query(User.name).\
- ... filter(User.addresses.any(Address.email_address.like('%google%'))): # doctest: +NORMALIZE_WHITESPACE
- ... print name
+ ... filter(User.addresses.any(Address.email_address.like('%google%'))):
+ ... print(name)
SELECT users.name AS users_name
FROM users
WHERE EXISTS (SELECT 1
@@ -1530,7 +1569,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the
.. sourcecode:: python+sql
{sql}>>> session.query(Address).\
- ... filter(~Address.user.has(User.name=='jack')).all() # doctest: +NORMALIZE_WHITESPACE
+ ... filter(~Address.user.has(User.name=='jack')).all()
SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
addresses.user_id AS addresses_user_id
@@ -1608,7 +1647,7 @@ very easy to use:
>>> from sqlalchemy.orm import subqueryload
{sql}>>> jack = session.query(User).\
... options(subqueryload(User.addresses)).\
- ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE
+ ... filter_by(name='jack').one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1655,7 +1694,7 @@ will emit the extra join regardless:
{sql}>>> jack = session.query(User).\
... options(joinedload(User.addresses)).\
- ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE
+ ... filter_by(name='jack').one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1717,7 +1756,7 @@ attribute:
... join(Address.user).\
... filter(User.name=='jack').\
... options(contains_eager(Address.user)).\
- ... all() #doctest: +NORMALIZE_WHITESPACE
+ ... all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -1736,7 +1775,7 @@ attribute:
<User(name='jack', fullname='Jack Bean', password='gjffdd')>
For more information on eager loading, including how to configure various forms
-of loading by default, see the section :doc:`/orm/loading`.
+of loading by default, see the section :doc:`/orm/loading_relationships`.
Deleting
========
@@ -1747,11 +1786,9 @@ the session, then we'll issue a ``count`` query to see that no rows remain:
.. sourcecode:: python+sql
>>> session.delete(jack)
- {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE
- UPDATE addresses SET user_id=? WHERE addresses.id = ?
- (None, 1)
+ {sql}>>> session.query(User).filter_by(name='jack').count()
UPDATE addresses SET user_id=? WHERE addresses.id = ?
- (None, 2)
+ ((None, 1), (None, 2))
DELETE FROM users WHERE users.id = ?
(5,)
SELECT count(*) AS count_1
@@ -1770,7 +1807,7 @@ So far, so good. How about Jack's ``Address`` objects ?
{sql}>>> session.query(Address).filter(
... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
- ... ).count() # doctest: +NORMALIZE_WHITESPACE
+ ... ).count()
SELECT count(*) AS count_1
FROM (SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
@@ -1797,6 +1834,8 @@ relationship needs to be removed, so we need to tear down the mappings
completely and start again - we'll close the :class:`.Session`::
>>> session.close()
+ ROLLBACK
+
and use a new :func:`.declarative_base`::
@@ -1813,11 +1852,11 @@ including the cascade configuration (we'll leave the constructor out too)::
... fullname = Column(String)
... password = Column(String)
...
- ... addresses = relationship("Address", backref='user',
+ ... addresses = relationship("Address", back_populates='user',
... cascade="all, delete, delete-orphan")
...
... def __repr__(self):
- ... return "<User(name='%s', fullname='%s', password'%s')>" % (
+ ... return "<User(name='%s', fullname='%s', password='%s')>" % (
... self.name, self.fullname, self.password)
Then we recreate ``Address``, noting that in this case we've created
@@ -1828,6 +1867,7 @@ the ``Address.user`` relationship via the ``User`` class already::
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
... user_id = Column(Integer, ForeignKey('users.id'))
+ ... user = relationship("User", back_populates="addresses")
...
... def __repr__(self):
... return "<Address(email_address='%s')>" % self.email_address
@@ -1840,7 +1880,7 @@ being deleted:
.. sourcecode:: python+sql
# load Jack by primary key
- {sql}>>> jack = session.query(User).get(5) #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> jack = session.query(User).get(5)
BEGIN (implicit)
SELECT users.id AS users_id,
users.name AS users_name,
@@ -1852,7 +1892,7 @@ being deleted:
{stop}
# remove one Address (lazy load fires off)
- {sql}>>> del jack.addresses[1] #doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> del jack.addresses[1]
SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
addresses.user_id AS addresses_user_id
@@ -1864,7 +1904,7 @@ being deleted:
# only one address remains
{sql}>>> session.query(Address).filter(
... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
- ... ).count() # doctest: +NORMALIZE_WHITESPACE
+ ... ).count()
DELETE FROM addresses WHERE addresses.id = ?
(2,)
SELECT count(*) AS count_1
@@ -1883,7 +1923,7 @@ with the user:
>>> session.delete(jack)
- {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> session.query(User).filter_by(name='jack').count()
DELETE FROM addresses WHERE addresses.id = ?
(1,)
DELETE FROM users WHERE users.id = ?
@@ -1900,7 +1940,7 @@ with the user:
{sql}>>> session.query(Address).filter(
... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
- ... ).count() # doctest: +NORMALIZE_WHITESPACE
+ ... ).count()
SELECT count(*) AS count_1
FROM (SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
@@ -1933,8 +1973,8 @@ to serve as the association table. This looks like the following::
>>> from sqlalchemy import Table, Text
>>> # association table
>>> post_keywords = Table('post_keywords', Base.metadata,
- ... Column('post_id', Integer, ForeignKey('posts.id')),
- ... Column('keyword_id', Integer, ForeignKey('keywords.id'))
+ ... Column('post_id', ForeignKey('posts.id'), primary_key=True),
+ ... Column('keyword_id', ForeignKey('keywords.id'), primary_key=True)
... )
Above, we can see declaring a :class:`.Table` directly is a little different
@@ -1943,8 +1983,9 @@ each individual :class:`.Column` argument is separated by a comma. The
:class:`.Column` object is also given its name explicitly, rather than it being
taken from an assigned attribute name.
-Next we define ``BlogPost`` and ``Keyword``, with a :func:`.relationship` linked
-via the ``post_keywords`` table::
+Next we define ``BlogPost`` and ``Keyword``, using complementary
+:func:`.relationship` constructs, each referring to the ``post_keywords``
+table as an association table::
>>> class BlogPost(Base):
... __tablename__ = 'posts'
@@ -1955,7 +1996,9 @@ via the ``post_keywords`` table::
... body = Column(Text)
...
... # many to many BlogPost<->Keyword
- ... keywords = relationship('Keyword', secondary=post_keywords, backref='posts')
+ ... keywords = relationship('Keyword',
+ ... secondary=post_keywords,
+ ... back_populates='posts')
...
... def __init__(self, headline, body, author):
... self.author = author
@@ -1971,6 +2014,9 @@ via the ``post_keywords`` table::
...
... id = Column(Integer, primary_key=True)
... keyword = Column(String(50), nullable=False, unique=True)
+ ... posts = relationship('BlogPost',
+ ... secondary=post_keywords,
+ ... back_populates='keywords')
...
... def __init__(self, keyword):
... self.keyword = keyword
@@ -1995,54 +2041,43 @@ that a single user might have lots of blog posts. When we access
``User.posts``, we'd like to be able to filter results further so as not to
load the entire collection. For this we use a setting accepted by
:func:`~sqlalchemy.orm.relationship` called ``lazy='dynamic'``, which
-configures an alternate **loader strategy** on the attribute. To use it on the
-"reverse" side of a :func:`~sqlalchemy.orm.relationship`, we use the
-:func:`~sqlalchemy.orm.backref` function:
+configures an alternate **loader strategy** on the attribute::
.. sourcecode:: python+sql
- >>> from sqlalchemy.orm import backref
- >>> # "dynamic" loading relationship to User
- >>> BlogPost.author = relationship(User, backref=backref('posts', lazy='dynamic'))
+ >>> BlogPost.author = relationship(User, back_populates="posts")
+ >>> User.posts = relationship(BlogPost, back_populates="author", lazy="dynamic")
Create new tables:
.. sourcecode:: python+sql
- {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE
- PRAGMA table_info("users")
- ()
- PRAGMA table_info("addresses")
- ()
- PRAGMA table_info("posts")
- ()
- PRAGMA table_info("keywords")
- ()
- PRAGMA table_info("post_keywords")
- ()
- CREATE TABLE posts (
+ {sql}>>> Base.metadata.create_all(engine)
+ PRAGMA...
+ CREATE TABLE keywords (
id INTEGER NOT NULL,
- user_id INTEGER,
- headline VARCHAR(255) NOT NULL,
- body TEXT,
+ keyword VARCHAR(50) NOT NULL,
PRIMARY KEY (id),
- FOREIGN KEY(user_id) REFERENCES users (id)
+ UNIQUE (keyword)
)
()
COMMIT
- CREATE TABLE keywords (
+ CREATE TABLE posts (
id INTEGER NOT NULL,
- keyword VARCHAR(50) NOT NULL,
+ user_id INTEGER,
+ headline VARCHAR(255) NOT NULL,
+ body TEXT,
PRIMARY KEY (id),
- UNIQUE (keyword)
+ FOREIGN KEY(user_id) REFERENCES users (id)
)
()
COMMIT
CREATE TABLE post_keywords (
- post_id INTEGER,
- keyword_id INTEGER,
- FOREIGN KEY(post_id) REFERENCES posts (id),
- FOREIGN KEY(keyword_id) REFERENCES keywords (id)
+ post_id INTEGER NOT NULL,
+ keyword_id INTEGER NOT NULL,
+ PRIMARY KEY (post_id, keyword_id),
+ FOREIGN KEY(post_id) REFERENCES posts (id),
+ FOREIGN KEY(keyword_id) REFERENCES keywords (id)
)
()
COMMIT
@@ -2053,7 +2088,7 @@ Usage is not too different from what we've been doing. Let's give Wendy some bl
{sql}>>> wendy = session.query(User).\
... filter_by(name='wendy').\
- ... one() #doctest: +NORMALIZE_WHITESPACE
+ ... one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
@@ -2081,7 +2116,7 @@ keyword string 'firstpost'":
{sql}>>> session.query(BlogPost).\
... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all() #doctest: +NORMALIZE_WHITESPACE
+ ... all()
INSERT INTO keywords (keyword) VALUES (?)
('wendy',)
INSERT INTO keywords (keyword) VALUES (?)
@@ -2089,7 +2124,7 @@ keyword string 'firstpost'":
INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
(2, "Wendy's Blog Post", 'This is a test')
INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?)
- ((1, 1), (1, 2))
+ (...)
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
@@ -2111,7 +2146,7 @@ the query to narrow down to that ``User`` object as a parent:
{sql}>>> session.query(BlogPost).\
... filter(BlogPost.author==wendy).\
... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all() #doctest: +NORMALIZE_WHITESPACE
+ ... all()
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
@@ -2132,7 +2167,7 @@ relationship, to query straight from there:
{sql}>>> wendy.posts.\
... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all() #doctest: +NORMALIZE_WHITESPACE
+ ... all()
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
diff --git a/doc/build/testdocs.py b/doc/build/testdocs.py
deleted file mode 100644
index 815aa8669..000000000
--- a/doc/build/testdocs.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import sys
-sys.path = ['../../lib', './lib/'] + sys.path
-
-import os
-import re
-import doctest
-import sqlalchemy.util as util
-import sqlalchemy.log as salog
-import logging
-
-rootlogger = logging.getLogger('sqlalchemy.engine.base.Engine')
-class MyStream(object):
- def write(self, string):
- sys.stdout.write(string)
- sys.stdout.flush()
- def flush(self):
- pass
-handler = logging.StreamHandler(MyStream())
-handler.setFormatter(logging.Formatter('%(message)s'))
-rootlogger.addHandler(handler)
-
-
-def teststring(s, name, globs=None, verbose=None, report=True,
- optionflags=0, extraglobs=None, raise_on_error=False,
- parser=doctest.DocTestParser()):
-
- from doctest import DebugRunner, DocTestRunner, master
-
- # Assemble the globals.
- if globs is None:
- globs = {}
- else:
- globs = globs.copy()
- if extraglobs is not None:
- globs.update(extraglobs)
-
- if raise_on_error:
- runner = DebugRunner(verbose=verbose, optionflags=optionflags)
- else:
- runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
- test = parser.get_doctest(s, globs, name, name, 0)
- runner.run(test)
-
- if report:
- runner.summarize()
-
- if master is None:
- master = runner
- else:
- master.merge(runner)
-
- return runner.failures, runner.tries
-
-def replace_file(s, newfile):
- engine = r"'(sqlite|postgresql|mysql):///.*'"
- engine = re.compile(engine, re.MULTILINE)
- s, n = re.subn(engine, "'sqlite:///" + newfile + "'", s)
- if not n:
- raise ValueError("Couldn't find suitable create_engine call to replace '%s' in it" % oldfile)
- return s
-
-for filename in 'orm/tutorial','core/tutorial',:
- filename = '%s.rst' % filename
- s = open(filename).read()
- #s = replace_file(s, ':memory:')
- s = re.sub(r'{(?:stop|sql|opensql)}', '', s)
- teststring(s, filename)
-
diff --git a/examples/versioned_history/history_meta.py b/examples/versioned_history/history_meta.py
index 6d7b137eb..866f2d473 100644
--- a/examples/versioned_history/history_meta.py
+++ b/examples/versioned_history/history_meta.py
@@ -210,13 +210,13 @@ def create_version(obj, session, deleted=False):
a, u, d = attributes.get_history(obj, prop.key)
if d:
- attr[hist_col.key] = d[0]
+ attr[prop.key] = d[0]
obj_changed = True
elif u:
- attr[hist_col.key] = u[0]
- else:
+ attr[prop.key] = u[0]
+ elif a:
# if the attribute had no value.
- attr[hist_col.key] = a[0]
+ attr[prop.key] = a[0]
obj_changed = True
if not obj_changed:
diff --git a/examples/versioned_history/test_versioning.py b/examples/versioned_history/test_versioning.py
index dde73a5ae..3ea240e11 100644
--- a/examples/versioned_history/test_versioning.py
+++ b/examples/versioned_history/test_versioning.py
@@ -614,3 +614,68 @@ class TestVersioning(TestCase, AssertsCompiledSQL):
sess.commit()
assert sc.version == 1
+
+ def test_create_double_flush(self):
+
+ class SomeClass(Versioned, self.Base, ComparableEntity):
+ __tablename__ = 'sometable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(30))
+ other = Column(String(30))
+
+ self.create_tables()
+
+ sc = SomeClass()
+ self.session.add(sc)
+ self.session.flush()
+ sc.name = 'Foo'
+ self.session.flush()
+
+ assert sc.version == 2
+
+ def test_mutate_plain_column(self):
+ class Document(self.Base, Versioned):
+ __tablename__ = 'document'
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ name = Column(String, nullable=True)
+ description_ = Column('description', String, nullable=True)
+
+ self.create_tables()
+
+ document = Document()
+ self.session.add(document)
+ document.name = 'Foo'
+ self.session.commit()
+ document.name = 'Bar'
+ self.session.commit()
+
+ DocumentHistory = Document.__history_mapper__.class_
+ v2 = self.session.query(Document).one()
+ v1 = self.session.query(DocumentHistory).one()
+ self.assertEqual(v1.id, v2.id)
+ self.assertEqual(v2.name, 'Bar')
+ self.assertEqual(v1.name, 'Foo')
+
+ def test_mutate_named_column(self):
+ class Document(self.Base, Versioned):
+ __tablename__ = 'document'
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ name = Column(String, nullable=True)
+ description_ = Column('description', String, nullable=True)
+
+ self.create_tables()
+
+ document = Document()
+ self.session.add(document)
+ document.description_ = 'Foo'
+ self.session.commit()
+ document.description_ = 'Bar'
+ self.session.commit()
+
+ DocumentHistory = Document.__history_mapper__.class_
+ v2 = self.session.query(Document).one()
+ v1 = self.session.query(DocumentHistory).one()
+ self.assertEqual(v1.id, v2.id)
+ self.assertEqual(v2.description_, 'Bar')
+ self.assertEqual(v1.description_, 'Foo')
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
index 093e90bbf..12d4e8d1c 100644
--- a/lib/sqlalchemy/__init__.py
+++ b/lib/sqlalchemy/__init__.py
@@ -8,7 +8,9 @@
from .sql import (
alias,
+ all_,
and_,
+ any_,
asc,
between,
bindparam,
@@ -52,6 +54,7 @@ from .sql import (
)
from .types import (
+ Array,
BIGINT,
BINARY,
BLOB,
@@ -120,7 +123,7 @@ from .schema import (
from .inspection import inspect
from .engine import create_engine, engine_from_config
-__version__ = '1.0.7'
+__version__ = '1.1.0b1'
def __go(lcls):
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index c34829cd3..acd419e85 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -648,7 +648,7 @@ class FBDialect(default.DefaultDialect):
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
- 'autoincrement': defvalue is None
+ 'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index bd41c19bf..1ee328e83 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -166,56 +166,6 @@ how SQLAlchemy handles this:
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
-.. _legacy_schema_rendering:
-
-Rendering of SQL statements that include schema qualifiers
----------------------------------------------------------
-
-When using :class:`.Table` metadata that includes a "schema" qualifier,
-such as::
-
- account_table = Table(
- 'account', metadata,
- Column('id', Integer, primary_key=True),
- Column('info', String(100)),
- schema="customer_schema"
- )
-
-The SQL Server dialect has a long-standing behavior that it will attempt
-to turn a schema-qualified table name into an alias, such as::
-
- >>> eng = create_engine("mssql+pymssql://mydsn")
- >>> print(account_table.select().compile(eng))
- SELECT account_1.id, account_1.info
- FROM customer_schema.account AS account_1
-
-This behavior is legacy, does not function correctly for many forms
-of SQL statements, and will be disabled by default in the 1.1 series
-of SQLAlchemy. As of 1.0.5, the above statement will produce the following
-warning::
-
- SAWarning: legacy_schema_aliasing flag is defaulted to True;
- some schema-qualified queries may not function correctly.
- Consider setting this flag to False for modern SQL Server versions;
- this flag will default to False in version 1.1
-
-This warning encourages the :class:`.Engine` to be created as follows::
-
- >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=False)
-
-Where the above SELECT statement will produce::
-
- >>> print(account_table.select().compile(eng))
- SELECT customer_schema.account.id, customer_schema.account.info
- FROM customer_schema.account
-
-The warning will not emit if the ``legacy_schema_aliasing`` flag is set
-to either True or False.
-
-.. versionadded:: 1.0.5 - Added the ``legacy_schema_aliasing`` flag to disable
- the SQL Server dialect's legacy behavior with schema-qualified table
- names. This flag will default to False in version 1.1.
-
Collation Support
-----------------
@@ -236,7 +186,7 @@ CREATE TABLE statement for this column will yield::
LIMIT/OFFSET Support
--------------------
-MSSQL has no support for the LIMIT or OFFSET keysowrds. LIMIT is
+MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
@@ -322,6 +272,41 @@ behavior of this flag is as follows:
.. versionadded:: 1.0.0
+.. _legacy_schema_rendering:
+
+Legacy Schema Mode
+------------------
+
+Very old versions of the MSSQL dialect introduced the behavior such that a
+schema-qualified table would be auto-aliased when used in a
+SELECT statement; given a table::
+
+ account_table = Table(
+ 'account', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('info', String(100)),
+ schema="customer_schema"
+ )
+
+this legacy mode of rendering would assume that "customer_schema.account"
+would not be accepted by all parts of the SQL statement, as illustrated
+below::
+
+ >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
+ >>> print(account_table.select().compile(eng))
+ SELECT account_1.id, account_1.info
+ FROM customer_schema.account AS account_1
+
+This mode of behavior is now off by default, as it appears to have served
+no purpose; however, in the case that legacy applications rely upon it,
+it is available using the ``legacy_schema_aliasing`` argument to
+:func:`.create_engine` as illustrated above.
+
+.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
+ in version 1.0.5 to allow disabling of legacy mode for schemas now
+ defaults to False.
+
+
.. _mssql_indexes:
Clustered Index Support
@@ -548,9 +533,13 @@ class _MSDate(sqltypes.Date):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a date value" % (value, ))
return datetime.date(*[
int(x or 0)
- for x in self._reg.match(value).groups()
+ for x in m.groups()
])
else:
return value
@@ -582,9 +571,13 @@ class TIME(sqltypes.TIME):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a time value" % (value, ))
return datetime.time(*[
int(x or 0)
- for x in self._reg.match(value).groups()])
+ for x in m.groups()])
else:
return value
return process
@@ -774,21 +767,21 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
- if type_.precision:
+ if type_.precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, 'precision', None)
- if precision:
+ if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, 'precision', None)
- if precision:
+ if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
@@ -1156,15 +1149,6 @@ class MSSQLCompiler(compiler.SQLCompiler):
def _schema_aliased_table(self, table):
if getattr(table, 'schema', None) is not None:
- if self.dialect._warn_schema_aliasing and \
- table.schema.lower() != 'information_schema':
- util.warn(
- "legacy_schema_aliasing flag is defaulted to True; "
- "some schema-qualified queries may not function "
- "correctly. Consider setting this flag to False for "
- "modern SQL Server versions; this flag will default to "
- "False in version 1.1")
-
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
@@ -1530,7 +1514,7 @@ class MSDialect(default.DefaultDialect):
max_identifier_length=None,
schema_name="dbo",
deprecate_large_types=None,
- legacy_schema_aliasing=None, **opts):
+ legacy_schema_aliasing=False, **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
@@ -1538,13 +1522,7 @@ class MSDialect(default.DefaultDialect):
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
self.deprecate_large_types = deprecate_large_types
-
- if legacy_schema_aliasing is None:
- self.legacy_schema_aliasing = True
- self._warn_schema_aliasing = True
- else:
- self.legacy_schema_aliasing = legacy_schema_aliasing
- self._warn_schema_aliasing = False
+ self.legacy_schema_aliasing = legacy_schema_aliasing
super(MSDialect, self).__init__(**opts)
@@ -1772,7 +1750,7 @@ class MSDialect(default.DefaultDialect):
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
if charlen == -1:
- charlen = 'max'
+ charlen = None
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index 324b3770c..1d7635c7f 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -85,7 +85,8 @@ class MSDialect_pymssql(MSDialect):
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
- "Connection is closed"
+ "Connection is closed",
+ "message 20006", # Write to the server failed
):
if msg in str(e):
return True
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index fee05fd2d..988746403 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -32,6 +32,11 @@ the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
+.. seealso::
+
+ :ref:`pool_setting_recycle` - full description of the pool recycle feature.
+
+
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
@@ -1584,7 +1589,10 @@ class SET(_EnumeratedValues):
def column_expression(self, colexpr):
if self.retrieve_as_bitwise:
- return colexpr + 0
+ return sql.type_coerce(
+ sql.type_coerce(colexpr, sqltypes.Integer) + 0,
+ self
+ )
else:
return colexpr
@@ -1913,38 +1921,7 @@ class MySQLCompiler(compiler.SQLCompiler):
return None
-# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
-# Starting with MySQL 4.1.2, these indexes are created automatically.
-# In older versions, the indexes must be created explicitly or the
-# creation of foreign key constraints fails."
-
class MySQLDDLCompiler(compiler.DDLCompiler):
- def create_table_constraints(self, table, **kw):
- """Get table constraints."""
- constraint_string = super(
- MySQLDDLCompiler, self).create_table_constraints(table, **kw)
-
- # why self.dialect.name and not 'mysql'? because of drizzle
- is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
- table.dialect_options[self.dialect.name][
- 'engine'].lower() == 'innodb'
-
- auto_inc_column = table._autoincrement_column
-
- if is_innodb and \
- auto_inc_column is not None and \
- auto_inc_column is not list(table.primary_key)[0]:
- if constraint_string:
- constraint_string += ", \n\t"
- constraint_string += "KEY %s (%s)" % (
- self.preparer.quote(
- "idx_autoinc_%s" % auto_inc_column.name
- ),
- self.preparer.format_column(auto_inc_column)
- )
-
- return constraint_string
-
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
@@ -3117,6 +3094,11 @@ class MySQLTableDefinitionParser(object):
# Column type keyword options
type_kw = {}
+
+ if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
+ if type_args:
+ type_kw['fsp'] = type_args.pop(0)
+
for kw in ('unsigned', 'zerofill'):
if spec.get(kw, False):
type_kw[kw] = True
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index c605bd510..82ec72f2b 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -287,6 +287,7 @@ from sqlalchemy import util, sql
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators
+from sqlalchemy.sql.elements import quoted_name
from sqlalchemy import types as sqltypes, schema as sa_schema
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
BLOB, CLOB, TIMESTAMP, FLOAT
@@ -1032,6 +1033,8 @@ class OracleDialect(default.DefaultDialect):
if name.upper() == name and not \
self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
+ elif name.lower() == name:
+ return quoted_name(name, quote=True)
else:
return name
@@ -1280,7 +1283,7 @@ class OracleDialect(default.DefaultDialect):
'type': coltype,
'nullable': nullable,
'default': default,
- 'autoincrement': default is None
+ 'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
cdict['quote'] = True
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index 4aed45c14..dede3b21a 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -293,6 +293,7 @@ from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
+from sqlalchemy import util
import random
import collections
import decimal
@@ -719,8 +720,10 @@ class OracleDialect_cx_oracle(OracleDialect):
# this occurs in tests with mock DBAPIs
self._cx_oracle_string_types = set()
self._cx_oracle_with_unicode = False
- elif self.cx_oracle_ver >= (5,) and not \
- hasattr(self.dbapi, 'UNICODE'):
+ elif util.py3k or (
+ self.cx_oracle_ver >= (5,) and not \
+ hasattr(self.dbapi, 'UNICODE')
+ ):
# cx_Oracle WITH_UNICODE mode. *only* python
# unicode objects accepted for anything
self.supports_unicode_statements = True
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
index 98fe6f085..d67f2a07e 100644
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ b/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -12,11 +12,13 @@ base.dialect = psycopg2.dialect
from .base import \
INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
- DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
- TSVECTOR, DropEnumType
-from .constraints import ExcludeConstraint
+ DATE, BYTEA, BOOLEAN, INTERVAL, ENUM, dialect, TSVECTOR, DropEnumType, \
+ CreateEnumType
from .hstore import HSTORE, hstore
-from .json import JSON, JSONElement, JSONB
+from .json import JSON, JSONB
+from .array import array, ARRAY, Any, All
+from .ext import aggregate_order_by, ExcludeConstraint, array_agg
+
from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
TSTZRANGE
@@ -24,8 +26,9 @@ __all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
- 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
+ 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'array', 'HSTORE',
'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
- 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement',
- 'DropEnumType'
+ 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'Any', 'All',
+ 'DropEnumType', 'CreateEnumType', 'ExcludeConstraint',
+ 'aggregate_order_by', 'array_agg'
)
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py
new file mode 100644
index 000000000..b88f139de
--- /dev/null
+++ b/lib/sqlalchemy/dialects/postgresql/array.py
@@ -0,0 +1,306 @@
+# postgresql/array.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .base import ischema_names
+from ...sql import expression, operators
+from ...sql.base import SchemaEventTarget
+from ... import types as sqltypes
+
+try:
+ from uuid import UUID as _python_UUID
+except ImportError:
+ _python_UUID = None
+
+
+def Any(other, arrexpr, operator=operators.eq):
+ """A synonym for the :meth:`.ARRAY.Comparator.any` method.
+
+ This method is legacy and is here for backwards-compatibility.
+
+ .. seealso::
+
+ :func:`.expression.any_`
+
+ """
+
+ return arrexpr.any(other, operator)
+
+
+def All(other, arrexpr, operator=operators.eq):
+ """A synonym for the :meth:`.ARRAY.Comparator.all` method.
+
+ This method is legacy and is here for backwards-compatibility.
+
+ .. seealso::
+
+ :func:`.expression.all_`
+
+ """
+
+ return arrexpr.all(other, operator)
+
+
+class array(expression.Tuple):
+
+ """A Postgresql ARRAY literal.
+
+ This is used to produce ARRAY literals in SQL expressions, e.g.::
+
+ from sqlalchemy.dialects.postgresql import array
+ from sqlalchemy.dialects import postgresql
+ from sqlalchemy import select, func
+
+ stmt = select([
+ array([1,2]) + array([3,4,5])
+ ])
+
+ print stmt.compile(dialect=postgresql.dialect())
+
+ Produces the SQL::
+
+ SELECT ARRAY[%(param_1)s, %(param_2)s] ||
+ ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
+
+ An instance of :class:`.array` will always have the datatype
+ :class:`.ARRAY`. The "inner" type of the array is inferred from
+ the values present, unless the ``type_`` keyword argument is passed::
+
+ array(['foo', 'bar'], type_=CHAR)
+
+ .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
+
+ See also:
+
+ :class:`.postgresql.ARRAY`
+
+ """
+ __visit_name__ = 'array'
+
+ def __init__(self, clauses, **kw):
+ super(array, self).__init__(*clauses, **kw)
+ self.type = ARRAY(self.type)
+
+ def _bind_param(self, operator, obj):
+ return array([
+ expression.BindParameter(None, o, _compared_to_operator=operator,
+ _compared_to_type=self.type, unique=True)
+ for o in obj
+ ])
+
+ def self_group(self, against=None):
+ if (against in (
+ operators.any_op, operators.all_op, operators.getitem)):
+ return expression.Grouping(self)
+ else:
+ return self
+
+
+CONTAINS = operators.custom_op("@>", precedence=5)
+
+CONTAINED_BY = operators.custom_op("<@", precedence=5)
+
+OVERLAP = operators.custom_op("&&", precedence=5)
+
+
+class ARRAY(SchemaEventTarget, sqltypes.Array):
+
+ """Postgresql ARRAY type.
+
+ .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
+ a subclass of the core :class:`.Array` type.
+
+ The :class:`.postgresql.ARRAY` type is constructed in the same way
+ as the core :class:`.Array` type; a member type is required, and a
+ number of dimensions is recommended if the type is to be used for more
+ than one dimension::
+
+ from sqlalchemy.dialects import postgresql
+
+ mytable = Table("mytable", metadata,
+ Column("data", postgresql.ARRAY(Integer, dimensions=2))
+ )
+
+ The :class:`.postgresql.ARRAY` type provides all operations defined on the
+ core :class:`.Array` type, including support for "dimensions", indexed
+ access, and simple matching such as :meth:`.Array.Comparator.any`
+ and :meth:`.Array.Comparator.all`. The :class:`.postgresql.ARRAY` class also
+ provides PostgreSQL-specific methods for containment operations, including
+ :meth:`.postgresql.ARRAY.Comparator.contains`,
+ :meth:`.postgresql.ARRAY.Comparator.contained_by`,
+ and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
+
+ mytable.c.data.contains([1, 2])
+
+ The :class:`.postgresql.ARRAY` type may not be supported on all
+ PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
+
+ Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
+ conjunction with the :class:`.ENUM` type. For a workaround, see the
+ special type at :ref:`postgresql_array_of_enum`.
+
+ .. seealso::
+
+ :class:`.types.Array` - base array type
+
+ :class:`.postgresql.array` - produces a literal array value.
+
+ """
+
+ class Comparator(sqltypes.Array.Comparator):
+
+ """Define comparison operations for :class:`.ARRAY`.
+
+ Note that these operations are in addition to those provided
+ by the base :class:`.types.Array.Comparator` class, including
+ :meth:`.types.Array.Comparator.any` and
+ :meth:`.types.Array.Comparator.all`.
+
+ """
+
+ def contains(self, other, **kwargs):
+ """Boolean expression. Test if elements are a superset of the
+ elements of the argument array expression.
+ """
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+ def contained_by(self, other):
+ """Boolean expression. Test if elements are a proper subset of the
+ elements of the argument array expression.
+ """
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ def overlap(self, other):
+ """Boolean expression. Test if array has elements in common with
+ an argument array expression.
+ """
+ return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
+
+ comparator_factory = Comparator
+
+ def __init__(self, item_type, as_tuple=False, dimensions=None,
+ zero_indexes=False):
+ """Construct an ARRAY.
+
+ E.g.::
+
+ Column('myarray', ARRAY(Integer))
+
+ Arguments are:
+
+ :param item_type: The data type of items of this array. Note that
+ dimensionality is irrelevant here, so multi-dimensional arrays like
+ ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
+ ``ARRAY(ARRAY(Integer))`` or such.
+
+ :param as_tuple=False: Specify whether return results
+ should be converted to tuples from lists. DBAPIs such
+ as psycopg2 return lists by default. When tuples are
+ returned, the results are hashable.
+
+ :param dimensions: if non-None, the ARRAY will assume a fixed
+ number of dimensions. This will cause the DDL emitted for this
+ ARRAY to include the exact number of bracket clauses ``[]``,
+ and will also optimize the performance of the type overall.
+ Note that PG arrays are always implicitly "non-dimensioned",
+ meaning they can store any number of dimensions no matter how
+ they were declared.
+
+ :param zero_indexes=False: when True, index values will be converted
+ between Python zero-based and Postgresql one-based indexes, e.g.
+ a value of one will be added to all index values before passing
+ to the database.
+
+ .. versionadded:: 0.9.5
+
+
+ """
+ if isinstance(item_type, ARRAY):
+ raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
+ "handles multi-dimensional arrays of basetype")
+ if isinstance(item_type, type):
+ item_type = item_type()
+ self.item_type = item_type
+ self.as_tuple = as_tuple
+ self.dimensions = dimensions
+ self.zero_indexes = zero_indexes
+
+ @property
+ def hashable(self):
+ return self.as_tuple
+
+ @property
+ def python_type(self):
+ return list
+
+ def compare_values(self, x, y):
+ return x == y
+
+ def _set_parent(self, column):
+ """Support SchemaEentTarget"""
+
+ if isinstance(self.item_type, SchemaEventTarget):
+ self.item_type._set_parent(column)
+
+ def _set_parent_with_dispatch(self, parent):
+ """Support SchemaEentTarget"""
+
+ if isinstance(self.item_type, SchemaEventTarget):
+ self.item_type._set_parent_with_dispatch(parent)
+
+ def _proc_array(self, arr, itemproc, dim, collection):
+ if dim is None:
+ arr = list(arr)
+ if dim == 1 or dim is None and (
+ # this has to be (list, tuple), or at least
+ # not hasattr('__iter__'), since Py3K strings
+ # etc. have __iter__
+ not arr or not isinstance(arr[0], (list, tuple))):
+ if itemproc:
+ return collection(itemproc(x) for x in arr)
+ else:
+ return collection(arr)
+ else:
+ return collection(
+ self._proc_array(
+ x, itemproc,
+ dim - 1 if dim is not None else None,
+ collection)
+ for x in arr
+ )
+
+ def bind_processor(self, dialect):
+ item_proc = self.item_type.dialect_impl(dialect).\
+ bind_processor(dialect)
+
+ def process(value):
+ if value is None:
+ return value
+ else:
+ return self._proc_array(
+ value,
+ item_proc,
+ self.dimensions,
+ list)
+ return process
+
+ def result_processor(self, dialect, coltype):
+ item_proc = self.item_type.dialect_impl(dialect).\
+ result_processor(dialect, coltype)
+
+ def process(value):
+ if value is None:
+ return value
+ else:
+ return self._proc_array(
+ value,
+ item_proc,
+ self.dimensions,
+ tuple if self.as_tuple else list)
+ return process
+
+ischema_names['_array'] = ARRAY
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 22c66dbbb..e9001f79a 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -102,7 +102,7 @@ via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
`Postgresql search path
-<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
+<http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
@@ -506,7 +506,42 @@ dialect in conjunction with the :class:`.Table` construct:
.. seealso::
`Postgresql CREATE TABLE options
- <http://www.postgresql.org/docs/9.3/static/sql-createtable.html>`_
+ <http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
+
+ARRAY Types
+-----------
+
+The Postgresql dialect supports arrays, both as multidimensional column types
+and as array literals:
+
+* :class:`.postgresql.ARRAY` - ARRAY datatype
+
+* :class:`.postgresql.array` - array literal
+
+* :func:`.postgresql.array_agg` - ARRAY_AGG SQL function
+
+* :class:`.postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
+ function syntax.
+
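
A minimal sketch of an ARRAY column combined with a containment filter, using
hypothetical table and column names, is::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    mytable = Table(
        "mytable", metadata,
        Column("id", Integer, primary_key=True),
        Column("data", postgresql.ARRAY(Integer))
    )

    # rows whose "data" array contains both 1 and 2
    stmt = select([mytable]).where(mytable.c.data.contains([1, 2]))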
+JSON Types
+----------
+
+The Postgresql dialect supports both JSON and JSONB datatypes, including
+psycopg2's native support and support for all of Postgresql's special
+operators:
+
+* :class:`.postgresql.JSON`
+
+* :class:`.postgresql.JSONB`
+
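
A minimal sketch of a JSONB column queried with an index operation, using
hypothetical names, is::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import JSONB

    metadata = MetaData()
    documents = Table(
        "documents", metadata,
        Column("id", Integer, primary_key=True),
        Column("doc", JSONB)
    )

    # doc['color'] renders as doc -> 'color'; .astext compares as text
    stmt = select([documents]).where(documents.c.doc['color'].astext == 'red')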
+HSTORE Type
+-----------
+
+Both the Postgresql HSTORE type and hstore literals are supported:
+
+* :class:`.postgresql.HSTORE` - HSTORE datatype
+
+* :class:`.postgresql.hstore` - hstore literal
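
A minimal sketch of an HSTORE column with a key lookup, again using
hypothetical names, is::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    items = Table(
        "items", metadata,
        Column("id", Integer, primary_key=True),
        Column("tags", HSTORE)
    )

    # tags -> 'color' retrieves the value stored under that key
    stmt = select([items]).where(items.c.tags['color'] == 'red')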
ENUM Types
----------
@@ -524,13 +559,56 @@ entity. The following sections should be consulted:
* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
CREATE and DROP commands for ENUM.
+.. _postgresql_array_of_enum:
+
+Using ENUM with ARRAY
+^^^^^^^^^^^^^^^^^^^^^
+
+The combination of ENUM and ARRAY is not directly supported by backend
+DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
+use the following workaround type::
+
+ class ArrayOfEnum(ARRAY):
+
+ def bind_expression(self, bindvalue):
+ return sa.cast(bindvalue, self)
+
+ def result_processor(self, dialect, coltype):
+ super_rp = super(ArrayOfEnum, self).result_processor(
+ dialect, coltype)
+
+ def handle_raw_string(value):
+ inner = re.match(r"^{(.*)}$", value).group(1)
+ return inner.split(",")
+
+ def process(value):
+ if value is None:
+ return None
+ return super_rp(handle_raw_string(value))
+ return process
+
+E.g.::
+
+ Table(
+ 'mydata', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
+
+ )
+
+This type is not included as a built-in type as it would be incompatible
+with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
+a new version.
+
"""
from collections import defaultdict
import re
+import datetime as dt
+
from ... import sql, schema, exc, util
from ...engine import default, reflection
-from ...sql import compiler, expression, operators, default_comparator
+from ...sql import compiler, expression
from ... import types as sqltypes
try:
@@ -633,6 +711,10 @@ class INTERVAL(sqltypes.TypeEngine):
def _type_affinity(self):
return sqltypes.Interval
+ @property
+ def python_type(self):
+ return dt.timedelta
+
PGInterval = INTERVAL
@@ -722,407 +804,6 @@ class TSVECTOR(sqltypes.TypeEngine):
__visit_name__ = 'TSVECTOR'
-class _Slice(expression.ColumnElement):
- __visit_name__ = 'slice'
- type = sqltypes.NULLTYPE
-
- def __init__(self, slice_, source_comparator):
- self.start = default_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.start)
- self.stop = default_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.stop)
-
-
-class Any(expression.ColumnElement):
-
- """Represent the clause ``left operator ANY (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method
-
- """
- __visit_name__ = 'any'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class All(expression.ColumnElement):
-
- """Represent the clause ``left operator ALL (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method
-
- """
- __visit_name__ = 'all'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class array(expression.Tuple):
-
- """A Postgresql ARRAY literal.
-
- This is used to produce ARRAY literals in SQL expressions, e.g.::
-
- from sqlalchemy.dialects.postgresql import array
- from sqlalchemy.dialects import postgresql
- from sqlalchemy import select, func
-
- stmt = select([
- array([1,2]) + array([3,4,5])
- ])
-
- print stmt.compile(dialect=postgresql.dialect())
-
- Produces the SQL::
-
- SELECT ARRAY[%(param_1)s, %(param_2)s] ||
- ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1
-
- An instance of :class:`.array` will always have the datatype
- :class:`.ARRAY`. The "inner" type of the array is inferred from
- the values present, unless the ``type_`` keyword argument is passed::
-
- array(['foo', 'bar'], type_=CHAR)
-
- .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
-
- See also:
-
- :class:`.postgresql.ARRAY`
-
- """
- __visit_name__ = 'array'
-
- def __init__(self, clauses, **kw):
- super(array, self).__init__(*clauses, **kw)
- self.type = ARRAY(self.type)
-
- def _bind_param(self, operator, obj):
- return array([
- expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
- for o in obj
- ])
-
- def self_group(self, against=None):
- return self
-
-
-class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
-
- """Postgresql ARRAY type.
-
- Represents values as Python lists.
-
- An :class:`.ARRAY` type is constructed given the "type"
- of element::
-
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer))
- )
-
- The above type represents an N-dimensional array,
- meaning Postgresql will interpret values with any number
- of dimensions automatically. To produce an INSERT
- construct that passes in a 1-dimensional array of integers::
-
- connection.execute(
- mytable.insert(),
- data=[1,2,3]
- )
-
- The :class:`.ARRAY` type can be constructed given a fixed number
- of dimensions::
-
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer, dimensions=2))
- )
-
- This has the effect of the :class:`.ARRAY` type
- specifying that number of bracketed blocks when a :class:`.Table`
- is used in a CREATE TABLE statement, or when the type is used
- within a :func:`.expression.cast` construct; it also causes
- the bind parameter and result set processing of the type
- to optimize itself to expect exactly that number of dimensions.
- Note that Postgresql itself still allows N dimensions with such a type.
-
- SQL expressions of type :class:`.ARRAY` have support for "index" and
- "slice" behavior. The Python ``[]`` operator works normally here, given
- integer indexes or slices. Note that Postgresql arrays default
- to 1-based indexing. The operator produces binary expression
- constructs which will produce the appropriate SQL, both for
- SELECT statements::
-
- select([mytable.c.data[5], mytable.c.data[2:7]])
-
- as well as UPDATE statements when the :meth:`.Update.values` method
- is used::
-
- mytable.update().values({
- mytable.c.data[5]: 7,
- mytable.c.data[2:7]: [1, 2, 3]
- })
-
- :class:`.ARRAY` provides special methods for containment operations,
- e.g.::
-
- mytable.c.data.contains([1, 2])
-
- For a full list of special methods see :class:`.ARRAY.Comparator`.
-
- .. versionadded:: 0.8 Added support for index and slice operations
- to the :class:`.ARRAY` type, including support for UPDATE
- statements, and special array containment operations.
-
- The :class:`.ARRAY` type may not be supported on all DBAPIs.
- It is known to work on psycopg2 and not pg8000.
-
- See also:
-
- :class:`.postgresql.array` - produce a literal array value.
-
- """
- __visit_name__ = 'ARRAY'
-
- class Comparator(sqltypes.Concatenable.Comparator):
-
- """Define comparison operations for :class:`.ARRAY`."""
-
- def __getitem__(self, index):
- shift_indexes = 1 if self.expr.type.zero_indexes else 0
- if isinstance(index, slice):
- if shift_indexes:
- index = slice(
- index.start + shift_indexes,
- index.stop + shift_indexes,
- index.step
- )
- index = _Slice(index, self)
- return_type = self.type
- else:
- index += shift_indexes
- return_type = self.type.item_type
-
- return default_comparator._binary_operate(
- self.expr, operators.getitem, index,
- result_type=return_type)
-
- def any(self, other, operator=operators.eq):
- """Return ``other operator ANY (array)`` clause.
-
- Argument places are switched, because ANY requires array
- expression to be on the right hand-side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.any(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.Any`
-
- :meth:`.postgresql.ARRAY.Comparator.all`
-
- """
- return Any(other, self.expr, operator=operator)
-
- def all(self, other, operator=operators.eq):
- """Return ``other operator ALL (array)`` clause.
-
- Argument places are switched, because ALL requires array
- expression to be on the right hand-side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.all(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.All`
-
- :meth:`.postgresql.ARRAY.Comparator.any`
-
- """
- return All(other, self.expr, operator=operator)
-
- def contains(self, other, **kwargs):
- """Boolean expression. Test if elements are a superset of the
- elements of the argument array expression.
- """
- return self.expr.op('@>')(other)
-
- def contained_by(self, other):
- """Boolean expression. Test if elements are a proper subset of the
- elements of the argument array expression.
- """
- return self.expr.op('<@')(other)
-
- def overlap(self, other):
- """Boolean expression. Test if array has elements in common with
- an argument array expression.
- """
- return self.expr.op('&&')(other)
-
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, operators.custom_op):
- if op.opstring in ['@>', '<@', '&&']:
- return op, sqltypes.Boolean
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
- comparator_factory = Comparator
-
- def __init__(self, item_type, as_tuple=False, dimensions=None,
- zero_indexes=False):
- """Construct an ARRAY.
-
- E.g.::
-
- Column('myarray', ARRAY(Integer))
-
- Arguments are:
-
- :param item_type: The data type of items of this array. Note that
- dimensionality is irrelevant here, so multi-dimensional arrays like
- ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
- ``ARRAY(ARRAY(Integer))`` or such.
-
- :param as_tuple=False: Specify whether return results
- should be converted to tuples from lists. DBAPIs such
- as psycopg2 return lists by default. When tuples are
- returned, the results are hashable.
-
- :param dimensions: if non-None, the ARRAY will assume a fixed
- number of dimensions. This will cause the DDL emitted for this
- ARRAY to include the exact number of bracket clauses ``[]``,
- and will also optimize the performance of the type overall.
- Note that PG arrays are always implicitly "non-dimensioned",
- meaning they can store any number of dimensions no matter how
- they were declared.
-
- :param zero_indexes=False: when True, index values will be converted
- between Python zero-based and Postgresql one-based indexes, e.g.
- a value of one will be added to all index values before passing
- to the database.
-
- .. versionadded:: 0.9.5
-
- """
- if isinstance(item_type, ARRAY):
- raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
- if isinstance(item_type, type):
- item_type = item_type()
- self.item_type = item_type
- self.as_tuple = as_tuple
- self.dimensions = dimensions
- self.zero_indexes = zero_indexes
-
- @property
- def python_type(self):
- return list
-
- def compare_values(self, x, y):
- return x == y
-
- def _proc_array(self, arr, itemproc, dim, collection):
- if dim is None:
- arr = list(arr)
- if dim == 1 or dim is None and (
- # this has to be (list, tuple), or at least
- # not hasattr('__iter__'), since Py3K strings
- # etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
- if itemproc:
- return collection(itemproc(x) for x in arr)
- else:
- return collection(arr)
- else:
- return collection(
- self._proc_array(
- x, itemproc,
- dim - 1 if dim is not None else None,
- collection)
- for x in arr
- )
-
- def bind_processor(self, dialect):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- bind_processor(dialect)
-
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
- return process
-
- def result_processor(self, dialect, coltype):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- result_processor(dialect, coltype)
-
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- tuple if self.as_tuple else list)
- return process
-
-PGArray = ARRAY
-
-
class ENUM(sqltypes.Enum):
"""Postgresql ENUM type.
@@ -1375,26 +1056,18 @@ class PGCompiler(compiler.SQLCompiler):
self.process(element.stop, **kw),
)
- def visit_any(self, element, **kw):
- return "%s%sANY (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
- )
-
- def visit_all(self, element, **kw):
- return "%s%sALL (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
- )
-
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
+ def visit_aggregate_order_by(self, element, **kw):
+ return "%s ORDER BY %s" % (
+ self.process(element.target, **kw),
+ self.process(element.order_by, **kw)
+ )
+
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
@@ -1485,7 +1158,7 @@ class PGCompiler(compiler.SQLCompiler):
c.table if isinstance(c, expression.ColumnClause)
else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
- self.process(table, ashint=True, **kw)
+ self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
@@ -1537,8 +1210,8 @@ class PGDDLCompiler(compiler.DDLCompiler):
else:
colspec += " SERIAL"
else:
- colspec += " " + self.dialect.type_compiler.process(column.type,
- type_expression=column)
+ colspec += " " + self.dialect.type_compiler.process(
+ column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
@@ -2294,11 +1967,27 @@ class PGDialect(default.DefaultDialect):
current_schema = schema
else:
current_schema = self.default_schema_name
- s = """
- SELECT definition FROM pg_views
- WHERE schemaname = :schema
- AND viewname = :view_name
- """
+
+ if self.server_version_info >= (9, 3):
+ s = """
+ SELECT definition FROM pg_views
+ WHERE schemaname = :schema
+ AND viewname = :view_name
+
+ UNION
+
+ SELECT definition FROM pg_matviews
+ WHERE schemaname = :schema
+ AND matviewname = :view_name
+
+ """
+ else:
+ s = """
+ SELECT definition FROM pg_views
+ WHERE schemaname = :schema
+ AND viewname = :view_name
+ """
+
rp = connection.execute(sql.text(s),
view_name=view_name, schema=current_schema)
if rp:
@@ -2438,7 +2127,7 @@ class PGDialect(default.DefaultDialect):
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
- coltype = ARRAY(coltype)
+ coltype = self.ischema_names['_array'](coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
@@ -2631,7 +2320,7 @@ class PGDialect(default.DefaultDialect):
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, NULL, ix.indkey%s,
- i.reloptions, am.amname
+ %s, am.amname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
@@ -2654,6 +2343,8 @@ class PGDialect(default.DefaultDialect):
# cast does not work in PG 8.2.4, does work in 8.3.0.
# nothing in PG changelogs regarding this.
"::varchar" if self.server_version_info >= (8, 3) else "",
+ "i.reloptions" if self.server_version_info >= (8, 2)
+ else "NULL",
self._pg_index_any("a.attnum", "ix.indkey")
)
else:
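
A hedged sketch of how the materialized-view reflection added to ``get_view_definition()`` above might be exercised; the engine URL and the view name are assumptions for illustration only, e.g.::

    from sqlalchemy import create_engine, inspect

    # assumed URL and a pre-existing materialized view named "recent_orders"
    engine = create_engine("postgresql://scott:tiger@localhost/test")
    insp = inspect(engine)

    # on Postgresql 9.3+ the reflection query above also consults pg_matviews,
    # so materialized view definitions are returned as well
    print(insp.get_view_definition("recent_orders"))
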
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/ext.py
index 4cfc050de..1a443c2d7 100644
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ b/lib/sqlalchemy/dialects/postgresql/ext.py
@@ -1,11 +1,69 @@
-# Copyright (C) 2013-2015 the SQLAlchemy authors and contributors
+# postgresql/ext.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from ...sql.schema import ColumnCollectionConstraint
+
from ...sql import expression
-from ... import util
+from ...sql import elements
+from ...sql import functions
+from ...sql.schema import ColumnCollectionConstraint
+from .array import ARRAY
+
+
+class aggregate_order_by(expression.ColumnElement):
+ """Represent a Postgresql aggregate order by expression.
+
+ E.g.::
+
+ from sqlalchemy.dialects.postgresql import aggregate_order_by
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+ would represent the expression::
+
+ SELECT array_agg(a ORDER BY b DESC) FROM table;
+
+ Similarly::
+
+ expr = func.string_agg(
+ table.c.a,
+ aggregate_order_by(literal_column("','"), table.c.a)
+ )
+ stmt = select([expr])
+
+ would represent::
+
+ SELECT string_agg(a, ',' ORDER BY a) FROM table;
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :class:`.array_agg`
+
+ """
+
+ __visit_name__ = 'aggregate_order_by'
+
+ def __init__(self, target, order_by):
+ self.target = elements._literal_as_binds(target)
+ self.order_by = elements._literal_as_binds(order_by)
+
+ def self_group(self, against=None):
+ return self
+
+ def get_children(self, **kwargs):
+ return self.target, self.order_by
+
+ def _copy_internals(self, clone=elements._clone, **kw):
+ self.target = clone(self.target, **kw)
+ self.order_by = clone(self.order_by, **kw)
+
+ @property
+ def _from_objects(self):
+ return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
@@ -84,7 +142,7 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
)
self.using = kw.get('using', 'gist')
where = kw.get('where')
- if where:
+ if where is not None:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
@@ -96,3 +154,15 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
initially=self.initially)
c.dispatch._update(self.dispatch)
return c
+
+
+def array_agg(*arg, **kw):
+ """Postgresql-specific form of :class:`.array_agg`, ensures
+ return type is :class:`.postgresql.ARRAY` and not
+ the plain :class:`.types.Array`.
+
+ .. versionadded:: 1.1
+
+ """
+ kw['type_'] = ARRAY(functions._type_from_args(arg))
+ return functions.func.array_agg(*arg, **kw)
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index 9f369cb5b..b7b0fc007 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -7,110 +7,43 @@
import re
-from .base import ARRAY, ischema_names
+from .base import ischema_names
+from .array import ARRAY
from ... import types as sqltypes
from ...sql import functions as sqlfunc
+from ...sql import operators
from ...sql.operators import custom_op
from ... import util
__all__ = ('HSTORE', 'hstore')
-# My best guess at the parsing rules of hstore literals, since no formal
-# grammar is given. This is mostly reverse engineered from PG's input parser
-# behavior.
-HSTORE_PAIR_RE = re.compile(r"""
-(
- "(?P<key> (\\ . | [^"])* )" # Quoted key
-)
-[ ]* => [ ]* # Pair operator, optional adjoining whitespace
-(
- (?P<value_null> NULL ) # NULL value
- | "(?P<value> (\\ . | [^"])* )" # Quoted value
-)
-""", re.VERBOSE)
-
-HSTORE_DELIMITER_RE = re.compile(r"""
-[ ]* , [ ]*
-""", re.VERBOSE)
-
-
-def _parse_error(hstore_str, pos):
- """format an unmarshalling error."""
-
- ctx = 20
- hslen = len(hstore_str)
-
- parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
- residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
- if len(parsed_tail) > ctx:
- parsed_tail = '[...]' + parsed_tail[1:]
- if len(residual) > ctx:
- residual = residual[:-1] + '[...]'
-
- return "After %r, could not parse residual at position %d: %r" % (
- parsed_tail, pos, residual)
-
-
-def _parse_hstore(hstore_str):
- """Parse an hstore from its literal string representation.
-
- Attempts to approximate PG's hstore input parsing rules as closely as
- possible. Although currently this is not strictly necessary, since the
- current implementation of hstore's output syntax is stricter than what it
- accepts as input, the documentation makes no guarantees that will always
- be the case.
-
-
-
- """
- result = {}
- pos = 0
- pair_match = HSTORE_PAIR_RE.match(hstore_str)
-
- while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace(
- "\\\\", "\\")
- if pair_match.group('value_null'):
- value = None
- else:
- value = pair_match.group('value').replace(
- r'\"', '"').replace("\\\\", "\\")
- result[key] = value
-
- pos += pair_match.end()
-
- delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
- if delim_match is not None:
- pos += delim_match.end()
-
- pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
-
- if pos != len(hstore_str):
- raise ValueError(_parse_error(hstore_str, pos))
+INDEX = custom_op(
+ "->", precedence=5, natural_self_precedent=True
+)
- return result
+HAS_KEY = operators.custom_op(
+ "?", precedence=5, natural_self_precedent=True
+)
+HAS_ALL = operators.custom_op(
+ "?&", precedence=5, natural_self_precedent=True
+)
-def _serialize_hstore(val):
- """Serialize a dictionary into an hstore literal. Keys and values must
- both be strings (except None for values).
+HAS_ANY = operators.custom_op(
+ "?|", precedence=5, natural_self_precedent=True
+)
- """
- def esc(s, position):
- if position == 'value' and s is None:
- return 'NULL'
- elif isinstance(s, util.string_types):
- return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
- else:
- raise ValueError("%r in %s position is not a string." %
- (s, position))
+CONTAINS = operators.custom_op(
+ "@>", precedence=5, natural_self_precedent=True
+)
- return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
- for k, v in val.items())
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=5, natural_self_precedent=True
+)
-class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
+class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the Postgresql HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
@@ -185,51 +118,61 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
__visit_name__ = 'HSTORE'
hashable = False
+ text_type = sqltypes.Text()
+
+ def __init__(self, text_type=None):
+ """Construct a new :class:`.HSTORE`.
+
+ :param text_type: the type that should be used for indexed values.
+ Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1.0
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ """
+ if text_type is not None:
+ self.text_type = text_type
+
+ class Comparator(
+ sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
- return self.expr.op('?')(other)
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
- """Boolean expression. Test for presence of all keys in the PG
- array.
+ """Boolean expression. Test for presence of all keys in jsonb
"""
- return self.expr.op('?&')(other)
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
- """Boolean expression. Test for presence of any key in the PG
- array.
+ """Boolean expression. Test for presence of any key in jsonb
"""
- return self.expr.op('?|')(other)
-
- def defined(self, key):
- """Boolean expression. Test for presence of a non-NULL value for
- the key. Note that the key may be a SQLA expression.
- """
- return _HStoreDefinedFunction(self.expr, key)
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
- """Boolean expression. Test if keys are a superset of the keys of
- the argument hstore expression.
+ """Boolean expression. Test if keys (or array) are a superset
+ of/contained the keys of the argument jsonb expression.
"""
- return self.expr.op('@>')(other)
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
- keys of the argument hstore expression.
+ keys of the argument hstore expression.
"""
- return self.expr.op('<@')(other)
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
- def __getitem__(self, other):
- """Text expression. Get the value at a given key. Note that the
- key may be a SQLA expression.
+ def _setup_getitem(self, index):
+ return INDEX, index, self.type.text_type
+
+ def defined(self, key):
+ """Boolean expression. Test for presence of a non-NULL value for
+ the key. Note that the key may be a SQLA expression.
"""
- return self.expr.op('->', precedence=5)(other)
+ return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
@@ -263,14 +206,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring in ['?', '?&', '?|', '@>', '<@']:
- return op, sqltypes.Boolean
- elif op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
+ comparator_factory = Comparator
def bind_processor(self, dialect):
if util.py2k:
@@ -374,3 +310,105 @@ class _HStoreArrayFunction(sqlfunc.GenericFunction):
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'hstore_to_matrix'
+
+
+#
+# parsing. note that none of this is used with the psycopg2 backend,
+# which provides its own native extensions.
+#
+
+# My best guess at the parsing rules of hstore literals, since no formal
+# grammar is given. This is mostly reverse engineered from PG's input parser
+# behavior.
+HSTORE_PAIR_RE = re.compile(r"""
+(
+ "(?P<key> (\\ . | [^"])* )" # Quoted key
+)
+[ ]* => [ ]* # Pair operator, optional adjoining whitespace
+(
+ (?P<value_null> NULL ) # NULL value
+ | "(?P<value> (\\ . | [^"])* )" # Quoted value
+)
+""", re.VERBOSE)
+
+HSTORE_DELIMITER_RE = re.compile(r"""
+[ ]* , [ ]*
+""", re.VERBOSE)
+
+
+def _parse_error(hstore_str, pos):
+ """format an unmarshalling error."""
+
+ ctx = 20
+ hslen = len(hstore_str)
+
+ parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
+ residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
+
+ if len(parsed_tail) > ctx:
+ parsed_tail = '[...]' + parsed_tail[1:]
+ if len(residual) > ctx:
+ residual = residual[:-1] + '[...]'
+
+ return "After %r, could not parse residual at position %d: %r" % (
+ parsed_tail, pos, residual)
+
+
+def _parse_hstore(hstore_str):
+ """Parse an hstore from its literal string representation.
+
+ Attempts to approximate PG's hstore input parsing rules as closely as
+ possible. Although currently this is not strictly necessary, since the
+ current implementation of hstore's output syntax is stricter than what it
+ accepts as input, the documentation makes no guarantees that will always
+ be the case.
+
+
+
+ """
+ result = {}
+ pos = 0
+ pair_match = HSTORE_PAIR_RE.match(hstore_str)
+
+ while pair_match is not None:
+ key = pair_match.group('key').replace(r'\"', '"').replace(
+ "\\\\", "\\")
+ if pair_match.group('value_null'):
+ value = None
+ else:
+ value = pair_match.group('value').replace(
+ r'\"', '"').replace("\\\\", "\\")
+ result[key] = value
+
+ pos += pair_match.end()
+
+ delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
+ if delim_match is not None:
+ pos += delim_match.end()
+
+ pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
+
+ if pos != len(hstore_str):
+ raise ValueError(_parse_error(hstore_str, pos))
+
+ return result
+
+
+def _serialize_hstore(val):
+ """Serialize a dictionary into an hstore literal. Keys and values must
+ both be strings (except None for values).
+
+ """
+ def esc(s, position):
+ if position == 'value' and s is None:
+ return 'NULL'
+ elif isinstance(s, util.string_types):
+ return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
+ else:
+ raise ValueError("%r in %s position is not a string." %
+ (s, position))
+
+ return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
+ for k, v in val.items())
+
+
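
A small, assumed example of the reworked :class:`.HSTORE` comparator above, where indexed access now goes through ``_setup_getitem`` and returns the configured ``text_type``; the table below is hypothetical, e.g.::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    # hypothetical table, for illustration only
    profile = Table(
        "profile", metadata,
        Column("id", Integer, primary_key=True),
        Column("attrs", HSTORE),
    )

    # attrs -> 'color'   (INDEX operator; result typed as the configured text_type)
    stmt = select([profile.c.attrs["color"]])

    # attrs ? 'color'    (HAS_KEY operator; result typed as Boolean)
    stmt = select([profile.c.id]).where(profile.c.attrs.has_key("color"))
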
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index 13ebc4afe..8a50270f5 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -6,96 +6,60 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
+import collections
import json
from .base import ischema_names
from ... import types as sqltypes
-from ...sql.operators import custom_op
-from ... import sql
-from ...sql import elements, default_comparator
+from ...sql import operators
+from ...sql import elements
from ... import util
-__all__ = ('JSON', 'JSONElement', 'JSONB')
+__all__ = ('JSON', 'JSONB')
-class JSONElement(elements.BinaryExpression):
- """Represents accessing an element of a :class:`.JSON` value.
+# json : returns json
+INDEX = operators.custom_op(
+ "->", precedence=5, natural_self_precedent=True
+)
- The :class:`.JSONElement` is produced whenever using the Python index
- operator on an expression that has the type :class:`.JSON`::
+# path operator: returns json
+PATHIDX = operators.custom_op(
+ "#>", precedence=5, natural_self_precedent=True
+)
- expr = mytable.c.json_data['some_key']
+# json + astext: returns text
+ASTEXT = operators.custom_op(
+ "->>", precedence=5, natural_self_precedent=True
+)
- The expression typically compiles to a JSON access such as ``col -> key``.
- Modifiers are then available for typing behavior, including
- :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`.
+# path operator + astext: returns text
+ASTEXT_PATHIDX = operators.custom_op(
+ "#>>", precedence=5, natural_self_precedent=True
+)
- """
-
- def __init__(self, left, right, astext=False,
- opstring=None, result_type=None):
- self._astext = astext
- if opstring is None:
- if hasattr(right, '__iter__') and \
- not isinstance(right, util.string_types):
- opstring = "#>"
- right = "{%s}" % (
- ", ".join(util.text_type(elem) for elem in right))
- else:
- opstring = "->"
-
- self._json_opstring = opstring
- operator = custom_op(opstring, precedence=5)
- right = default_comparator._check_literal(
- left, operator, right)
- super(JSONElement, self).__init__(
- left, right, operator, type_=result_type)
-
- @property
- def astext(self):
- """Convert this :class:`.JSONElement` to use the 'astext' operator
- when evaluated.
-
- E.g.::
-
- select([data_table.c.data['some key'].astext])
-
- .. seealso::
-
- :meth:`.JSONElement.cast`
-
- """
- if self._astext:
- return self
- else:
- return JSONElement(
- self.left,
- self.right,
- astext=True,
- opstring=self._json_opstring + ">",
- result_type=sqltypes.String(convert_unicode=True)
- )
-
- def cast(self, type_):
- """Convert this :class:`.JSONElement` to apply both the 'astext' operator
- as well as an explicit type cast when evaluated.
-
- E.g.::
+HAS_KEY = operators.custom_op(
+ "?", precedence=5, natural_self_precedent=True
+)
- select([data_table.c.data['some key'].cast(Integer)])
+HAS_ALL = operators.custom_op(
+ "?&", precedence=5, natural_self_precedent=True
+)
- .. seealso::
+HAS_ANY = operators.custom_op(
+ "?|", precedence=5, natural_self_precedent=True
+)
- :attr:`.JSONElement.astext`
+CONTAINS = operators.custom_op(
+ "@>", precedence=5, natural_self_precedent=True
+)
- """
- if not self._astext:
- return self.astext.cast(type_)
- else:
- return sql.cast(self, type_)
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=5, natural_self_precedent=True
+)
-class JSON(sqltypes.TypeEngine):
+class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
"""Represent the Postgresql JSON type.
The :class:`.JSON` type stores arbitrary JSON format data, e.g.::
@@ -113,31 +77,36 @@ class JSON(sqltypes.TypeEngine):
:class:`.JSON` provides several operations:
- * Index operations::
+ * Index operations (the ``->`` operator)::
data_table.c.data['some key']
- * Index operations returning text (required for text comparison)::
+ * Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
- * Index operations with a built-in CAST call::
+ * Index operations with CAST
+ (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
- data_table.c.data['some key'].cast(Integer) == 5
+ data_table.c.data['some key'].astext.cast(Integer) == 5
- * Path index operations::
+ * Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
- * Path index operations returning text (required for text comparison)::
+ * Path index operations returning text (the ``#>>`` operator)::
+
+ data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
- 'some value'
+ .. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
+ JSON objects now requires that the :attr:`.JSON.Comparator.astext`
+ modifier be called explicitly, if the cast works only from a textual
+ string.
- Index operations return an instance of :class:`.JSONElement`, which
- represents an expression such as ``column -> index``. This element then
- defines methods such as :attr:`.JSONElement.astext` and
- :meth:`.JSONElement.cast` for setting up type behavior.
+ Index operations return an expression object whose type defaults to
+ :class:`.JSON`, so that further JSON-oriented instructions
+ may be called upon the result type.
The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
detect in-place mutations to the structure. In order to detect these, the
@@ -146,6 +115,29 @@ class JSON(sqltypes.TypeEngine):
will be detected by the unit of work. See the example at :class:`.HSTORE`
for a simple example involving a dictionary.
+ When working with NULL values, the :class:`.JSON` type recommends the
+ use of two specific constants in order to differentiate between a column
+ that evaluates to SQL NULL, i.e. no value, and the JSON-encoded string
+ ``"null"``.  To insert or select against a value that is SQL NULL,
+ use the constant :func:`.null`::
+
+ conn.execute(table.insert(), json_value=null())
+
+ To insert or select against a value that is JSON ``"null"``, use the
+ constant :attr:`.JSON.NULL`::
+
+ conn.execute(table.insert(), json_value=JSON.NULL)
+
+ The :class:`.JSON` type supports a flag
+ :paramref:`.JSON.none_as_null` which when set to True will result
+ in the Python constant ``None`` evaluating to the value of SQL
+ NULL, and when set to False results in the Python constant
+ ``None`` evaluating to the value of JSON ``"null"``. The Python
+ value ``None`` may be used in conjunction with either
+ :attr:`.JSON.NULL` or :func:`.null` in order to indicate NULL
+ values, but care must be taken as to the value of the
+ :paramref:`.JSON.none_as_null` flag in these cases.
+
Custom serializers and deserializers are specified at the dialect level,
that is using :func:`.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
@@ -161,11 +153,42 @@ class JSON(sqltypes.TypeEngine):
.. versionadded:: 0.9
+ .. seealso::
+
+ :class:`.JSONB`
+
"""
__visit_name__ = 'JSON'
- def __init__(self, none_as_null=False):
+ hashable = False
+ astext_type = sqltypes.Text()
+
+ NULL = util.symbol('JSON_NULL')
+ """Describe the json value of NULL.
+
+ This value is used to force the JSON value of ``"null"`` to be
+ used as the value. A value of Python ``None`` will be recognized
+ either as SQL NULL or JSON ``"null"``, based on the setting
+ of the :paramref:`.JSON.none_as_null` flag; the :attr:`.JSON.NULL`
+ constant can be used to always resolve to JSON ``"null"`` regardless
+ of this setting. This is in contrast to the :func:`.sql.null` construct,
+ which always resolves to SQL NULL. E.g.::
+
+ from sqlalchemy import null
+ from sqlalchemy.dialects.postgresql import JSON
+
+ obj1 = MyObject(json_value=null()) # will *always* insert SQL NULL
+ obj2 = MyObject(json_value=JSON.NULL) # will *always* insert JSON string "null"
+
+ session.add_all([obj1, obj2])
+ session.commit()
+
+ .. versionadded:: 1.1
+
+ """
+
+ def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
@@ -179,58 +202,99 @@ class JSON(sqltypes.TypeEngine):
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
+ .. seealso::
+
+ :attr:`.JSON.NULL`
+
+ :param astext_type: the type to use for the
+ :attr:`.JSON.Comparator.astext`
+ accessor on indexed attributes. Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1.0
+
"""
self.none_as_null = none_as_null
+ if astext_type is not None:
+ self.astext_type = astext_type
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ class Comparator(
+ sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
"""Define comparison operations for :class:`.JSON`."""
- def __getitem__(self, other):
- """Get the value at a given key."""
+ @property
+ def astext(self):
+ """On an indexed expression, use the "astext" (e.g. "->>")
+ conversion when rendered in SQL.
+
+ E.g.::
+
+ select([data_table.c.data['some key'].astext])
+
+ .. seealso::
+
+ :meth:`.ColumnElement.cast`
+
+ """
+ against = self.expr.operator
+ if against is PATHIDX:
+ against = ASTEXT_PATHIDX
+ else:
+ against = ASTEXT
+
+ return self.expr.left.operate(
+ against, self.expr.right, result_type=self.type.astext_type)
+
+ def _setup_getitem(self, index):
+ if not isinstance(index, util.string_types):
+ assert isinstance(index, collections.Sequence)
+ tokens = [util.text_type(elem) for elem in index]
+ index = "{%s}" % (", ".join(tokens))
+ operator = PATHIDX
+ else:
+ operator = INDEX
- return JSONElement(self.expr, other)
+ return operator, index, self.type
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
+ comparator_factory = Comparator
+
+ @property
+ def should_evaluate_none(self):
+ return not self.none_as_null
def bind_processor(self, dialect):
json_serializer = dialect._json_serializer or json.dumps
if util.py2k:
encoding = dialect.encoding
-
- def process(value):
- if isinstance(value, elements.Null) or (
- value is None and self.none_as_null
- ):
- return None
- return json_serializer(value).encode(encoding)
else:
- def process(value):
- if isinstance(value, elements.Null) or (
- value is None and self.none_as_null
- ):
- return None
+ encoding = None
+
+ def process(value):
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, elements.Null) or (
+ value is None and self.none_as_null
+ ):
+ return None
+ if encoding:
+ return json_serializer(value).encode(encoding)
+ else:
return json_serializer(value)
+
return process
def result_processor(self, dialect, coltype):
json_deserializer = dialect._json_deserializer or json.loads
if util.py2k:
encoding = dialect.encoding
-
- def process(value):
- if value is None:
- return None
- return json_deserializer(value.decode(encoding))
else:
- def process(value):
- if value is None:
- return None
- return json_deserializer(value)
+ encoding = None
+
+ def process(value):
+ if value is None:
+ return None
+ if encoding:
+ value = value.decode(encoding)
+ return json_deserializer(value)
return process
@@ -253,106 +317,68 @@ class JSONB(JSON):
data = {"key1": "value1", "key2": "value2"}
)
- :class:`.JSONB` provides several operations:
-
- * Index operations::
-
- data_table.c.data['some key']
-
- * Index operations returning text (required for text comparison)::
+ The :class:`.JSONB` type includes all operations provided by
+ :class:`.JSON`, including the same behaviors for indexing operations.
+ It also adds additional operators specific to JSONB, including
+ :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
+ :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
+ and :meth:`.JSONB.Comparator.contained_by`.
+
+ Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
+ in-place changes when used with the ORM, unless the
+ :mod:`sqlalchemy.ext.mutable` extension is used.
+
+ Custom serializers and deserializers
+ are shared with the :class:`.JSON` class, using the ``json_serializer``
+ and ``json_deserializer`` keyword arguments. These must be specified
+ at the dialect level using :func:`.create_engine`. When using
+ psycopg2, the serializers are associated with the jsonb type using
+ ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
+ in the same way that ``psycopg2.extras.register_default_json`` is used
+ to register these handlers with the json type.
- data_table.c.data['some key'].astext == 'some value'
-
- * Index operations with a built-in CAST call::
-
- data_table.c.data['some key'].cast(Integer) == 5
-
- * Path index operations::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
-
- * Path index operations returning text (required for text comparison)::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
- 'some value'
-
- Index operations return an instance of :class:`.JSONElement`, which
- represents an expression such as ``column -> index``. This element then
- defines methods such as :attr:`.JSONElement.astext` and
- :meth:`.JSONElement.cast` for setting up type behavior.
-
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
- detect in-place mutations to the structure. In order to detect these, the
- :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
- allow "in-place" changes to the datastructure to produce events which
- will be detected by the unit of work. See the example at :class:`.HSTORE`
- for a simple example involving a dictionary.
-
- Custom serializers and deserializers are specified at the dialect level,
- that is using :func:`.create_engine`. The reason for this is that when
- using psycopg2, the DBAPI only allows serializers at the per-cursor
- or per-connection level. E.g.::
-
- engine = create_engine("postgresql://scott:tiger@localhost/test",
- json_serializer=my_serialize_fn,
- json_deserializer=my_deserialize_fn
- )
+ .. versionadded:: 0.9.7
- When using the psycopg2 dialect, the json_deserializer is registered
- against the database using ``psycopg2.extras.register_default_json``.
+ .. seealso::
- .. versionadded:: 0.9.7
+ :class:`.JSON`
"""
__visit_name__ = 'JSONB'
- hashable = False
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
- def __getitem__(self, other):
- """Get the value at a given key."""
-
- return JSONElement(self.expr, other)
-
- def _adapt_expression(self, op, other_comparator):
- # How does one do equality?? jsonb also has "=" eg.
- # '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
- if isinstance(op, custom_op):
- if op.opstring in ['?', '?&', '?|', '@>', '<@']:
- return op, sqltypes.Boolean
- if op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
- return self.expr.op('?')(other)
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
- return self.expr.op('?&')(other)
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
- return self.expr.op('?|')(other)
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
- """Boolean expression. Test if keys (or array) are a superset of/contained
- the keys of the argument jsonb expression.
+ """Boolean expression. Test if keys (or array) are a superset
+ of/contained the keys of the argument jsonb expression.
"""
- return self.expr.op('@>')(other)
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
- return self.expr.op('<@')(other)
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ comparator_factory = Comparator
ischema_names['jsonb'] = JSONB
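
As a hedged illustration of the reworked JSON/JSONB comparators above (``astext``, path indexing, and ``JSON.NULL``); the table and values are assumptions for the example only, e.g.::

    from sqlalchemy import Column, Integer, MetaData, Table, null, select
    from sqlalchemy.dialects.postgresql import JSON, JSONB

    metadata = MetaData()
    # hypothetical table, for illustration only
    data_table = Table(
        "data_table", metadata,
        Column("id", Integer, primary_key=True),
        Column("data", JSONB),
    )

    # data ->> 'some key' = 'some value'
    stmt = select([data_table.c.id]).where(
        data_table.c.data["some key"].astext == "some value")

    # CAST(data ->> 'count' AS INTEGER) = 5  -- astext must now precede the cast
    stmt = select([data_table.c.id]).where(
        data_table.c.data["count"].astext.cast(Integer) == 5)

    # data #>> '{key_1, key_2}'  -- path access, returned as text
    stmt = select([data_table.c.data[("key_1", "key_2")].astext])

    # SQL NULL vs. the JSON-encoded string "null" on INSERT
    ins_sql_null = data_table.insert().values(data=null())
    ins_json_null = data_table.insert().values(data=JSON.NULL)
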
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 36a9d7bf7..d33554922 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -320,7 +320,7 @@ from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
- ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
+ ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES, UUID
from .hstore import HSTORE
from .json import JSON, JSONB
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index d9da46f4c..a1786d16c 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -853,12 +853,20 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
if not column.nullable:
colspec += " NOT NULL"
- if (column.primary_key and
- column.table.dialect_options['sqlite']['autoincrement'] and
- len(column.table.primary_key.columns) == 1 and
- issubclass(column.type._type_affinity, sqltypes.Integer) and
- not column.foreign_keys):
- colspec += " PRIMARY KEY AUTOINCREMENT"
+ if column.primary_key:
+ if (
+ column.autoincrement is True and
+ len(column.table.primary_key.columns) != 1
+ ):
+ raise exc.CompileError(
+ "SQLite does not support autoincrement for "
+ "composite primary keys")
+
+ if (column.table.dialect_options['sqlite']['autoincrement'] and
+ len(column.table.primary_key.columns) == 1 and
+ issubclass(column.type._type_affinity, sqltypes.Integer) and
+ not column.foreign_keys):
+ colspec += " PRIMARY KEY AUTOINCREMENT"
return colspec
@@ -894,11 +902,25 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
return preparer.format_table(table, use_schema=False)
- def visit_create_index(self, create):
+ def visit_create_index(self, create, include_schema=False,
+ include_table_schema=True):
index = create.element
-
- text = super(SQLiteDDLCompiler, self).visit_create_index(
- create, include_table_schema=False)
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ text += "INDEX %s ON %s (%s)" \
+ % (
+ self._prepared_index_name(index,
+ include_schema=True),
+ preparer.format_table(index.table,
+ use_schema=False),
+ ', '.join(
+ self.sql_compiler.process(
+ expr, include_table=False, literal_binds=True) for
+ expr in index.expressions)
+ )
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
@@ -1095,6 +1117,13 @@ class SQLiteDialect(default.DefaultDialect):
return None
@reflection.cache
+ def get_schema_names(self, connection, **kw):
+ s = "PRAGMA database_list"
+ dl = connection.execute(s)
+
+ return [db[1] for db in dl if db[1] != "temp"]
+
+ @reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
@@ -1190,7 +1219,7 @@ class SQLiteDialect(default.DefaultDialect):
'type': coltype,
'nullable': nullable,
'default': default,
- 'autoincrement': default is None,
+ 'autoincrement': 'auto',
'primary_key': primary_key,
}
@@ -1283,7 +1312,7 @@ class SQLiteDialect(default.DefaultDialect):
fk = fks[numerical_id] = {
'name': None,
'constrained_columns': [],
- 'referred_schema': None,
+ 'referred_schema': schema,
'referred_table': rtbl,
'referred_columns': [],
}
@@ -1387,7 +1416,7 @@ class SQLiteDialect(default.DefaultDialect):
unique_constraints = []
def parse_uqs():
- UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) +)?UNIQUE *\((.+?)\)'
+ UNIQUE_PATTERN = '(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
'(?:(".+?")|([a-z0-9]+)) '
'+[a-z0-9_ ]+? +UNIQUE')
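
A hedged sketch of the new SQLite ``get_schema_names()`` reflection added above, which reads ``PRAGMA database_list`` and skips ``temp``; the database file name is an assumption, e.g.::

    from sqlalchemy import create_engine, inspect

    # assumed database file; any databases ATTACHed on the inspected
    # connection would appear as additional schema names
    engine = create_engine("sqlite:///main.db")
    print(inspect(engine).get_schema_names())   # e.g. ['main']
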
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index ae0473a3e..b3f8e307a 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -608,8 +608,8 @@ class SybaseDialect(default.DefaultDialect):
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
- referential_constraints = connection.execute(REFCONSTRAINT_SQL,
- table_id=table_id)
+ referential_constraints = connection.execute(
+ REFCONSTRAINT_SQL, table_id=table_id).fetchall()
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
@@ -740,10 +740,13 @@ class SybaseDialect(default.DefaultDialect):
results.close()
constrained_columns = []
- for i in range(1, pks["count"] + 1):
- constrained_columns.append(pks["pk_%i" % (i,)])
- return {"constrained_columns": constrained_columns,
- "name": pks["name"]}
+ if pks:
+ for i in range(1, pks["count"] + 1):
+ constrained_columns.append(pks["pk_%i" % (i,)])
+ return {"constrained_columns": constrained_columns,
+ "name": pks["name"]}
+ else:
+ return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index f1eacf6a6..0b0d50329 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -389,14 +389,33 @@ def create_engine(*args, **kwargs):
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
- The dictionary is typically produced from a config file where keys
- are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The
- 'prefix' argument indicates the prefix to be searched for.
+ The dictionary is typically produced from a config file.
+
+ The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
+ ``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
+ indicates the prefix to be searched for. Each matching key (after the
+ prefix is stripped) is treated as though it were the corresponding keyword
+ argument to a :func:`.create_engine` call.
+
+ The only required key is (assuming the default prefix) ``sqlalchemy.url``,
+ which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
+ :param configuration: A dictionary (typically produced from a config file,
+ but this is not a requirement). Items whose keys start with the value
+ of 'prefix' will have that prefix stripped, and will then be passed to
+ :func:`.create_engine`.
+
+ :param prefix: Prefix to match and then strip from keys
+ in 'configuration'.
+
+ :param kwargs: Each keyword argument to ``engine_from_config()`` itself
+ overrides the corresponding item taken from the 'configuration'
+ dictionary. Keyword arguments should *not* be prefixed.
+
"""
options = dict((key[len(prefix):], configuration[key])
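
The expanded ``engine_from_config()`` docstring above can be read against a minimal, assumed configuration dictionary, e.g.::

    from sqlalchemy import engine_from_config

    # keys carrying the prefix are stripped and passed to create_engine();
    # only the URL key is required, and known keys are coerced from strings
    config = {
        "sqlalchemy.url": "sqlite:///example.db",
        "sqlalchemy.echo": "true",
        "unrelated.setting": "ignored",
    }
    engine = engine_from_config(config, prefix="sqlalchemy.")
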
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index c5eabac0d..eaa435d45 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -1531,9 +1531,13 @@ class Transaction(object):
def __init__(self, connection, parent):
self.connection = connection
- self._parent = parent or self
+ self._actual_parent = parent
self.is_active = True
+ @property
+ def _parent(self):
+ return self._actual_parent or self
+
def close(self):
"""Close this :class:`.Transaction`.
diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py
index 73a8b4635..3bad765df 100644
--- a/lib/sqlalchemy/engine/interfaces.py
+++ b/lib/sqlalchemy/engine/interfaces.py
@@ -252,7 +252,9 @@ class Dialect(object):
sequence
a dictionary of the form
- {'name' : str, 'start' :int, 'increment': int}
+ {'name' : str, 'start' :int, 'increment': int, 'minvalue': int,
+ 'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool,
+ 'cycle': bool}
Additional column attributes may be present.
"""
@@ -1147,4 +1149,4 @@ class ExceptionContext(object):
.. versionadded:: 1.0.3
- """ \ No newline at end of file
+ """
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index b2b78dee8..7d1425c28 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -221,7 +221,7 @@ class ResultMetaData(object):
in enumerate(result_columns)
]
self.keys = [
- elem[1] for elem in result_columns
+ elem[0] for elem in result_columns
]
else:
# case 2 - raw string, or number of columns in result does
@@ -236,7 +236,8 @@ class ResultMetaData(object):
# that SQLAlchemy has used up through 0.9.
if num_ctx_cols:
- result_map = self._create_result_map(result_columns)
+ result_map = self._create_result_map(
+ result_columns, case_sensitive)
raw = []
self.keys = []
@@ -329,10 +330,12 @@ class ResultMetaData(object):
])
@classmethod
- def _create_result_map(cls, result_columns):
+ def _create_result_map(cls, result_columns, case_sensitive=True):
d = {}
for elem in result_columns:
key, rec = elem[0], elem[1:]
+ if not case_sensitive:
+ key = key.lower()
if key in d:
# conflicting keyname, just double up the list
# of objects. this will cause an "ambiguous name"
@@ -492,10 +495,20 @@ class ResultProxy(object):
self._init_metadata()
def _getter(self, key):
- return self._metadata._getter(key)
+ try:
+ getter = self._metadata._getter
+ except AttributeError:
+ return self._non_result(None)
+ else:
+ return getter(key)
def _has_key(self, key):
- return self._metadata._has_key(key)
+ try:
+ has_key = self._metadata._has_key
+ except AttributeError:
+ return self._non_result(None)
+ else:
+ return has_key(key)
def _init_metadata(self):
metadata = self._cursor_description()
@@ -699,7 +712,7 @@ class ResultProxy(object):
while True:
row = self.fetchone()
if row is None:
- raise StopIteration
+ return
else:
yield row
diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py
index a64c7d08d..8a88e40ef 100644
--- a/lib/sqlalchemy/event/attr.py
+++ b/lib/sqlalchemy/event/attr.py
@@ -51,7 +51,7 @@ class _ClsLevelDispatch(RefCollection):
"""Class-level events on :class:`._Dispatch` classes."""
__slots__ = ('name', 'arg_names', 'has_kw',
- 'legacy_signatures', '_clslevel')
+ 'legacy_signatures', '_clslevel', '__weakref__')
def __init__(self, parent_dispatch_cls, fn):
self.name = fn.__name__
@@ -230,9 +230,7 @@ class _EmptyListener(_InstanceLevelDispatch):
class _CompoundListener(_InstanceLevelDispatch):
- _exec_once = False
-
- __slots__ = '_exec_once_mutex',
+ __slots__ = '_exec_once_mutex', '_exec_once'
def _memoized_attr__exec_once_mutex(self):
return threading.Lock()
@@ -279,11 +277,14 @@ class _ListenerCollection(_CompoundListener):
"""
- __slots__ = 'parent_listeners', 'parent', 'name', 'listeners', 'propagate'
+ __slots__ = (
+ 'parent_listeners', 'parent', 'name', 'listeners',
+ 'propagate', '__weakref__')
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
+ self._exec_once = False
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.name
@@ -339,11 +340,10 @@ class _ListenerCollection(_CompoundListener):
class _JoinedListener(_CompoundListener):
- _exec_once = False
-
__slots__ = 'parent', 'name', 'local', 'parent_listeners'
def __init__(self, parent, name, local):
+ self._exec_once = False
self.parent = parent
self.name = name
self.local = local
diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py
index f439d554f..0249b2623 100644
--- a/lib/sqlalchemy/events.py
+++ b/lib/sqlalchemy/events.py
@@ -819,6 +819,11 @@ class ConnectionEvents(event.Events):
.. seealso::
+ :ref:`pool_disconnects_pessimistic` - illustrates how to use
+ :meth:`.ConnectionEvents.engine_connect`
+ to transparently ensure pooled connections are connected to the
+ database.
+
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
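
A compressed, hedged version of the pessimistic-disconnect recipe the new cross-reference above points to; this is only a sketch, and the full documented recipe also saves and restores ``connection.should_close_with_result``, e.g.::

    from sqlalchemy import create_engine, event, exc, select

    engine = create_engine("postgresql://scott:tiger@localhost/test")

    @event.listens_for(engine, "engine_connect")
    def ping_connection(connection, branch):
        if branch:
            # short-lived "branch" connections piggyback on a parent; skip them
            return
        try:
            # lightweight round trip to verify the connection is live
            connection.scalar(select([1]))
        except exc.DBAPIError as err:
            if err.connection_invalidated:
                # the pool discarded the stale connection; retry on a fresh one
                connection.scalar(select([1]))
            else:
                raise
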
diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py
index d837aab52..31f16287d 100644
--- a/lib/sqlalchemy/ext/associationproxy.py
+++ b/lib/sqlalchemy/ext/associationproxy.py
@@ -94,7 +94,7 @@ class AssociationProxy(interfaces.InspectionAttrInfo):
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
- proxy_bulk_set=None):
+ proxy_bulk_set=None, info=None):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
@@ -138,6 +138,11 @@ class AssociationProxy(interfaces.InspectionAttrInfo):
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
+ :param info: optional, will be assigned to
+ :attr:`.AssociationProxy.info` if present.
+
+ .. versionadded:: 1.0.9
+
"""
self.target_collection = target_collection
self.value_attr = attr
@@ -150,6 +155,8 @@ class AssociationProxy(interfaces.InspectionAttrInfo):
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
+ if info:
+ self.info = info
@property
def remote_attr(self):
@@ -596,7 +603,7 @@ class _AssociationList(_AssociationCollection):
for member in self.col:
yield self._get(member)
- raise StopIteration
+ return
def append(self, value):
item = self._create(value)
@@ -900,7 +907,7 @@ class _AssociationSet(_AssociationCollection):
"""
for member in self.col:
yield self._get(member)
- raise StopIteration
+ return
def add(self, value):
if value not in self:
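
An assumed example of the new ``info`` parameter accepted by :class:`.AssociationProxy` above; the classes are hypothetical and only the keyword usage is the point, e.g.::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Keyword(Base):
        __tablename__ = "keyword"
        id = Column(Integer, primary_key=True)
        user_id = Column(ForeignKey("user_account.id"))
        keyword = Column(String)

    class User(Base):
        __tablename__ = "user_account"
        id = Column(Integer, primary_key=True)
        kw = relationship(Keyword)
        # the new "info" keyword is forwarded to AssociationProxy.info
        keywords = association_proxy("kw", "keyword",
                                     info={"category": "tagging"})
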
diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py
index 330992e56..218ed64e1 100644
--- a/lib/sqlalchemy/ext/automap.py
+++ b/lib/sqlalchemy/ext/automap.py
@@ -111,7 +111,7 @@ explicit table declaration::
User, Address, Order = Base.classes.user, Base.classes.address,\
Base.classes.user_order
-Specifying Classes Explcitly
+Specifying Classes Explicitly
============================
The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py
index f01e0b348..d255b5ee4 100644
--- a/lib/sqlalchemy/ext/baked.py
+++ b/lib/sqlalchemy/ext/baked.py
@@ -272,16 +272,35 @@ class Result(object):
Equivalent to :meth:`.Query.one`.
"""
+ try:
+ ret = self.one_or_none()
+ except orm_exc.MultipleResultsFound:
+ raise orm_exc.MultipleResultsFound(
+ "Multiple rows were found for one()")
+ else:
+ if ret is None:
+ raise orm_exc.NoResultFound("No row was found for one()")
+ return ret
+
+ def one_or_none(self):
+ """Return one or zero results, or raise an exception for multiple
+ rows.
+
+ Equivalent to :meth:`.Query.one_or_none`.
+
+ .. versionadded:: 1.0.9
+
+ """
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
- raise orm_exc.NoResultFound("No row was found for one()")
+ return None
else:
raise orm_exc.MultipleResultsFound(
- "Multiple rows were found for one()")
+ "Multiple rows were found for one_or_none()")
def all(self):
"""Return all rows.
@@ -335,6 +354,12 @@ class Result(object):
# (remember, we can map to an OUTER JOIN)
bq = self.bq
+ # add the clause we got from mapper._get_clause to the cache
+ # key so that if a race causes multiple calls to _get_clause,
+ # we've cached on ours
+ bq = bq._clone()
+ bq._cache_key += (_get_clause, )
+
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
@@ -359,7 +384,6 @@ def bake_lazy_loaders():
Python overhead for these operations.
"""
- strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
@@ -369,6 +393,8 @@ def bake_lazy_loaders():
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
+ strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:]
+
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
diff --git a/lib/sqlalchemy/ext/declarative/api.py b/lib/sqlalchemy/ext/declarative/api.py
index 3d46bd4cb..dfc47ce95 100644
--- a/lib/sqlalchemy/ext/declarative/api.py
+++ b/lib/sqlalchemy/ext/declarative/api.py
@@ -7,7 +7,7 @@
"""Public API functions and helpers for declarative."""
-from ...schema import Table, MetaData
+from ...schema import Table, MetaData, Column
from ...orm import synonym as _orm_synonym, \
comparable_property,\
interfaces, properties, attributes
@@ -525,6 +525,17 @@ class AbstractConcreteBase(ConcreteBase):
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
+ # For columns that were declared on the class, these
+ # are normally ignored with the "__no_table__" mapping,
+ # unless they have a different attribute key vs. col name
+ # and are in the properties argument.
+ # In that case, ensure we update the properties entry
+ # to the correct column from the pjoin target table.
+ declared_cols = set(to_map.declared_columns)
+ for k, v in list(to_map.properties.items()):
+ if v in declared_cols:
+ to_map.properties[k] = pjoin.c[v.key]
+
to_map.local_table = pjoin
m_args = to_map.mapper_args_fn or dict
diff --git a/lib/sqlalchemy/ext/declarative/base.py b/lib/sqlalchemy/ext/declarative/base.py
index 57eb54f63..57305748c 100644
--- a/lib/sqlalchemy/ext/declarative/base.py
+++ b/lib/sqlalchemy/ext/declarative/base.py
@@ -463,7 +463,6 @@ class _MapperConfig(object):
def _prepare_mapper_arguments(self):
properties = self.properties
-
if self.mapper_args_fn:
mapper_args = self.mapper_args_fn()
else:
diff --git a/lib/sqlalchemy/ext/declarative/clsregistry.py b/lib/sqlalchemy/ext/declarative/clsregistry.py
index c3887d6cf..050923980 100644
--- a/lib/sqlalchemy/ext/declarative/clsregistry.py
+++ b/lib/sqlalchemy/ext/declarative/clsregistry.py
@@ -321,7 +321,8 @@ def _deferred_relationship(cls, prop):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
- if attr in kwargs and isinstance(kwargs[attr], str):
+ if attr in kwargs and isinstance(kwargs[attr],
+ util.string_types):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py
index 9c6178264..0073494b8 100644
--- a/lib/sqlalchemy/ext/hybrid.py
+++ b/lib/sqlalchemy/ext/hybrid.py
@@ -46,7 +46,7 @@ as the class itself::
@hybrid_method
def contains(self, point):
- return (self.start <= point) & (point < self.end)
+ return (self.start <= point) & (point <= self.end)
@hybrid_method
def intersects(self, other):
diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py
index 501b18f39..88b653f60 100644
--- a/lib/sqlalchemy/ext/mutable.py
+++ b/lib/sqlalchemy/ext/mutable.py
@@ -658,6 +658,16 @@ class MutableDict(Mutable, dict):
dict.update(self, *a, **kw)
self.changed()
+ def pop(self, key):
+ result = dict.pop(self, key)
+ self.changed()
+ return result
+
+ def popitem(self):
+ result = dict.popitem(self)
+ self.changed()
+ return result
+
def clear(self):
dict.clear(self)
self.changed()
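
A hedged illustration of the new change-tracking ``pop()`` / ``popitem()`` on :class:`.MutableDict` above, using an assumed mapping, e.g.::

    from sqlalchemy import Column, Integer
    from sqlalchemy.dialects.postgresql import HSTORE
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableDict

    Base = declarative_base()

    class Profile(Base):
        __tablename__ = "profile"
        id = Column(Integer, primary_key=True)
        attrs = Column(MutableDict.as_mutable(HSTORE))

    # given a persistent Profile instance "p", these now mark the attribute
    # as changed, so the ORM emits an UPDATE at flush time:
    #
    #     p.attrs.pop("color")
    #     p.attrs.popitem()
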
diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py
index e02a271e3..d9910a070 100644
--- a/lib/sqlalchemy/orm/__init__.py
+++ b/lib/sqlalchemy/orm/__init__.py
@@ -149,7 +149,12 @@ def backref(name, **kwargs):
'items':relationship(
SomeItem, backref=backref('parent', lazy='subquery'))
+ .. seealso::
+
+ :ref:`relationships_backref`
+
"""
+
return (name, kwargs)
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
index a45c22394..8605df785 100644
--- a/lib/sqlalchemy/orm/attributes.py
+++ b/lib/sqlalchemy/orm/attributes.py
@@ -551,6 +551,11 @@ class AttributeImpl(object):
def initialize(self, state, dict_):
"""Initialize the given state's attribute with an empty value."""
+ # As of 1.0, we don't actually set a value in
+ # dict_. This is so that the state of the object does not get
+ # modified without emitting the appropriate events.
+
+
return None
def get(self, state, dict_, passive=PASSIVE_OFF):
@@ -848,7 +853,10 @@ class CollectionAttributeImpl(AttributeImpl):
supports_population = True
collection = True
- __slots__ = 'copy', 'collection_factory', '_append_token', '_remove_token'
+ __slots__ = (
+ 'copy', 'collection_factory', '_append_token', '_remove_token',
+ '_duck_typed_as'
+ )
def __init__(self, class_, key, callable_, dispatch,
typecallable=None, trackparent=False, extension=None,
@@ -868,6 +876,8 @@ class CollectionAttributeImpl(AttributeImpl):
self.collection_factory = typecallable
self._append_token = None
self._remove_token = None
+ self._duck_typed_as = util.duck_type_collection(
+ self.collection_factory())
if getattr(self.collection_factory, "_sa_linker", None):
@@ -1011,38 +1021,46 @@ class CollectionAttributeImpl(AttributeImpl):
except (ValueError, KeyError, IndexError):
pass
- def set(self, state, dict_, value, initiator,
- passive=PASSIVE_OFF, pop=False):
- """Set a value on the given object.
-
- """
-
- self._set_iterable(
- state, dict_, value,
- lambda adapter, i: adapter.adapt_like_to_iterable(i))
-
- def _set_iterable(self, state, dict_, iterable, adapter=None):
- """Set a collection value from an iterable of state-bearers.
+ def set(self, state, dict_, value, initiator=None,
+ passive=PASSIVE_OFF, pop=False, _adapt=True):
+ iterable = orig_iterable = value
- ``adapter`` is an optional callable invoked with a CollectionAdapter
- and the iterable. Should return an iterable of state-bearing
- instances suitable for appending via a CollectionAdapter. Can be used
- for, e.g., adapting an incoming dictionary into an iterator of values
- rather than keys.
-
- """
# pulling a new collection first so that an adaptation exception does
# not trigger a lazy load of the old collection.
new_collection, user_data = self._initialize_collection(state)
- if adapter:
- new_values = list(adapter(new_collection, iterable))
- else:
- new_values = list(iterable)
+ if _adapt:
+ if new_collection._converter is not None:
+ iterable = new_collection._converter(iterable)
+ else:
+ setting_type = util.duck_type_collection(iterable)
+ receiving_type = self._duck_typed_as
+
+ if setting_type is not receiving_type:
+ given = iterable is None and 'None' or \
+ iterable.__class__.__name__
+ wanted = self._duck_typed_as.__name__
+ raise TypeError(
+ "Incompatible collection type: %s is not %s-like" % (
+ given, wanted))
+
+ # If the object is an adapted collection, return the (iterable)
+ # adapter.
+ if hasattr(iterable, '_sa_iterator'):
+ iterable = iterable._sa_iterator()
+ elif setting_type is dict:
+ if util.py3k:
+ iterable = iterable.values()
+ else:
+ iterable = getattr(
+ iterable, 'itervalues', iterable.values)()
+ else:
+ iterable = iter(iterable)
+ new_values = list(iterable)
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
if old is PASSIVE_NO_RESULT:
old = self.initialize(state, dict_)
- elif old is iterable:
+ elif old is orig_iterable:
# ignore re-assignment of the current collection, as happens
# implicitly with in-place operators (foo.collection |= other)
return
@@ -1054,7 +1072,8 @@ class CollectionAttributeImpl(AttributeImpl):
dict_[self.key] = user_data
- collections.bulk_replace(new_values, old_collection, new_collection)
+ collections.bulk_replace(
+ new_values, old_collection, new_collection)
del old._sa_adapter
self.dispatch.dispose_collection(state, old, old_collection)
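# The attributes.py refactor above folds the old adapt_like_to_iterable()
# duck-typing into CollectionAttributeImpl.set() itself, keyed off the new
# memoized _duck_typed_as slot.  A hedged sketch of the user-facing behavior
# (mapping names are illustrative):
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    children = relationship("Child")      # default list-like collection

class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))

# p.children = [c1, c2]     -> adapted inline and bulk-replaced
# p.children = {'k': c1}    -> raises TypeError:
#                              "Incompatible collection type: dict is not list-like"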
diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py
index 4f988a8d4..58a69227c 100644
--- a/lib/sqlalchemy/orm/collections.py
+++ b/lib/sqlalchemy/orm/collections.py
@@ -111,6 +111,7 @@ from ..sql import expression
from .. import util, exc as sa_exc
from . import base
+from sqlalchemy.util.compat import inspect_getargspec
__all__ = ['collection', 'collection_adapter',
'mapped_collection', 'column_mapped_collection',
@@ -573,13 +574,18 @@ class CollectionAdapter(object):
"""
- invalidated = False
+
+ __slots__ = (
+ 'attr', '_key', '_data', 'owner_state', '_converter', 'invalidated')
def __init__(self, attr, owner_state, data):
+ self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
+ self._converter = data._sa_converter
+ self.invalidated = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@@ -599,53 +605,8 @@ class CollectionAdapter(object):
"""
return self.owner_state.dict[self._key] is self._data()
- @util.memoized_property
- def attr(self):
- return self.owner_state.manager[self._key].impl
-
- def adapt_like_to_iterable(self, obj):
- """Converts collection-compatible objects to an iterable of values.
-
- Can be passed any type of object, and if the underlying collection
- determines that it can be adapted into a stream of values it can
- use, returns an iterable of values suitable for append()ing.
-
- This method may raise TypeError or any other suitable exception
- if adaptation fails.
-
- If a converter implementation is not supplied on the collection,
- a default duck-typing-based implementation is used.
-
- """
- converter = self._data()._sa_converter
- if converter is not None:
- return converter(obj)
-
- setting_type = util.duck_type_collection(obj)
- receiving_type = util.duck_type_collection(self._data())
-
- if obj is None or setting_type != receiving_type:
- given = obj is None and 'None' or obj.__class__.__name__
- if receiving_type is None:
- wanted = self._data().__class__.__name__
- else:
- wanted = receiving_type.__name__
-
- raise TypeError(
- "Incompatible collection type: %s is not %s-like" % (
- given, wanted))
-
- # If the object is an adapted collection, return the (iterable)
- # adapter.
- if getattr(obj, '_sa_adapter', None) is not None:
- return obj._sa_adapter
- elif setting_type == dict:
- if util.py3k:
- return obj.values()
- else:
- return getattr(obj, 'itervalues', obj.values)()
- else:
- return iter(obj)
+ def bulk_appender(self):
+ return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
@@ -662,6 +623,9 @@ class CollectionAdapter(object):
for item in items:
appender(item, _sa_initiator=False)
+ def bulk_remover(self):
+ return self._data()._sa_remover
+
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
@@ -776,8 +740,8 @@ def bulk_replace(values, existing_adapter, new_adapter):
"""
- if not isinstance(values, list):
- values = list(values)
+
+ assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
@@ -785,15 +749,18 @@ def bulk_replace(values, existing_adapter, new_adapter):
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
+ appender = new_adapter.bulk_appender()
+
for member in values or ():
if member in additions:
- new_adapter.append_with_event(member)
+ appender(member)
elif member in constants:
- new_adapter.append_without_event(member)
+ appender(member, _sa_initiator=False)
if existing_adapter:
+ remover = existing_adapter.bulk_remover()
for member in removals:
- existing_adapter.remove_with_event(member)
+ remover(member)
def prepare_instrumentation(factory):
@@ -982,7 +949,7 @@ def _instrument_membership_mutator(method, before, argument, after):
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
- fn_args = list(util.flatten_iterator(inspect.getargspec(method)[0]))
+ fn_args = list(util.flatten_iterator(inspect_getargspec(method)[0]))
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
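# collections.py now introspects decorated mutators with inspect_getargspec()
# from the compat layer (an assumption: the shim dispatches to the appropriate
# getargspec/getfullargspec per Python version).  A hedged sketch of a custom
# collection whose decorated methods pass through
# _instrument_membership_mutator():
from sqlalchemy.orm.collections import collection

class MyList(list):
    @collection.appender
    def append(self, item):
        # the instrumented wrapper locates the "item" argument by
        # inspecting this method's signature
        list.append(self, item)

    @collection.remover
    def remove(self, item):
        list.remove(self, item)

# used as, e.g., relationship("Child", collection_class=MyList)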
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
index d8989939b..f3325203e 100644
--- a/lib/sqlalchemy/orm/dependency.py
+++ b/lib/sqlalchemy/orm/dependency.py
@@ -303,9 +303,9 @@ class DependencyProcessor(object):
set
)
- def _post_update(self, state, uowcommit, related):
+ def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
- if x is not None:
+ if not is_m2o_delete or x is not None:
uowcommit.issue_post_update(
state,
[r for l, r in self.prop.synchronize_pairs]
@@ -740,7 +740,9 @@ class ManyToOneDP(DependencyProcessor):
self.key,
self._passive_delete_flag)
if history:
- self._post_update(state, uowcommit, history.sum())
+ self._post_update(
+ state, uowcommit, history.sum(),
+ is_m2o_delete=True)
def process_saves(self, uowcommit, states):
for state in states:
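# The dependency.py change threads an is_m2o_delete flag into _post_update():
# the many-to-one delete path keeps the old "skip None" filtering, while other
# call sites (not shown in this hunk) use the default is_m2o_delete=False and
# therefore post-update even when a related value is None.  A hedged sketch of
# a mapping that relies on post_update (names illustrative):
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    favorite_entry_id = Column(Integer, ForeignKey('entry.id'))
    # post_update=True breaks the mutual-FK cycle with a second UPDATE
    favorite_entry = relationship(
        "Entry", foreign_keys=[favorite_entry_id], post_update=True)

class Entry(Base):
    __tablename__ = 'entry'
    id = Column(Integer, primary_key=True)
    widget_id = Column(Integer, ForeignKey('widget.id'))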
diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py
index aedd863f8..ca593765f 100644
--- a/lib/sqlalchemy/orm/dynamic.py
+++ b/lib/sqlalchemy/orm/dynamic.py
@@ -128,17 +128,16 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
dict_[self.key] = True
return state.committed_state[self.key]
- def set(self, state, dict_, value, initiator,
+ def set(self, state, dict_, value, initiator=None,
passive=attributes.PASSIVE_OFF,
- check_old=None, pop=False):
+ check_old=None, pop=False, _adapt=True):
if initiator and initiator.parent_token is self.parent_token:
return
if pop and value is None:
return
- self._set_iterable(state, dict_, value)
- def _set_iterable(self, state, dict_, iterable, adapter=None):
+ iterable = value
new_values = list(iterable)
if state.has_identity:
old_collection = util.IdentitySet(self.get(state, dict_))
diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py
index 801701be9..5b0cbfdad 100644
--- a/lib/sqlalchemy/orm/events.py
+++ b/lib/sqlalchemy/orm/events.py
@@ -18,6 +18,7 @@ from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
+from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
@@ -216,14 +217,41 @@ class InstanceEvents(event.Events):
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
+ This event is called when the ``__init__`` method of a class
+ is called the first time for that particular class. The event
+ is invoked before ``__init__`` actually proceeds, as well as before
+ the :meth:`.InstanceEvents.init` event is invoked.
+
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
- an object. It is not called when an object is loaded from the
- database.
+ an object, in conjunction with the object's constructor, e.g.
+ its ``__init__`` method. It is not called when an object is
+ loaded from the database; see the :meth:`.InstanceEvents.load`
+ event in order to intercept a database load.
+
+ The event is called before the actual ``__init__`` constructor
+ of the object is called. The ``kwargs`` dictionary may be
+ modified in-place in order to affect what is passed to
+ ``__init__``.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param args: positional arguments passed to the ``__init__`` method.
+ This is passed as a tuple and is currently immutable.
+ :param kwargs: keyword arguments passed to the ``__init__`` method.
+ This structure *can* be altered in place.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.init_failure`
+
+ :meth:`.InstanceEvents.load`
"""
@@ -232,8 +260,31 @@ class InstanceEvents(event.Events):
and raised an exception.
This method is only called during a userland construction of
- an object. It is not called when an object is loaded from the
- database.
+ an object, in conjunction with the object's constructor, e.g.
+ its ``__init__`` method. It is not called when an object is loaded
+ from the database.
+
+ The event is invoked after an exception raised by the ``__init__``
+ method is caught. After the event
+ is invoked, the original exception is re-raised outwards, so that
+ the construction of the object still raises an exception. The
+ actual exception and stack trace raised should be present in
+ ``sys.exc_info()``.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param args: positional arguments that were passed to the ``__init__``
+ method.
+ :param kwargs: keyword arguments that were passed to the ``__init__``
+ method.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.init`
+
+ :meth:`.InstanceEvents.load`
"""
@@ -260,12 +311,23 @@ class InstanceEvents(event.Events):
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
+ .. seealso::
+
+ :meth:`.InstanceEvents.init`
+
+ :meth:`.InstanceEvents.refresh`
+
+ :meth:`.SessionEvents.loaded_as_persistent`
+
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
+ Contrast this to the :meth:`.InstanceEvents.load` method, which
+ is invoked when the object is first loaded from a query.
+
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
@@ -276,6 +338,10 @@ class InstanceEvents(event.Events):
were populated, or None if all column-mapped, non-deferred
attributes were populated.
+ .. seealso::
+
+ :meth:`.InstanceEvents.load`
+
"""
def refresh_flush(self, target, flush_context, attrs):
@@ -538,7 +604,7 @@ class MapperEvents(event.Events):
meth = getattr(cls, identifier)
try:
target_index = \
- inspect.getargspec(meth)[0].index('target') - 1
+ inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
@@ -589,32 +655,67 @@ class MapperEvents(event.Events):
"""
def mapper_configured(self, mapper, class_):
- """Called when the mapper for the class is fully configured.
-
- This event is the latest phase of mapper construction, and
- is invoked when the mapped classes are first used, so that
- relationships between mappers can be resolved. When the event is
- called, the mapper should be in its final state.
-
- While the configuration event normally occurs automatically,
- it can be forced to occur ahead of time, in the case where the event
- is needed before any actual mapper usage, by using the
- :func:`.configure_mappers` function.
+ """Called when a specific mapper has completed its own configuration
+ within the scope of the :func:`.configure_mappers` call.
+
+ The :meth:`.MapperEvents.mapper_configured` event is invoked
+ for each mapper that is encountered when the
+ :func:`.orm.configure_mappers` function proceeds through the current
+ list of not-yet-configured mappers.
+ :func:`.orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
+
+ When the event is called, the mapper should be in its final
+ state, but **not including backrefs** that may be invoked from
+ other mappers; they might still be pending within the
+ configuration operation. Bidirectional relationships that
+ are instead configured via the
+ :paramref:`.orm.relationship.back_populates` argument
+ *will* be fully available, since this style of relationship does not
+ rely upon other possibly-not-configured mappers to know that they
+ exist.
+ For an event that is guaranteed to have **all** mappers ready
+ to go including backrefs that are defined only on other
+ mappings, use the :meth:`.MapperEvents.after_configured`
+ event; this event is invoked only after all known mappings have been
+ fully configured.
+
+ The :meth:`.MapperEvents.mapper_configured` event, unlike
+ :meth:`.MapperEvents.before_configured` or
+ :meth:`.MapperEvents.after_configured`,
+ is called for each mapper/class individually, and the mapper is
+ passed to the event itself. It also is called exactly once for
+ a particular mapper. The event is therefore useful for
+ configurational steps that benefit from being invoked just once
+ on a specific mapper basis, which don't require that "backref"
+ configurations are necessarily ready yet.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
+ .. seealso::
+
+ :meth:`.MapperEvents.before_configured`
+
+ :meth:`.MapperEvents.after_configured`
+
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
- This corresponds to the :func:`.orm.configure_mappers` call, which
- note is usually called automatically as mappings are first
- used.
+ The :meth:`.MapperEvents.before_configured` event is invoked
+ each time the :func:`.orm.configure_mappers` function is
+ invoked, before the function has done any of its work.
+ :func:`.orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
@@ -626,11 +727,16 @@ class MapperEvents(event.Events):
def go():
# ...
+ Contrast this event to :meth:`.MapperEvents.after_configured`,
+ which is invoked after the series of mappers has been configured,
+ as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
+ on a per-mapper basis as each one is configured to the extent possible.
+
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
- already been used, this event can be called again. To ensure
+ already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
@@ -643,14 +749,33 @@ class MapperEvents(event.Events):
.. versionadded:: 0.9.3
+
+ .. seealso::
+
+ :meth:`.MapperEvents.mapper_configured`
+
+ :meth:`.MapperEvents.after_configured`
+
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
- This corresponds to the :func:`.orm.configure_mappers` call, which
- note is usually called automatically as mappings are first
- used.
+ The :meth:`.MapperEvents.after_configured` event is invoked
+ each time the :func:`.orm.configure_mappers` function is
+ invoked, after the function has completed its work.
+ :func:`.orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
+
+ Contrast this event to the :meth:`.MapperEvents.mapper_configured`
+ event, which is called on a per-mapper basis while the configuration
+ operation proceeds; unlike that event, when this event is invoked,
+ all cross-configurations (e.g. backrefs) will also have been made
+ available for any mappers that were pending.
+ Also contrast to :meth:`.MapperEvents.before_configured`,
+ which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
@@ -666,7 +791,7 @@ class MapperEvents(event.Events):
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
- already been used, this event can be called again. To ensure
+ already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
@@ -676,6 +801,12 @@ class MapperEvents(event.Events):
def go():
# ...
+ .. seealso::
+
+ :meth:`.MapperEvents.mapper_configured`
+
+ :meth:`.MapperEvents.before_configured`
+
"""
def before_insert(self, mapper, connection, target):
@@ -697,30 +828,14 @@ class MapperEvents(event.Events):
steps.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given**
- :class:`.Connection` **only.** Handlers here should **not** make
- alterations to the state of the :class:`.Session` overall, and
- in general should not affect any :func:`.relationship` -mapped
- attributes, as session cascade rules will not function properly,
- nor is it always known if the related class has already been
- handled. Operations that **are not supported in mapper
- events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself, or
- another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -734,6 +849,10 @@ class MapperEvents(event.Events):
object associated with the instance.
:return: No return value is supported by this event.
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
def after_insert(self, mapper, connection, target):
@@ -755,30 +874,14 @@ class MapperEvents(event.Events):
event->persist->event steps.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given**
- :class:`.Connection` **only.** Handlers here should **not** make
- alterations to the state of the :class:`.Session` overall, and in
- general should not affect any :func:`.relationship` -mapped
- attributes, as session cascade rules will not function properly,
- nor is it always known if the related class has already been
- handled. Operations that **are not supported in mapper
- events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself,
- or another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -792,6 +895,10 @@ class MapperEvents(event.Events):
object associated with the instance.
:return: No return value is supported by this event.
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
def before_update(self, mapper, connection, target):
@@ -832,29 +939,14 @@ class MapperEvents(event.Events):
steps.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given** :class:`.Connection`
- **only.** Handlers here should **not** make alterations to the
- state of the :class:`.Session` overall, and in general should not
- affect any :func:`.relationship` -mapped attributes, as
- session cascade rules will not function properly, nor is it
- always known if the related class has already been handled.
- Operations that **are not supported in mapper events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself,
- or another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -867,6 +959,11 @@ class MapperEvents(event.Events):
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
def after_update(self, mapper, connection, target):
@@ -906,29 +1003,14 @@ class MapperEvents(event.Events):
steps.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given** :class:`.Connection`
- **only.** Handlers here should **not** make alterations to the
- state of the :class:`.Session` overall, and in general should not
- affect any :func:`.relationship` -mapped attributes, as
- session cascade rules will not function properly, nor is it
- always known if the related class has already been handled.
- Operations that **are not supported in mapper events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself,
- or another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -942,6 +1024,10 @@ class MapperEvents(event.Events):
object associated with the instance.
:return: No return value is supported by this event.
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
def before_delete(self, mapper, connection, target):
@@ -957,29 +1043,14 @@ class MapperEvents(event.Events):
once in a later step.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given** :class:`.Connection`
- **only.** Handlers here should **not** make alterations to the
- state of the :class:`.Session` overall, and in general should not
- affect any :func:`.relationship` -mapped attributes, as
- session cascade rules will not function properly, nor is it
- always known if the related class has already been handled.
- Operations that **are not supported in mapper events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself,
- or another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -993,6 +1064,10 @@ class MapperEvents(event.Events):
object associated with the instance.
:return: No return value is supported by this event.
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
def after_delete(self, mapper, connection, target):
@@ -1008,29 +1083,14 @@ class MapperEvents(event.Events):
once in a previous step.
.. warning::
- Mapper-level flush events are designed to operate **on attributes
- local to the immediate object being handled
- and via SQL operations with the given** :class:`.Connection`
- **only.** Handlers here should **not** make alterations to the
- state of the :class:`.Session` overall, and in general should not
- affect any :func:`.relationship` -mapped attributes, as
- session cascade rules will not function properly, nor is it
- always known if the related class has already been handled.
- Operations that **are not supported in mapper events** include:
-
- * :meth:`.Session.add`
- * :meth:`.Session.delete`
- * Mapped collection append, add, remove, delete, discard, etc.
- * Mapped relationship attribute set/del events,
- i.e. ``someobject.related = someotherobject``
-
- Operations which manipulate the state of the object
- relative to other objects are better handled:
-
- * In the ``__init__()`` method of the mapped object itself,
- or another method designed to establish some particular state.
- * In a ``@validates`` handler, see :ref:`simple_validators`
- * Within the :meth:`.SessionEvents.before_flush` event.
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -1044,6 +1104,10 @@ class MapperEvents(event.Events):
object associated with the instance.
:return: No return value is supported by this event.
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
"""
@@ -1284,6 +1348,8 @@ class SessionEvents(event.Events):
:meth:`~.SessionEvents.after_flush_postexec`
+ :ref:`session_persistence_events`
+
"""
def after_flush(self, session, flush_context):
@@ -1304,6 +1370,8 @@ class SessionEvents(event.Events):
:meth:`~.SessionEvents.after_flush_postexec`
+ :ref:`session_persistence_events`
+
"""
def after_flush_postexec(self, session, flush_context):
@@ -1326,6 +1394,8 @@ class SessionEvents(event.Events):
:meth:`~.SessionEvents.after_flush`
+ :ref:`session_persistence_events`
+
"""
def after_begin(self, session, transaction, connection):
@@ -1363,6 +1433,8 @@ class SessionEvents(event.Events):
:meth:`~.SessionEvents.after_attach`
+ :ref:`session_lifecycle_events`
+
"""
def after_attach(self, session, instance):
@@ -1385,6 +1457,8 @@ class SessionEvents(event.Events):
:meth:`~.SessionEvents.before_attach`
+ :ref:`session_lifecycle_events`
+
"""
@event._legacy_signature("0.9",
@@ -1439,6 +1513,244 @@ class SessionEvents(event.Events):
"""
+ def transient_to_pending(self, session, instance):
+ """Intercept the "transient to pending" transition for a specific object.
+
+ This event is a specialization of the
+ :meth:`.SessionEvents.after_attach` event which is only invoked
+ for this specific transition. It is invoked typically during the
+ :meth:`.Session.add` call.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def pending_to_transient(self, session, instance):
+ """Intercept the "pending to transient" transition for a specific object.
+
+ This less common transition occurs when a pending object that has
+ not been flushed is evicted from the session; this can occur
+ when the :meth:`.Session.rollback` method rolls back the transaction,
+ or when the :meth:`.Session.expunge` method is used.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def persistent_to_transient(self, session, instance):
+ """Intercept the "persistent to transient" transition for a specific object.
+
+ This less common transition occurs when a pending object that
+ has been flushed is evicted from the session; this can occur
+ when the :meth:`.Session.rollback` method rolls back the transaction.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def pending_to_persistent(self, session, instance):
+ """Intercept the "pending to persistent"" transition for a specific object.
+
+ This event is invoked within the flush process, and is
+ similar to scanning the :attr:`.Session.new` collection within
+ the :meth:`.SessionEvents.after_flush` event. However, in this
+ case the object has already been moved to the persistent state
+ when the event is called.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def detached_to_persistent(self, session, instance):
+ """Intercept the "detached to persistent" transition for a specific object.
+
+ This event is a specialization of the
+ :meth:`.SessionEvents.after_attach` event which is only invoked
+ for this specific transition. It is invoked typically during the
+ :meth:`.Session.add` call, as well as during the
+ :meth:`.Session.delete` call if the object was not previously
+ associated with the
+ :class:`.Session` (note that an object marked as "deleted" remains
+ in the "persistent" state until the flush proceeds).
+
+ .. note::
+
+ If the object becomes persistent as part of a call to
+ :meth:`.Session.delete`, the object is **not** yet marked as
+ deleted when this event is called. To detect deleted objects,
+ check the ``deleted`` flag sent to the
+ :meth:`.SessionEvents.persistent_to_detached` event after the
+ flush proceeds, or check the :attr:`.Session.deleted` collection
+ within the :meth:`.SessionEvents.before_flush` event if deleted
+ objects need to be intercepted before the flush.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def loaded_as_persistent(self, session, instance):
+ """Intercept the "loaded as peristent" transition for a specific object.
+
+ This event is invoked within the ORM loading process, and is invoked
+ very similarly to the :meth:`.InstanceEvents.load` event. However,
+ the event here is linkable to a :class:`.Session` class or instance,
+ rather than to a mapper or class hierarchy, and integrates
+ with the other session lifecycle events smoothly. The object
+ is guaranteed to be present in the session's identity map when
+ this event is called.
+
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def persistent_to_deleted(self, session, instance):
+ """Intercept the "persistent to deleted" transition for a specific object.
+
+ This event is invoked when a persistent object's identity
+ is deleted from the database within a flush, however the object
+ still remains associated with the :class:`.Session` until the
+ transaction completes.
+
+ If the transaction is rolled back, the object moves again
+ to the persistent state, and the
+ :meth:`.SessionEvents.deleted_to_persistent` event is called.
+ If the transaction is committed, the object becomes detached,
+ which will emit the :meth:`.SessionEvents.deleted_to_detached`
+ event.
+
+ Note that while the :meth:`.Session.delete` method is the primary
+ public interface to mark an object as deleted, many objects
+ get deleted due to cascade rules, which are not always determined
+ until flush time. Therefore, there's no way to catch
+ every object that will be deleted until the flush has proceeded.
+ The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
+ invoked at the end of a flush.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def deleted_to_persistent(self, session, instance):
+ """Intercept the "deleted to persistent" transition for a specific object.
+
+ This transition occurs only when an object that's been deleted
+ successfully in a flush is restored due to a call to
+ :meth:`.Session.rollback`. The event is not called under
+ any other circumstances.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def deleted_to_detached(self, session, instance):
+ """Intercept the "deleted to detached" transition for a specific object.
+
+ This event is invoked when a deleted object is evicted
+ from the session. The typical case when this occurs is when
+ the transaction for a :class:`.Session` in which the object
+ was deleted is committed; the object moves from the deleted
+ state to the detached state.
+
+ It is also invoked for objects that were deleted in a flush
+ when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
+ methods are called, as well as if the object is individually
+ expunged from its deleted state via :meth:`.Session.expunge`.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ def persistent_to_detached(self, session, instance):
+ """Intercept the "persistent to detached" transition for a specific object.
+
+ This event is invoked when a persistent object is evicted
+ from the session. There are many conditions that cause this
+ to happen, including:
+
+ * using a method such as :meth:`.Session.expunge`
+ or :meth:`.Session.close`
+
+ * Calling the :meth:`.Session.rollback` method, when the object
+ was part of an INSERT statement for that session's transaction
+
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ :param deleted: boolean. If True, indicates this object moved
+ to the detached state because it was marked as deleted and flushed.
+
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
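# The new object-lifecycle hooks above can be listened for on a Session (or
# sessionmaker) like any other session event.  A hedged sketch:
from sqlalchemy import event
from sqlalchemy.orm import Session

@event.listens_for(Session, "pending_to_persistent")
def _pending_to_persistent(session, instance):
    # fires inside the flush, after the instance has gained its identity key
    print("became persistent: %r" % instance)

@event.listens_for(Session, "persistent_to_detached")
def _persistent_to_detached(session, instance):
    print("detached: %r" % instance)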
class AttributeEvents(event.Events):
"""Define events for object attributes.
@@ -1638,7 +1950,7 @@ class AttributeEvents(event.Events):
and also during replace operations::
- u1.addresess = [a2, a3] # <- new collection
+ u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
@@ -1701,7 +2013,7 @@ class QueryEvents(event.Events):
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
- entity = desc['expr']
+ entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
index 46be2b719..2dfe3fd5c 100644
--- a/lib/sqlalchemy/orm/identity.py
+++ b/lib/sqlalchemy/orm/identity.py
@@ -8,7 +8,8 @@
import weakref
from . import attributes
from .. import util
-
+from .. import exc as sa_exc
+from . import util as orm_util
class IdentityMap(object):
def __init__(self):
@@ -126,16 +127,18 @@ class WeakInstanceDict(IdentityMap):
if existing_state is not state:
o = existing_state.obj()
if o is not None:
- raise AssertionError(
- "A conflicting state is already "
- "present in the identity map for key %r"
- % (key, ))
+ raise sa_exc.InvalidRequestError(
+ "Can't attach instance "
+ "%s; another instance with key %s is already "
+ "present in this session." % (
+ orm_util.state_str(state), state.key))
else:
- return
+ return False
except KeyError:
pass
self._dict[key] = state
self._manage_incoming_state(state)
+ return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
@@ -208,6 +211,18 @@ class WeakInstanceDict(IdentityMap):
class StrongInstanceDict(IdentityMap):
+ """A 'strong-referencing' version of the identity map.
+
+ .. deprecated:: 1.1
+ The strong
+ reference identity map is legacy. See the
+ recipe at :ref:`session_referencing_behavior` for
+ an event-based approach to maintaining strong identity
+ references.
+
+
+ """
+
if util.py2k:
def itervalues(self):
return self._dict.itervalues()
@@ -256,12 +271,16 @@ class StrongInstanceDict(IdentityMap):
def add(self, state):
if state.key in self:
if attributes.instance_state(self._dict[state.key]) is not state:
- raise AssertionError('A conflicting state is already '
- 'present in the identity map for key %r'
- % (state.key, ))
+ raise sa_exc.InvalidRequestError(
+ "Can't attach instance "
+ "%s; another instance with key %s is already "
+ "present in this session." % (
+ orm_util.state_str(state), state.key))
+ return False
else:
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
+ return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
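# identity.py now raises InvalidRequestError (and returns True/False from
# add()) instead of AssertionError when a second instance carries an identity
# key already present in the map.  A hedged sketch of the caller-facing
# change, assuming an active `session`; the exact operation that triggers the
# conflict depends on the application (for example, duplicate primary keys
# reaching a flush):
from sqlalchemy import exc

try:
    session.flush()
except exc.InvalidRequestError:
    # message reads roughly: "Can't attach instance <User at 0x...>;
    # another instance with key (...) is already present in this session."
    session.rollback()
    raise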
diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py
index cd4a0116d..ed8f27332 100644
--- a/lib/sqlalchemy/orm/interfaces.py
+++ b/lib/sqlalchemy/orm/interfaces.py
@@ -234,7 +234,7 @@ class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""
def merge(self, session, source_state, source_dict, dest_state,
- dest_dict, load, _recursive):
+ dest_dict, load, _recursive, _resolve_conflict_map):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py
index b81e98a58..b5a62d6b2 100644
--- a/lib/sqlalchemy/orm/loading.py
+++ b/lib/sqlalchemy/orm/loading.py
@@ -32,8 +32,7 @@ def instances(query, cursor, context):
context.runid = _new_runid()
- filter_fns = [ent.filter_fn for ent in query._entities]
- filtered = id in filter_fns
+ filtered = query._has_mapper_entities
single_entity = len(query._entities) == 1 and \
query._entities[0].supports_single_entity
@@ -43,7 +42,12 @@ def instances(query, cursor, context):
filter_fn = id
else:
def filter_fn(row):
- return tuple(fn(x) for x, fn in zip(row, filter_fns))
+ return tuple(
+ id(item)
+ if ent.use_id_for_hash
+ else item
+ for ent, item in zip(query._entities, row)
+ )
try:
(process, labels) = \
@@ -104,7 +108,7 @@ def merge_result(querylib, query, iterator, load=True):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
- load=load, _recursive={})
+ load=load, _recursive={}, _resolve_conflict_map={})
for instance in iterator]
else:
result = list(iterator)
@@ -121,7 +125,7 @@ def merge_result(querylib, query, iterator, load=True):
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
- load=load, _recursive={})
+ load=load, _recursive={}, _resolve_conflict_map={})
result.append(keyed_tuple(newrow))
return iter(result)
@@ -335,6 +339,9 @@ def _instance_processor(
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
+ persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
+ if persistent_evt:
+ loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
@@ -428,8 +435,11 @@ def _instance_processor(
loaded_instance, populate_existing, populators)
if isnew:
- if loaded_instance and load_evt:
- state.manager.dispatch.load(state, context)
+ if loaded_instance:
+ if load_evt:
+ state.manager.dispatch.load(state, context)
+ if persistent_evt:
+ loaded_as_persistent(context.session, state.obj())
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props)
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 48fbaae32..95aa14a26 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -1915,6 +1915,19 @@ class Mapper(InspectionAttr):
"""
@_memoized_configured_property
+ def _insert_cols_evaluating_none(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ col.key for col in columns
+ if col.type.should_evaluate_none
+ )
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @_memoized_configured_property
def _insert_cols_as_none(self):
return dict(
(
@@ -1922,7 +1935,8 @@ class Mapper(InspectionAttr):
frozenset(
col.key for col in columns
if not col.primary_key and
- not col.server_default and not col.default)
+ not col.server_default and not col.default
+ and not col.type.should_evaluate_none)
)
for table, columns in self._cols_by_table.items()
)
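# _insert_cols_evaluating_none is keyed off the new type.should_evaluate_none
# flag: columns whose type opts in have a Python None included in the INSERT
# instead of being dropped in favor of the column default.  A hedged sketch
# using the PostgreSQL JSON type and TypeEngine.evaluates_none() (names
# illustrative):
from sqlalchemy import Column, Integer
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Data(Base):
    __tablename__ = 'data'
    id = Column(Integer, primary_key=True)
    # evaluates_none() returns a copy of the type with
    # should_evaluate_none=True, so payload=None inserts a JSON "null"
    payload = Column(JSON().evaluates_none(), nullable=True)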
@@ -1956,12 +1970,24 @@ class Mapper(InspectionAttr):
(
table,
frozenset([
- col for col in columns
+ col.key for col in columns
if col.server_default is not None])
)
for table, columns in self._cols_by_table.items()
)
+ @_memoized_configured_property
+ def _server_onupdate_default_cols(self):
+ return dict(
+ (
+ table,
+ frozenset([
+ col.key for col in columns
+ if col.server_onupdate is not None])
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
@@ -2557,15 +2583,24 @@ class Mapper(InspectionAttr):
for all relationships that meet the given cascade rule.
:param type_:
- The name of the cascade rule (i.e. save-update, delete,
- etc.)
+ The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
+ etc.).
+
+ .. note:: the ``"all"`` cascade is not accepted here. For a generic
+ object traversal function, see :ref:`faq_walk_objects`.
:param state:
The lead InstanceState. child items will be processed per
the relationships defined for this object's mapper.
- the return value are object instances; this provides a strong
- reference so that they don't fall out of scope immediately.
+ :return: the method yields individual object instances.
+
+ .. seealso::
+
+ :ref:`unitofwork_cascades`
+
+ :ref:`faq_walk_objects` - illustrates a generic function to
+ traverse all objects without relying on cascades.
"""
visited_states = set()
@@ -2682,7 +2717,33 @@ def configure_mappers():
have been constructed thus far.
This function can be called any number of times, but in
- most cases is handled internally.
+ most cases is invoked automatically, the first time mappings are used,
+ as well as whenever mappings are used and additional not-yet-configured
+ mappers have been constructed.
+
+ Points at which this occurs include when a mapped class is instantiated
+ into an instance, as well as when the :meth:`.Session.query` method
+ is used.
+
+ The :func:`.configure_mappers` function provides several event hooks
+ that can be used to augment its functionality. These methods include:
+
+ * :meth:`.MapperEvents.before_configured` - called once before
+ :func:`.configure_mappers` does any work; this can be used to establish
+ additional options, properties, or related mappings before the operation
+ proceeds.
+
+ * :meth:`.MapperEvents.mapper_configured` - called as each individual
+ :class:`.Mapper` is configured within the process; will include all
+ mapper state except for backrefs set up by other mappers that are still
+ to be configured.
+
+ * :meth:`.MapperEvents.after_configured` - called once after
+ :func:`.configure_mappers` is complete; at this stage, all
+ :class:`.Mapper` objects that are known to SQLAlchemy will be fully
+ configured. Note that the calling application may still have other
+ mappings that haven't been produced yet, such as if they are in modules
+ as yet unimported.
"""
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index 0bfee2ece..e6a2c0634 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -375,10 +375,12 @@ def _collect_insert_commands(
propkey_to_col = mapper._propkey_to_col[table]
+ eval_none = mapper._insert_cols_evaluating_none[table]
+
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
- if value is None:
+ if value is None and propkey not in eval_none:
continue
elif not bulk and isinstance(value, sql.ClauseElement):
value_params[col.key] = value
@@ -446,6 +448,7 @@ def _collect_update_commands(
set(propkey_to_col).intersection(state_dict).difference(
mapper._pk_keys_by_table[table])
)
+ has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
@@ -461,6 +464,12 @@ def _collect_update_commands(
value, state.committed_state[propkey]) is not True:
params[col.key] = value
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = mapper._server_onupdate_default_cols[table].\
+ issubset(params)
+ else:
+ has_all_defaults = True
+
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
@@ -483,7 +492,7 @@ def _collect_update_commands(
col = mapper.version_id_col
params[col._label] = update_version_id
- if col.key not in params and \
+ if (bulk or col.key not in params) and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
@@ -527,7 +536,7 @@ def _collect_update_commands(
params.update(pk_params)
yield (
state, state_dict, params, mapper,
- connection, value_params)
+ connection, value_params, has_all_defaults)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
@@ -617,37 +626,42 @@ def _emit_update_statements(base_mapper, uowtransaction,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
- if mapper.base_mapper.eager_defaults:
- stmt = stmt.return_defaults()
- elif mapper.version_id_col is not None:
- stmt = stmt.return_defaults(mapper.version_id_col)
-
return stmt
- statement = base_mapper._memo(('update', table), update_stmt)
+ cached_stmt = base_mapper._memo(('update', table), update_stmt)
- for (connection, paramkeys, hasvalue), \
+ for (connection, paramkeys, hasvalue, has_all_defaults), \
records in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
- bool(rec[5]))): # whether or not we have "value" parameters
-
+ bool(rec[5]), # whether or not we have "value" parameters
+ rec[6] # has_all_defaults
+ )
+ ):
rows = 0
records = list(records)
+ statement = cached_stmt
+
# TODO: would be super-nice to not have to determine this boolean
# inside the loop here, in the 99.9999% of the time there's only
# one connection in use
assert_singlerow = connection.dialect.supports_sane_rowcount
assert_multirow = assert_singlerow and \
connection.dialect.supports_sane_multi_rowcount
- allow_multirow = not needs_version_id or assert_multirow
+ allow_multirow = has_all_defaults and not needs_version_id
+
+ if bookkeeping and not has_all_defaults and \
+ mapper.base_mapper.eager_defaults:
+ statement = statement.return_defaults()
+ elif mapper.version_id_col is not None:
+ statement = statement.return_defaults(mapper.version_id_col)
if hasvalue:
for state, state_dict, params, mapper, \
- connection, value_params in records:
+ connection, value_params, has_all_defaults in records:
c = connection.execute(
statement.values(value_params),
params)
@@ -667,18 +681,21 @@ def _emit_update_statements(base_mapper, uowtransaction,
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, params, mapper, \
- connection, value_params in records:
+ connection, value_params, has_all_defaults in records:
c = cached_connections[connection].\
execute(statement, params)
- _postfetch(
- mapper,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- c.context.compiled_parameters[0],
- value_params)
+
+ # TODO: why with bookkeeping=False?
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
@@ -692,17 +709,19 @@ def _emit_update_statements(base_mapper, uowtransaction,
execute(statement, multiparams)
rows += c.rowcount
+
for state, state_dict, params, mapper, \
- connection, value_params in records:
- _postfetch(
- mapper,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- c.context.compiled_parameters[0],
- value_params)
+ connection, value_params, has_all_defaults in records:
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
if check_rowcount:
if rows != len(records):
@@ -723,7 +742,7 @@ def _emit_insert_statements(base_mapper, uowtransaction,
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
- statement = base_mapper._memo(('insert', table), table.insert)
+ cached_stmt = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(
@@ -734,6 +753,9 @@ def _emit_insert_statements(base_mapper, uowtransaction,
bool(rec[5]), # whether we have "value" parameters
rec[6],
rec[7])):
+
+ statement = cached_stmt
+
if not bookkeeping or \
(
has_all_defaults
@@ -752,15 +774,18 @@ def _emit_insert_statements(base_mapper, uowtransaction,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
- _postfetch(
- mapper_rec,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- last_inserted_params,
- value_params)
+ if state:
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ last_inserted_params,
+ value_params)
+ else:
+ _postfetch_bulk_save(mapper_rec, state_dict, table)
else:
if not has_all_defaults and base_mapper.eager_defaults:
@@ -789,15 +814,19 @@ def _emit_insert_statements(base_mapper, uowtransaction,
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
state_dict[prop.key] = pk
- _postfetch(
- mapper_rec,
- uowtransaction,
- table,
- state,
- state_dict,
- result,
- result.context.compiled_parameters[0],
- value_params)
+ if bookkeeping:
+ if state:
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ result,
+ result.context.compiled_parameters[0],
+ value_params)
+ else:
+ _postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(base_mapper, uowtransaction,
@@ -957,7 +986,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
def _postfetch(mapper, uowtransaction, table,
- state, dict_, result, params, value_params, bulk=False):
+ state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
@@ -1005,13 +1034,15 @@ def _postfetch(mapper, uowtransaction, table,
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
- if state is None:
- sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
- else:
- sync.populate(state, m, state, m,
- equated_pairs,
- uowtransaction,
- mapper.passive_updates)
+ sync.populate(state, m, state, m,
+ equated_pairs,
+ uowtransaction,
+ mapper.passive_updates)
+
+
+def _postfetch_bulk_save(mapper, dict_, table):
+ for m, equated_pairs in mapper._table_to_equated[table]:
+ sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
@@ -1242,10 +1273,16 @@ class BulkUpdate(BulkUD):
"Invalid expression type: %r" % key)
def _do_exec(self):
- values = dict(
+
+ values = [
(self._resolve_string_to_expr(k), v)
- for k, v in self.values.items()
- )
+ for k, v in (
+ self.values.items() if hasattr(self.values, 'items')
+ else self.values)
+ ]
+ if not self.update_kwargs.get('preserve_parameter_order', False):
+ values = dict(values)
+
update_stmt = sql.update(self.primary_table,
self.context.whereclause, values,
**self.update_kwargs)
@@ -1295,7 +1332,9 @@ class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
- for key, value in self.values.items():
+ values = (self.values.items() if hasattr(self.values, 'items')
+ else self.values)
+ for key, value in values:
key = self._resolve_key_to_attrname(key)
if key is not None:
self.value_evaluators[key] = evaluator_compiler.process(
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
index 55e02984b..0d4e1b771 100644
--- a/lib/sqlalchemy/orm/properties.py
+++ b/lib/sqlalchemy/orm/properties.py
@@ -39,7 +39,7 @@ class ColumnProperty(StrategizedProperty):
'instrument', 'comparator_factory', 'descriptor', 'extension',
'active_history', 'expire_on_flush', 'info', 'doc',
'strategy_class', '_creation_order', '_is_polymorphic_discriminator',
- '_mapped_by_synonym', '_deferred_loader')
+ '_mapped_by_synonym', '_deferred_column_loader')
def __init__(self, *columns, **kwargs):
"""Provide a column-level property for use with a Mapper.
@@ -206,7 +206,7 @@ class ColumnProperty(StrategizedProperty):
get_committed_value(state, dict_, passive=passive)
def merge(self, session, source_state, source_dict, dest_state,
- dest_dict, load, _recursive):
+ dest_dict, load, _recursive, _resolve_conflict_map):
if not self.instrument:
return
elif self.key in source_dict:
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 8b3df08e7..e1b920bbb 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -103,6 +103,7 @@ class Query(object):
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
+ _has_mapper_entities = False
def __init__(self, entities, session=None):
self.session = session
@@ -114,6 +115,7 @@ class Query(object):
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
+ self._has_mapper_entities = False
for ent in util.to_list(entities):
entity_wrapper(self, ent)
@@ -287,6 +289,8 @@ class Query(object):
return self._entities[0]
def _mapper_zero(self):
+ # TODO: self._select_from_entity is not a mapper
+ # so this method is misnamed
return self._select_from_entity \
if self._select_from_entity is not None \
else self._entity_zero().entity_zero
@@ -608,6 +612,16 @@ class Query(object):
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
+ .. note:: The :meth:`.Query.with_labels` method *only* applies
+ the output of :attr:`.Query.statement`, and *not* to any of
+ the result-row invoking systems of :class:`.Query` itself, e.g.
+ :meth:`.Query.first`, :meth:`.Query.all`, etc. To execute
+ a query using :meth:`.Query.with_labels`, invoke the
+ :attr:`.Query.statement` using :meth:`.Session.execute`::
+
+ result = session.execute(query.with_labels().statement)
+
+
"""
self._with_labels = True
@@ -930,11 +944,13 @@ class Query(object):
"""
if property is None:
+ mapper_zero = inspect(self._mapper_zero()).mapper
+
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
- prop.mapper is self._mapper_zero():
+ prop.mapper is mapper_zero:
property = prop
break
else:
@@ -972,8 +988,169 @@ class Query(object):
"""return a Query that selects from this Query's
SELECT statement.
- \*entities - optional list of entities which will replace
- those being selected.
+ :meth:`.Query.from_self` essentially turns the SELECT statement
+ into a SELECT of itself. Given a query such as::
+
+ q = session.query(User).filter(User.name.like('e%'))
+
+ Given the :meth:`.Query.from_self` version::
+
+ q = session.query(User).filter(User.name.like('e%')).from_self()
+
+ This query renders as:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1) AS anon_1
+
+ There are lots of cases where :meth:`.Query.from_self` may be useful.
+ A simple one is where above, we may want to apply a row LIMIT to
+ the set of user objects we query against, and then apply additional
+ joins against that row-limited set::
+
+ q = session.query(User).filter(User.name.like('e%')).\\
+ limit(5).from_self().\\
+ join(User.addresses).filter(Address.email.like('q%'))
+
+ The above query joins to the ``Address`` entity but only against the
+ first five results of the ``User`` query:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1
+
+ **Automatic Aliasing**
+
+ Another key behavior of :meth:`.Query.from_self` is that it applies
+ **automatic aliasing** to the entities inside the subquery, when
+ they are referenced on the outside. Above, if we continue to
+ refer to the ``User`` entity without any additional aliasing applied
+        to it, those references will be in terms of the subquery::
+
+ q = session.query(User).filter(User.name.like('e%')).\\
+ limit(5).from_self().\\
+ join(User.addresses).filter(Address.email.like('q%')).\\
+ order_by(User.name)
+
+ The ORDER BY against ``User.name`` is aliased to be in terms of the
+ inner subquery:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
+
+ The automatic aliasing feature only works in a **limited** way,
+ for simple filters and orderings. More ambitious constructions
+ such as referring to the entity in joins should prefer to use
+ explicit subquery objects, typically making use of the
+ :meth:`.Query.subquery` method to produce an explicit subquery object.
+ Always test the structure of queries by viewing the SQL to ensure
+ a particular structure does what's expected!
+
+ **Changing the Entities**
+
+ :meth:`.Query.from_self` also includes the ability to modify what
+ columns are being queried. In our example, we want ``User.id``
+ to be queried by the inner query, so that we can join to the
+ ``Address`` entity on the outside, but we only wanted the outer
+ query to return the ``Address.email`` column::
+
+ q = session.query(User).filter(User.name.like('e%')).\\
+ limit(5).from_self(Address.email).\\
+ join(User.addresses).filter(Address.email.like('q%'))
+
+ yielding:
+
+ .. sourcecode:: sql
+
+ SELECT address.email AS address_email
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1
+
+ **Looking out for Inner / Outer Columns**
+
+ Keep in mind that when referring to columns that originate from
+ inside the subquery, we need to ensure they are present in the
+ columns clause of the subquery itself; this is an ordinary aspect of
+ SQL. For example, if we wanted to load from a joined entity inside
+ the subquery using :func:`.contains_eager`, we need to add those
+ columns. Below illustrates a join of ``Address`` to ``User``,
+ then a subquery, and then we'd like :func:`.contains_eager` to access
+ the ``User`` columns::
+
+ q = session.query(Address).join(Address.user).\\
+ filter(User.name.like('e%'))
+
+ q = q.add_entity(User).from_self().\\
+ options(contains_eager(Address.user))
+
+ We use :meth:`.Query.add_entity` above **before** we call
+ :meth:`.Query.from_self` so that the ``User`` columns are present
+ in the inner subquery, so that they are available to the
+ :func:`.contains_eager` modifier we are using on the outside,
+ producing:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.address_id AS anon_1_address_id,
+ anon_1.address_email AS anon_1_address_email,
+ anon_1.address_user_id AS anon_1_address_user_id,
+ anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (
+ SELECT address.id AS address_id,
+ address.email AS address_email,
+ address.user_id AS address_user_id,
+ "user".id AS user_id,
+ "user".name AS user_name
+ FROM address JOIN "user" ON "user".id = address.user_id
+ WHERE "user".name LIKE :name_1) AS anon_1
+
+ If we didn't call ``add_entity(User)``, but still asked
+ :func:`.contains_eager` to load the ``User`` entity, it would be
+ forced to add the table on the outside without the correct
+        join criteria - note the ``anon_1, "user"`` phrase at
+ the end:
+
+ .. sourcecode:: sql
+
+ -- incorrect query
+ SELECT anon_1.address_id AS anon_1_address_id,
+ anon_1.address_email AS anon_1_address_email,
+ anon_1.address_user_id AS anon_1_address_user_id,
+ "user".id AS user_id,
+ "user".name AS user_name
+ FROM (
+ SELECT address.id AS address_id,
+ address.email AS address_email,
+ address.user_id AS address_user_id
+ FROM address JOIN "user" ON "user".id = address.user_id
+ WHERE "user".name LIKE :name_1) AS anon_1, "user"
+
+ :param \*entities: optional list of entities which will replace
+ those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
@@ -1280,7 +1457,9 @@ class Query(object):
session.query(MyClass).filter(MyClass.name == 'some name')
- Multiple criteria are joined together by AND::
+ Multiple criteria may be specified as comma separated; the effect
+ is that they will be joined together using the :func:`.and_`
+ function::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
@@ -1289,9 +1468,6 @@ class Query(object):
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
- .. versionchanged:: 0.7.5
- Multiple criteria joined by AND.
-
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
@@ -1315,7 +1491,9 @@ class Query(object):
session.query(MyClass).filter_by(name = 'some name')
- Multiple criteria are joined together by AND::
+ Multiple criteria may be specified as comma separated; the effect
+ is that they will be joined together using the :func:`.and_`
+ function::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
@@ -2323,6 +2501,19 @@ class Query(object):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
+
+ .. note::
+
+ The :meth:`.distinct` call includes logic that will automatically
+ add columns from the ORDER BY of the query to the columns
+ clause of the SELECT statement, to satisfy the common need
+ of the database backend that ORDER BY columns be part of the
+ SELECT list when DISTINCT is used. These columns *are not*
+ added to the list of columns actually fetched by the
+ :class:`.Query`, however, so would not affect results.
+ The columns are passed through when using the
+ :attr:`.Query.statement` accessor, however.
+
:param \*expr: optional column expressions. When present,
            the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
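
A minimal sketch of the behavior described in the note above, assuming a mapped ``User`` class and an existing ``session``; the names are illustrative only:

    # hypothetical model/session; shows ORDER BY columns folded into DISTINCT
    q = session.query(User.id).order_by(User.name).distinct()
    print(q.statement)   # the SELECT list also includes "user".name
    rows = q.all()       # result rows still contain only the id column
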
@@ -2436,7 +2627,13 @@ class Query(object):
(note this may consist of multiple result rows if join-loaded
collections are present).
- Calling ``first()`` results in an execution of the underlying query.
+ Calling :meth:`.Query.first` results in an execution of the underlying query.
+
+ .. seealso::
+
+ :meth:`.Query.one`
+
+ :meth:`.Query.one_or_none`
"""
if self._statement is not None:
@@ -2448,26 +2645,27 @@ class Query(object):
else:
return None
- def one(self):
- """Return exactly one result or raise an exception.
+ def one_or_none(self):
+ """Return at most one result or raise an exception.
- Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
+ Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
- rows are returned for a query that does not return object
- identities.
+ rows are returned for a query that returns only scalar values
+ as opposed to full identity-mapped entities.
+
+ Calling :meth:`.Query.one_or_none` results in an execution of the
+ underlying query.
+
+ .. versionadded:: 1.0.9
- Note that an entity query, that is, one which selects one or
- more mapped classes as opposed to individual column attributes,
- may ultimately represent many rows but only one row of
- unique entity or entities - this is a successful result for one().
+ Added :meth:`.Query.one_or_none`
- Calling ``one()`` results in an execution of the underlying query.
+ .. seealso::
+
+ :meth:`.Query.first`
- .. versionchanged:: 0.6
- ``one()`` fully fetches all results instead of applying
- any kind of limit, so that the "unique"-ing of entities does not
- conceal multiple object identities.
+ :meth:`.Query.one`
"""
ret = list(self)
@@ -2476,10 +2674,38 @@ class Query(object):
if l == 1:
return ret[0]
elif l == 0:
- raise orm_exc.NoResultFound("No row was found for one()")
+ return None
else:
raise orm_exc.MultipleResultsFound(
+ "Multiple rows were found for one_or_none()")
+
+ def one(self):
+ """Return exactly one result or raise an exception.
+
+ Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
+ no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
+ if multiple object identities are returned, or if multiple
+ rows are returned for a query that returns only scalar values
+ as opposed to full identity-mapped entities.
+
+ Calling :meth:`.one` results in an execution of the underlying query.
+
+ .. seealso::
+
+ :meth:`.Query.first`
+
+ :meth:`.Query.one_or_none`
+
+ """
+ try:
+ ret = self.one_or_none()
+ except orm_exc.MultipleResultsFound:
+ raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
+ else:
+ if ret is None:
+ raise orm_exc.NoResultFound("No row was found for one()")
+ return ret
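+
+A brief sketch contrasting the new :meth:`.Query.one_or_none` with
+:meth:`.Query.one`, assuming a mapped ``User`` class and an existing
+``session``:
+
+    from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
+
+    user = session.query(User).filter_by(name='ed').one_or_none()
+    if user is None:
+        print("no row matched")      # zero rows: None, no exception
+
+    try:
+        user = session.query(User).filter_by(name='ed').one()
+    except NoResultFound:
+        print("one() still raises for zero rows")
+    except MultipleResultsFound:
+        print("both methods raise for more than one row")
+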
def scalar(self):
"""Return the first element of the first result or None
@@ -2849,7 +3075,12 @@ class Query(object):
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
- values or sql expressions as values.
+ values or sql expressions as values. If :ref:`parameter-ordered
+ mode <updates_order_parameters>` is desired, the values can be
+ passed as a list of 2-tuples;
+ this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag is passed to the :paramref:`.Query.update.update_args` dictionary
+ as well.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
@@ -2880,7 +3111,8 @@ class Query(object):
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`.update` construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
- as ``mysql_limit``.
+ as ``mysql_limit``, as well as other special arguments such as
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
.. versionadded:: 1.0.0
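
A sketch of the parameter-ordered mode referenced above; the entity and column names are assumptions for illustration:

    # emit UPDATE ... SET name=:name, fullname=:fullname in exactly this order
    session.query(User).filter(User.id == 5).update(
        [("name", "ed"), ("fullname", "Ed Jones")],
        update_args={"preserve_parameter_order": True},
        synchronize_session=False,
    )
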
@@ -3181,12 +3413,14 @@ class _MapperEntity(_QueryEntity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
-
+ query._has_mapper_entities = True
self.entities = [entity]
self.expr = entity
supports_single_entity = True
+ use_id_for_hash = True
+
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
@@ -3232,8 +3466,6 @@ class _MapperEntity(_QueryEntity):
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
- filter_fn = id
-
@property
def type(self):
return self.mapper.class_
@@ -3462,6 +3694,8 @@ class Bundle(InspectionAttr):
class _BundleEntity(_QueryEntity):
+ use_id_for_hash = False
+
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
@@ -3478,8 +3712,6 @@ class _BundleEntity(_QueryEntity):
self.entities = ()
- self.filter_fn = lambda item: item
-
self.supports_single_entity = self.bundle.single_entity
@property
@@ -3582,11 +3814,7 @@ class _ColumnEntity(_QueryEntity):
search_entities = True
self.type = type_ = column.type
- if type_.hashable:
- self.filter_fn = lambda item: item
- else:
- counter = util.counter()
- self.filter_fn = lambda item: counter()
+ self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
@@ -3619,7 +3847,7 @@ class _ColumnEntity(_QueryEntity):
self._from_entities = set(self.entities)
else:
all_elements = [
- elem for elem in visitors.iterate(column, {})
+ elem for elem in sql_util.surface_column_elements(column)
if 'parententity' in elem._annotations
]
diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py
index da0730f46..f822071c4 100644
--- a/lib/sqlalchemy/orm/relationships.py
+++ b/lib/sqlalchemy/orm/relationships.py
@@ -275,15 +275,31 @@ class RelationshipProperty(StrategizedProperty):
:paramref:`~.relationship.backref` - alternative form
of backref specification.
- :param bake_queries:
- Use the :class:`.BakedQuery` cache to cache queries used in lazy
- loads. True by default, as this typically improves performance
- significantly. Set to False to reduce ORM memory use, or
- if unresolved stability issues are observed with the baked query
+ :param bake_queries=True:
+ Use the :class:`.BakedQuery` cache to cache the construction of SQL
+ used in lazy loads, when the :func:`.bake_lazy_loaders` function has
+ first been called. Defaults to True and is intended to provide an
+ "opt out" flag per-relationship when the baked query cache system is
+ in use.
+
+ .. warning::
+
+ This flag **only** has an effect when the application-wide
+ :func:`.bake_lazy_loaders` function has been called. It
+            defaults to True, so it is an "opt out" flag.
+
+ Setting this flag to False when baked queries are otherwise in
+            use might be done to reduce
+ ORM memory use for this :func:`.relationship`, or to work around
+ unresolved stability issues observed within the baked query
cache system.
.. versionadded:: 1.0.0
+ .. seealso::
+
+ :ref:`baked_toplevel`
+
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
@@ -604,30 +620,26 @@ class RelationshipProperty(StrategizedProperty):
and examples.
:param passive_updates=True:
- Indicates loading and INSERT/UPDATE/DELETE behavior when the
- source of a foreign key value changes (i.e. an "on update"
- cascade), which are typically the primary key columns of the
- source row.
+ Indicates the persistence behavior to take when a referenced
+ primary key value changes in place, indicating that the referencing
+ foreign key columns will also need their value changed.
- When True, it is assumed that ON UPDATE CASCADE is configured on
+ When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
- dependent rows. Note that with databases which enforce
- referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
- ON UPDATE CASCADE is required for this operation. The
- relationship() will update the value of the attribute on related
- items which are locally present in the session during a flush.
-
- When False, it is assumed that the database does not enforce
- referential integrity and will not be issuing its own CASCADE
- operation for an update. The relationship() will issue the
- appropriate UPDATE statements to the database in response to the
- change of a referenced key, and items locally present in the
- session during a flush will also be refreshed.
-
- This flag should probably be set to False if primary key changes
- are expected and the database in use doesn't support CASCADE
- (i.e. SQLite, MySQL MyISAM tables).
+ dependent rows. When False, the SQLAlchemy :func:`.relationship`
+ construct will attempt to emit its own UPDATE statements to
+ modify related targets. However note that SQLAlchemy **cannot**
+ emit an UPDATE for more than one level of cascade. Also,
+ setting this flag to False is not compatible in the case where
+ the database is in fact enforcing referential integrity, unless
+ those constraints are explicitly "deferred", if the target backend
+ supports it.
+
+ It is highly advised that an application which is employing
+ mutable primary keys keeps ``passive_updates`` set to True,
+ and instead uses the referential integrity features of the database
+ itself in order to handle the change efficiently and fully.
.. seealso::
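
A minimal declarative sketch of the recommended configuration, assuming the database enforces ``ON UPDATE CASCADE`` on the foreign key; all names here are illustrative:

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        username = Column(String(50), primary_key=True)   # mutable primary key

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)
        username = Column(
            String(50),
            ForeignKey('user.username', onupdate='CASCADE'))
        # passive_updates=True (the default): let the database cascade the
        # new key value rather than having the ORM emit its own UPDATEs
        user = relationship('User', passive_updates=True)
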
@@ -1418,7 +1430,7 @@ class RelationshipProperty(StrategizedProperty):
source_dict,
dest_state,
dest_dict,
- load, _recursive):
+ load, _recursive, _resolve_conflict_map):
if load:
for r in self._reverse_property:
@@ -1451,8 +1463,10 @@ class RelationshipProperty(StrategizedProperty):
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
- obj = session._merge(current_state, current_dict,
- load=load, _recursive=_recursive)
+ obj = session._merge(
+ current_state, current_dict,
+ load=load, _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map)
if obj is not None:
dest_list.append(obj)
@@ -1462,16 +1476,19 @@ class RelationshipProperty(StrategizedProperty):
for c in dest_list:
coll.append_without_event(c)
else:
- dest_state.get_impl(self.key)._set_iterable(
- dest_state, dest_dict, dest_list)
+ dest_state.get_impl(self.key).set(
+ dest_state, dest_dict, dest_list,
+ _adapt=False)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
- obj = session._merge(current_state, current_dict,
- load=load, _recursive=_recursive)
+ obj = session._merge(
+ current_state, current_dict,
+ load=load, _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map)
else:
obj = None
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 4619027e5..56513860a 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -180,8 +180,7 @@ class SessionTransaction(object):
if self.session._enable_transaction_accounting:
self._take_snapshot()
- if self.session.dispatch.after_transaction_create:
- self.session.dispatch.after_transaction_create(self.session, self)
+ self.session.dispatch.after_transaction_create(self.session, self)
@property
def is_active(self):
@@ -272,10 +271,9 @@ class SessionTransaction(object):
def _restore_snapshot(self, dirty_only=False):
assert self._is_transaction_boundary
- for s in set(self._new).union(self.session._new):
- self.session._expunge_state(s)
- if s.key:
- del s.key
+ self.session._expunge_states(
+ set(self._new).union(self.session._new),
+ to_transient=True)
for s, (oldkey, newkey) in self._key_switches.items():
self.session.identity_map.safe_discard(s)
@@ -283,10 +281,7 @@ class SessionTransaction(object):
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
- if s.deleted:
- # assert s in self._deleted
- del s.deleted
- self.session._update_impl(s, discard_existing=True)
+ self.session._update_impl(s, revert_deletion=True)
assert not self.session._deleted
@@ -300,8 +295,9 @@ class SessionTransaction(object):
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
- for s in list(self._deleted):
- s._detach()
+
+ statelib.InstanceState._detach_states(
+ list(self._deleted), self.session)
self._deleted.clear()
elif self.nested:
self._parent._new.update(self._new)
@@ -412,11 +408,23 @@ class SessionTransaction(object):
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
+ if _capture_exception:
+ captured_exception = sys.exc_info()[1]
+
boundary = self
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
- transaction._rollback_impl()
+ try:
+ transaction._rollback_impl()
+ except Exception:
+ if _capture_exception:
+ util.warn(
+ "An exception raised during a Session "
+ "persistence operation cannot be raised "
+ "due to an additional ROLLBACK exception; "
+ "the exception is: %s" % captured_exception)
+ raise
transaction._state = DEACTIVE
boundary = transaction
break
@@ -438,7 +446,7 @@ class SessionTransaction(object):
self.close()
if self._parent and _capture_exception:
- self._parent._rollback_exception = sys.exc_info()[1]
+ self._parent._rollback_exception = captured_exception
sess.dispatch.after_soft_rollback(sess, self)
@@ -466,8 +474,7 @@ class SessionTransaction(object):
transaction.close()
self._state = CLOSED
- if self.session.dispatch.after_transaction_end:
- self.session.dispatch.after_transaction_end(self.session, self)
+ self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
@@ -629,16 +636,23 @@ class Session(_SessionClassMethods):
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
- :class:`.Session` is closed. **Deprecated** - this option
- is obsolete.
+ :class:`.Session` is closed. **Deprecated** - The strong
+ reference identity map is legacy. See the
+ recipe at :ref:`session_referencing_behavior` for
+ an event-based approach to maintaining strong identity
+ references.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
- util.warn_deprecated("weak_identity_map=False is deprecated. "
- "This feature is not needed.")
+ util.warn_deprecated(
+ "weak_identity_map=False is deprecated. "
+ "See the documentation on 'Session Referencing Behavior' "
+ "for an event-based approach to maintaining strong identity "
+ "references.")
+
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
@@ -680,7 +694,7 @@ class Session(_SessionClassMethods):
def info(self):
"""A user-modifiable dictionary.
- The initial value of this dictioanry can be populated using the
+ The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
@@ -1086,16 +1100,15 @@ class Session(_SessionClassMethods):
``Session``.
"""
- for state in self.identity_map.all_states() + list(self._new):
- state._detach()
+ all_states = self.identity_map.all_states() + list(self._new)
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
- # TODO: need much more test coverage for bind_mapper() and similar !
- # TODO: + crystallize + document resolution order
- # vis. bind_mapper/bind_table
+ statelib.InstanceState._detach_states(
+ all_states, self
+ )
def _add_bind(self, key, bind):
try:
@@ -1437,7 +1450,7 @@ class Session(_SessionClassMethods):
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
- state._detach()
+ state._detach(self)
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
@@ -1472,23 +1485,26 @@ class Session(_SessionClassMethods):
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
- self._expunge_state(state)
- for o, m, st_, dct_ in cascaded:
- self._expunge_state(st_)
+ self._expunge_states(
+ [state] + [st_ for o, m, st_, dct_ in cascaded]
+ )
- def _expunge_state(self, state):
- if state in self._new:
- self._new.pop(state)
- state._detach()
- elif self.identity_map.contains_state(state):
- self.identity_map.safe_discard(state)
- self._deleted.pop(state, None)
- state._detach()
- elif self.transaction:
- self.transaction._deleted.pop(state, None)
- state._detach()
+ def _expunge_states(self, states, to_transient=False):
+ for state in states:
+ if state in self._new:
+ self._new.pop(state)
+ elif self.identity_map.contains_state(state):
+ self.identity_map.safe_discard(state)
+ self._deleted.pop(state, None)
+ elif self.transaction:
+ # state is "detached" from being deleted, but still present
+ # in the transaction snapshot
+ self.transaction._deleted.pop(state, None)
+ statelib.InstanceState._detach_states(
+ states, self, to_transient=to_transient)
def _register_newly_persistent(self, states):
+ pending_to_persistent = self.dispatch.pending_to_persistent or None
for state in states:
mapper = _state_mapper(state)
@@ -1535,6 +1551,11 @@ class Session(_SessionClassMethods):
)
self._register_altered(states)
+
+ if pending_to_persistent is not None:
+ for state in states:
+ pending_to_persistent(self, state.obj())
+
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
@@ -1548,13 +1569,19 @@ class Session(_SessionClassMethods):
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
+ persistent_to_deleted = self.dispatch.persistent_to_deleted or None
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
- state.deleted = True
+ state._deleted = True
+ # can't call state._detach() here, because this state
+ # is still in the transaction snapshot and needs to be
+ # tracked as part of that
+ if persistent_to_deleted is not None:
+ persistent_to_deleted(self, state.obj())
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
@@ -1609,30 +1636,39 @@ class Session(_SessionClassMethods):
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
+ self._delete_impl(state, instance, head=True)
+
+ def _delete_impl(self, state, obj, head):
+
if state.key is None:
- raise sa_exc.InvalidRequestError(
- "Instance '%s' is not persisted" %
- state_str(state))
+ if head:
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' is not persisted" %
+ state_str(state))
+ else:
+ return
+
+ to_attach = self._before_attach(state, obj)
if state in self._deleted:
return
- # ensure object is attached to allow the
- # cascade operation to load deferred attributes
- # and collections
- self._attach(state, include_before=True)
+ if to_attach:
+ self.identity_map.add(state)
+ self._after_attach(state, obj)
- # grab the cascades before adding the item to the deleted list
- # so that autoflush does not delete the item
- # the strong reference to the instance itself is significant here
- cascade_states = list(state.manager.mapper.cascade_iterator(
- 'delete', state))
+ if head:
+ # grab the cascades before adding the item to the deleted list
+ # so that autoflush does not delete the item
+ # the strong reference to the instance itself is significant here
+ cascade_states = list(state.manager.mapper.cascade_iterator(
+ 'delete', state))
- self._deleted[state] = state.obj()
- self.identity_map.add(state)
+ self._deleted[state] = obj
- for o, m, st_, dct_ in cascade_states:
- self._delete_impl(st_)
+ if head:
+ for o, m, st_, dct_ in cascade_states:
+ self._delete_impl(st_, o, False)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
@@ -1653,6 +1689,10 @@ class Session(_SessionClassMethods):
See :ref:`unitofwork_merging` for a detailed discussion of merging.
+ .. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
+ pending objects with overlapping primary keys in the same way
+ as persistent. See :ref:`change_3601` for discussion.
+
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
@@ -1677,12 +1717,14 @@ class Session(_SessionClassMethods):
should be "clean" as well, else this suggests a mis-use of the
method.
+
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
+ _resolve_conflict_map = {}
if load:
# flush current contents if we expect to load data
@@ -1695,11 +1737,13 @@ class Session(_SessionClassMethods):
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
- load=load, _recursive=_recursive)
+ load=load, _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map)
finally:
self.autoflush = autoflush
- def _merge(self, state, state_dict, load=True, _recursive=None):
+ def _merge(self, state, state_dict, load=True, _recursive=None,
+ _resolve_conflict_map=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
@@ -1715,9 +1759,14 @@ class Session(_SessionClassMethods):
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
+ key_is_persistent = attributes.NEVER_SET not in key[1]
+ else:
+ key_is_persistent = True
if key in self.identity_map:
merged = self.identity_map[key]
+ elif key_is_persistent and key in _resolve_conflict_map:
+ merged = _resolve_conflict_map[key]
elif not load:
if state.modified:
@@ -1749,6 +1798,7 @@ class Session(_SessionClassMethods):
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
+ _resolve_conflict_map[key] = merged
# check that we didn't just pull the exact same
# state out.
@@ -1787,7 +1837,7 @@ class Session(_SessionClassMethods):
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
- load, _recursive)
+ load, _recursive, _resolve_conflict_map)
if not load:
# remove any history
@@ -1809,35 +1859,47 @@ class Session(_SessionClassMethods):
"Object '%s' already has an identity - "
"it can't be registered as pending" % state_str(state))
- self._before_attach(state)
+ obj = state.obj()
+ to_attach = self._before_attach(state, obj)
if state not in self._new:
- self._new[state] = state.obj()
+ self._new[state] = obj
state.insert_order = len(self._new)
- self._attach(state)
-
- def _update_impl(self, state, discard_existing=False):
- if (self.identity_map.contains_state(state) and
- state not in self._deleted):
- return
+ if to_attach:
+ self._after_attach(state, obj)
+ def _update_impl(self, state, revert_deletion=False):
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
- if state.deleted:
- raise sa_exc.InvalidRequestError(
- "Instance '%s' has been deleted. Use the make_transient() "
- "function to send this object back to the transient state." %
- state_str(state)
- )
- self._before_attach(state, check_identity_map=False)
+ if state._deleted:
+ if revert_deletion:
+ if not state._attached:
+ return
+ del state._deleted
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' has been deleted. "
+ "Use the make_transient() "
+ "function to send this object back "
+ "to the transient state." %
+ state_str(state)
+ )
+
+ obj = state.obj()
+ to_attach = self._before_attach(state, obj)
+
self._deleted.pop(state, None)
- if discard_existing:
+ if revert_deletion:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
- self._attach(state)
+
+ if to_attach:
+ self._after_attach(state, obj)
+ elif revert_deletion:
+ self.dispatch.deleted_to_persistent(self, obj)
def _save_or_update_impl(self, state):
if state.key is None:
@@ -1845,17 +1907,6 @@ class Session(_SessionClassMethods):
else:
self._update_impl(state)
- def _delete_impl(self, state):
- if state in self._deleted:
- return
-
- if state.key is None:
- return
-
- self._attach(state, include_before=True)
- self._deleted[state] = state.obj()
- self.identity_map.add(state)
-
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
@@ -1908,40 +1959,35 @@ class Session(_SessionClassMethods):
"""
state = attributes.instance_state(obj)
- self._attach(state, include_before=True)
+ to_attach = self._before_attach(state, obj)
state._load_pending = True
+ if to_attach:
+ self._after_attach(state, obj)
- def _before_attach(self, state, check_identity_map=True):
- if state.session_id != self.hash_key and \
- self.dispatch.before_attach:
- self.dispatch.before_attach(self, state.obj())
-
- if check_identity_map and state.key and \
- state.key in self.identity_map and \
- not self.identity_map.contains_state(state):
- raise sa_exc.InvalidRequestError(
- "Can't attach instance "
- "%s; another instance with key %s is already "
- "present in this session." % (state_str(state), state.key))
+ def _before_attach(self, state, obj):
+ if state.session_id == self.hash_key:
+ return False
- if state.session_id and \
- state.session_id is not self.hash_key and \
- state.session_id in _sessions:
+ if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (state_str(state),
state.session_id, self.hash_key))
- def _attach(self, state, include_before=False):
+ self.dispatch.before_attach(self, obj)
+
+ return True
- if state.session_id != self.hash_key:
- if include_before:
- self._before_attach(state)
- state.session_id = self.hash_key
- if state.modified and state._strong_obj is None:
- state._strong_obj = state.obj()
- if self.dispatch.after_attach:
- self.dispatch.after_attach(self, state.obj())
+ def _after_attach(self, state, obj):
+ state.session_id = self.hash_key
+ if state.modified and state._strong_obj is None:
+ state._strong_obj = obj
+ self.dispatch.after_attach(self, obj)
+
+ if state.key:
+ self.dispatch.detached_to_persistent(self, obj)
+ else:
+ self.dispatch.transient_to_pending(self, obj)
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
@@ -1983,7 +2029,7 @@ class Session(_SessionClassMethods):
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
- operations int the flush.
+        operations in the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
@@ -2700,7 +2746,7 @@ def make_transient(instance):
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
- s._expunge_state(state)
+ s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
@@ -2711,8 +2757,8 @@ def make_transient(instance):
if state.key:
del state.key
- if state.deleted:
- del state.deleted
+ if state._deleted:
+ del state._deleted
def make_transient_to_detached(instance):
@@ -2744,8 +2790,8 @@ def make_transient_to_detached(instance):
raise sa_exc.InvalidRequestError(
"Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
- if state.deleted:
- del state.deleted
+ if state._deleted:
+ del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded)
diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py
index 6034e74de..b648ffa3b 100644
--- a/lib/sqlalchemy/orm/state.py
+++ b/lib/sqlalchemy/orm/state.py
@@ -14,6 +14,7 @@ defines a large part of the ORM's interactivity.
import weakref
from .. import util
+from .. import inspection
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
@@ -21,6 +22,7 @@ from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
from . import base
+@inspection._self_inspects
class InstanceState(interfaces.InspectionAttr):
"""tracks state information at the instance level.
@@ -56,7 +58,7 @@ class InstanceState(interfaces.InspectionAttr):
_strong_obj = None
modified = False
expired = False
- deleted = False
+ _deleted = False
_load_pending = False
is_instance = True
@@ -87,7 +89,6 @@ class InstanceState(interfaces.InspectionAttr):
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
-
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
@@ -133,16 +134,80 @@ class InstanceState(interfaces.InspectionAttr):
self._attached
@property
+ def deleted(self):
+ """Return true if the object is :term:`deleted`.
+
+ An object that is in the deleted state is guaranteed to
+ not be within the :attr:`.Session.identity_map` of its parent
+ :class:`.Session`; however if the session's transaction is rolled
+ back, the object will be restored to the persistent state and
+ the identity map.
+
+ .. note::
+
+ The :attr:`.InstanceState.deleted` attribute refers to a specific
+ state of the object that occurs between the "persistent" and
+ "detached" states; once the object is :term:`detached`, the
+ :attr:`.InstanceState.deleted` attribute **no longer returns
+ True**; in order to detect that a state was deleted, regardless
+ of whether or not the object is associated with a :class:`.Session`,
+ use the :attr:`.InstanceState.was_deleted` accessor.
+
+        .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is not None and \
+ self._attached and self._deleted
+
+ @property
+ def was_deleted(self):
+ """Return True if this object is or was previously in the
+ "deleted" state and has not been reverted to persistent.
+
+ This flag returns True once the object was deleted in flush.
+ When the object is expunged from the session either explicitly
+ or via transaction commit and enters the "detached" state,
+ this flag will continue to report True.
+
+ .. versionadded:: 1.1 - added a local method form of
+ :func:`.orm.util.was_deleted`.
+
+ .. seealso::
+
+ :attr:`.InstanceState.deleted` - refers to the "deleted" state
+
+ :func:`.orm.util.was_deleted` - standalone function
+
+ :ref:`session_object_states`
+
+ """
+ return self._deleted
+
+ @property
def persistent(self):
"""Return true if the object is :term:`persistent`.
+ An object that is in the persistent state is guaranteed to
+ be within the :attr:`.Session.identity_map` of its parent
+ :class:`.Session`.
+
+ .. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
+ accessor no longer returns True for an object that was
+ "deleted" within a flush; use the :attr:`.InstanceState.deleted`
+ accessor to detect this state. This allows the "persistent"
+ state to guarantee membership in the identity map.
+
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
- self._attached
+ self._attached and not self._deleted
@property
def detached(self):
@@ -153,8 +218,7 @@ class InstanceState(interfaces.InspectionAttr):
:ref:`session_object_states`
"""
- return self.key is not None and \
- not self._attached
+ return self.key is not None and not self._attached
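+
+A short sketch of the accessors documented above, using :func:`.inspect`
+on a hypothetical ``user`` object that starts out persistent in ``session``:
+
+    from sqlalchemy import inspect
+
+    state = inspect(user)
+    session.delete(user)
+    session.flush()
+    print(state.persistent)    # False - no longer "persistent" as of 1.1
+    print(state.deleted)       # True  - restored to persistent on rollback
+    session.commit()           # object becomes detached
+    print(state.deleted)       # False - only applies while attached
+    print(state.was_deleted)   # True  - remembers the flush-time deletion
+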
@property
@util.dependencies("sqlalchemy.orm.session")
@@ -241,8 +305,44 @@ class InstanceState(interfaces.InspectionAttr):
"""
return bool(self.key)
- def _detach(self):
- self.session_id = self._strong_obj = None
+ @classmethod
+ def _detach_states(self, states, session, to_transient=False):
+ persistent_to_detached = \
+ session.dispatch.persistent_to_detached or None
+ deleted_to_detached = \
+ session.dispatch.deleted_to_detached or None
+ pending_to_transient = \
+ session.dispatch.pending_to_transient or None
+ persistent_to_transient = \
+ session.dispatch.persistent_to_transient or None
+
+ for state in states:
+ deleted = state._deleted
+ pending = state.key is None
+ persistent = not pending and not deleted
+
+ state.session_id = None
+
+ if to_transient and state.key:
+ del state.key
+ if persistent:
+ if to_transient:
+ if persistent_to_transient is not None:
+ persistent_to_transient(session, state.obj())
+ elif persistent_to_detached is not None:
+ persistent_to_detached(session, state.obj())
+ elif deleted and deleted_to_detached is not None:
+ deleted_to_detached(session, state.obj())
+ elif pending and pending_to_transient is not None:
+ pending_to_transient(session, state.obj())
+
+ state._strong_obj = None
+
+ def _detach(self, session=None):
+ if session:
+ InstanceState._detach_states([self], session)
+ else:
+ self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
@@ -294,7 +394,7 @@ class InstanceState(interfaces.InspectionAttr):
return {}
def _initialize_instance(*mixed, **kwargs):
- self, instance, args = mixed[0], mixed[1], mixed[2:]
+ self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
@@ -374,12 +474,6 @@ class InstanceState(interfaces.InspectionAttr):
state_dict['manager'](self, inst, state_dict)
- def _initialize(self, key):
- """Set this attribute to an empty value or collection,
- based on the AttributeImpl in use."""
-
- self.manager.get_impl(key).initialize(self, self.dict)
-
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
index 78e929345..b60e47bb3 100644
--- a/lib/sqlalchemy/orm/strategies.py
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -346,7 +346,10 @@ class NoLoader(AbstractRelationshipLoader):
self, context, path, loadopt, mapper,
result, adapter, populators):
def invoke_no_load(state, dict_, row):
- state._initialize(self.key)
+ if self.uselist:
+ state.manager.get_impl(self.key).initialize(state, dict_)
+ else:
+ dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@@ -361,7 +364,8 @@ class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
__slots__ = (
'_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col',
- '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns')
+ '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns',
+ '_simple_lazy_clause')
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
@@ -1321,8 +1325,19 @@ class JoinedLoader(AbstractRelationshipLoader):
if adapter:
if getattr(adapter, 'aliased_class', None):
+ # joining from an adapted entity. The adapted entity
+ # might be a "with_polymorphic", so resolve that to our
+ # specific mapper's entity before looking for our attribute
+ # name on it.
+ efm = inspect(adapter.aliased_class).\
+ _entity_for_mapper(
+ parentmapper
+ if parentmapper.isa(self.parent) else self.parent)
+
+ # look for our attribute on the adapted entity, else fall back
+ # to our straight property
onclause = getattr(
- adapter.aliased_class, self.key,
+ efm.entity, self.key,
self.parent_property)
else:
onclause = getattr(
@@ -1363,8 +1378,7 @@ class JoinedLoader(AbstractRelationshipLoader):
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = entity.selectable
- if self.parent_property.secondary is None and \
- not parentmapper:
+ if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py
index cb7a5fef7..3467328e3 100644
--- a/lib/sqlalchemy/orm/strategy_options.py
+++ b/lib/sqlalchemy/orm/strategy_options.py
@@ -180,7 +180,7 @@ class Load(Generative, MapperOption):
return path
def __str__(self):
- return "Load(strategy=%r)" % self.strategy
+ return "Load(strategy=%r)" % (self.strategy, )
def _coerce_strat(self, strategy):
if strategy is not None:
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
index 6d3869679..46183a47d 100644
--- a/lib/sqlalchemy/orm/util.py
+++ b/lib/sqlalchemy/orm/util.py
@@ -537,7 +537,11 @@ class AliasedInsp(InspectionAttr):
def _entity_for_mapper(self, mapper):
self_poly = self.with_polymorphic_mappers
if mapper in self_poly:
- return getattr(self.entity, mapper.class_.__name__)._aliased_insp
+ if mapper is self.mapper:
+ return self
+ else:
+ return getattr(
+ self.entity, mapper.class_.__name__)._aliased_insp
elif mapper.isa(self.mapper):
return self
else:
@@ -985,12 +989,19 @@ def was_deleted(object):
"""Return True if the given object was deleted
within a session flush.
+ This is regardless of whether or not the object is
+ persistent or detached.
+
.. versionadded:: 0.8.0
+ .. seealso::
+
+ :attr:`.InstanceState.was_deleted`
+
"""
state = attributes.instance_state(object)
- return state.deleted
+ return state.was_deleted
def randomize_unitofwork():
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index b38aefb3d..4dd954fc4 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -587,7 +587,12 @@ class _ConnectionRecord(object):
if recycle:
self.__close()
self.info.clear()
+
+ # ensure that if self.__connect() fails,
+ # we are not referring to the previous stale connection here
+ self.connection = None
self.connection = self.__connect()
+
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py
index e8b70061d..fa2cf2399 100644
--- a/lib/sqlalchemy/sql/__init__.py
+++ b/lib/sqlalchemy/sql/__init__.py
@@ -21,6 +21,8 @@ from .expression import (
Update,
alias,
and_,
+ any_,
+ all_,
asc,
between,
bindparam,
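
A hedged sketch of the newly exported ``any_()`` / ``all_()`` functions; the table and the ARRAY-typed column are assumptions for illustration, and exact rendering may vary by dialect:

    from sqlalchemy import all_, any_, column, select, table

    t = table('t', column('x'), column('scores'))        # scores assumed ARRAY
    stmt = select([t]).where(t.c.x == any_(t.c.scores))  # ... x = ANY (t.scores)
    stmt2 = select([t]).where(t.c.x > all_(t.c.scores))  # ... x > ALL (t.scores)
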
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index e9c3d0efa..6766c99b7 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -97,6 +97,8 @@ OPERATORS = {
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
+ operators.any_op: 'ANY ',
+ operators.all_op: 'ALL ',
# modifiers
operators.desc_op: ' DESC',
@@ -281,6 +283,8 @@ class _CompileLabel(visitors.Visitable):
def type(self):
return self.element.type
+ def self_group(self, **kw):
+ return self
class SQLCompiler(Compiled):
@@ -761,6 +765,9 @@ class SQLCompiler(Compiled):
x += "END"
return x
+ def visit_type_coerce(self, type_coerce, **kw):
+ return type_coerce.typed_expression._compiler_dispatch(self, **kw)
+
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
@@ -768,7 +775,7 @@ class SQLCompiler(Compiled):
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
- over.func._compiler_dispatch(self, **kwargs),
+ over.element._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
@@ -779,6 +786,12 @@ class SQLCompiler(Compiled):
)
)
+ def visit_withingroup(self, withingroup, **kwargs):
+ return "%s WITHIN GROUP (ORDER BY %s)" % (
+ withingroup.element._compiler_dispatch(self, **kwargs),
+ withingroup.order_by._compiler_dispatch(self, **kwargs)
+ )
+
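+
+A small sketch of the WITHIN GROUP rendering added here, assuming the
+companion ``FunctionElement.within_group()`` construct from this series is
+available:
+
+    from sqlalchemy import column, func
+
+    expr = func.percentile_cont(0.25).within_group(column('income').desc())
+    # compiles roughly as:
+    #   percentile_cont(:percentile_cont_1) WITHIN GROUP (ORDER BY income DESC)
+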
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
@@ -1270,9 +1283,6 @@ class SQLCompiler(Compiled):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
- if not self.dialect.case_sensitive:
- keyname = keyname.lower()
-
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
@@ -1789,9 +1799,9 @@ class SQLCompiler(Compiled):
return text
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
- fromhints=None, **kwargs):
+ fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
- if getattr(table, "schema", None):
+ if use_schema and getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
@@ -1812,6 +1822,22 @@ class SQLCompiler(Compiled):
join.onclause._compiler_dispatch(self, **kwargs)
)
+ def _setup_crud_hints(self, stmt, table_text):
+ dialect_hints = dict([
+ (table, hint_text)
+ for (table, dialect), hint_text in
+ stmt._hints.items()
+ if dialect in ('*', self.dialect.name)
+ ])
+ if stmt.table in dialect_hints:
+ table_text = self.format_from_hint_text(
+ table_text,
+ stmt.table,
+ dialect_hints[stmt.table],
+ True
+ )
+ return dialect_hints, table_text
+
def visit_insert(self, insert_stmt, **kw):
self.stack.append(
{'correlate_froms': set(),
@@ -1853,19 +1879,10 @@ class SQLCompiler(Compiled):
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- insert_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if insert_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- insert_stmt.table,
- dialect_hints[insert_stmt.table],
- True
- )
+ dialect_hints, table_text = self._setup_crud_hints(
+ insert_stmt, table_text)
+ else:
+ dialect_hints = None
text += table_text
@@ -1957,19 +1974,8 @@ class SQLCompiler(Compiled):
crud_params = crud._get_crud_params(self, update_stmt, **kw)
if update_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- update_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if update_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- update_stmt.table,
- dialect_hints[update_stmt.table],
- True
- )
+ dialect_hints, table_text = self._setup_crud_hints(
+ update_stmt, table_text)
else:
dialect_hints = None
@@ -2038,22 +2044,8 @@ class SQLCompiler(Compiled):
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- delete_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if delete_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- delete_stmt.table,
- dialect_hints[delete_stmt.table],
- True
- )
-
- else:
- dialect_hints = None
+ dialect_hints, table_text = self._setup_crud_hints(
+ delete_stmt, table_text)
text += table_text
@@ -2139,11 +2131,11 @@ class DDLCompiler(Compiled):
table = create.element
preparer = self.dialect.identifier_preparer
- text = "\n" + " ".join(['CREATE'] +
- table._prefixes +
- ['TABLE',
- preparer.format_table(table),
- "("])
+ text = "\nCREATE "
+ if table._prefixes:
+ text += " ".join(table._prefixes) + " "
+ text += "TABLE " + preparer.format_table(table) + " ("
+
separator = "\n"
# if only one primary key, specify it along with the column
@@ -2168,10 +2160,10 @@ class DDLCompiler(Compiled):
))
const = self.create_table_constraints(
- table, _include_foreign_key_constraints=
- create.include_foreign_key_constraints)
+ table, _include_foreign_key_constraints= # noqa
+ create.include_foreign_key_constraints)
if const:
- text += ", \n\t" + const
+ text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
@@ -2223,7 +2215,7 @@ class DDLCompiler(Compiled):
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
- )) if p is not None
+ )) if p is not None
)
def visit_drop_table(self, drop):
@@ -2299,6 +2291,16 @@ class DDLCompiler(Compiled):
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
+ if create.element.minvalue is not None:
+ text += " MINVALUE %d" % create.element.minvalue
+ if create.element.maxvalue is not None:
+ text += " MAXVALUE %d" % create.element.maxvalue
+ if create.element.nominvalue is not None:
+ text += " NO MINVALUE"
+ if create.element.nomaxvalue is not None:
+ text += " NO MAXVALUE"
+ if create.element.cycle is not None:
+ text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
@@ -2379,7 +2381,7 @@ class DDLCompiler(Compiled):
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
+ for c in constraint.columns_autoinc_first)
text += self.define_constraint_deferrability(constraint)
return text
diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py
index 2e39f6b36..c5495ccde 100644
--- a/lib/sqlalchemy/sql/crud.py
+++ b/lib/sqlalchemy/sql/crud.py
@@ -196,8 +196,9 @@ def _scan_insert_from_select_cols(
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
- compiler._insert_from_select._raw_columns += tuple(
- expr for col, expr in add_select_cols)
+ compiler._insert_from_select._raw_columns = \
+ tuple(compiler._insert_from_select._raw_columns) + tuple(
+ expr for col, expr in add_select_cols)
def _scan_cols(
@@ -208,10 +209,22 @@ def _scan_cols(
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
- cols = stmt.table.columns
+ if stmt._parameter_ordering:
+ parameter_ordering = [
+ _column_as_key(key) for key in stmt._parameter_ordering
+ ]
+ ordered_keys = set(parameter_ordering)
+ cols = [
+ stmt.table.c[key] for key in parameter_ordering
+ ] + [
+ c for c in stmt.table.c if c.key not in ordered_keys
+ ]
+ else:
+ cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
+
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
@@ -248,6 +261,10 @@ def _scan_cols(
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
+ elif c.primary_key and \
+ c is not stmt.table._autoincrement_column and \
+ not c.nullable:
+ _raise_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
@@ -285,6 +302,22 @@ def _append_param_parameter(
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
+ """Create a primary key expression in the INSERT statement and
+ possibly a RETURNING clause for it.
+
+ If the column has a Python-side default, we will create a bound
+ parameter for it and "pre-execute" the Python function. If
+ the column has a SQL expression default, or is a sequence,
+ we will add it directly into the INSERT statement and add a
+ RETURNING element to get the new value. If the column has a
+ server side default or is marked as the "autoincrement" column,
+ we will add a RETURNING element to get at the value.
+
+ If all the above tests fail, that indicates a primary key column with no
+ noted default generation capabilities that has no parameter passed;
+ raise an exception.
+
+ """
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
@@ -303,9 +336,12 @@ def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
-
- else:
+ elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
+ elif not c.nullable:
+ # no .default, no .server_default, not autoincrement, we have
+ # no indication this primary key column will have any value
+ _raise_pk_with_no_anticipated_value(c)
def _create_prefetch_bind_param(compiler, c, process=True, name=None):
@@ -319,6 +355,7 @@ class _multiparam_column(elements.ColumnElement):
self.key = "%s_%d" % (original.key, index + 1)
self.original = original
self.default = original.default
+ self.type = original.type
def __eq__(self, other):
return isinstance(other, _multiparam_column) and \
@@ -341,18 +378,46 @@ def _process_multiparam_default_bind(compiler, c, index, kw):
def _append_param_insert_pk(compiler, stmt, c, values, kw):
+ """Create a bound parameter in the INSERT statement to receive a
+ 'prefetched' default value.
+
+ The 'prefetched' value indicates that we are to invoke a Python-side
+ default function or explicit SQL expression before the INSERT statement
+ proceeds, so that we have a primary key value available.
+
+ If the column has no noted default generation capabilities and no
+ value is passed for it either, raise an exception.
+
+ """
if (
- (c.default is not None and
- (not c.default.is_sequence or
- compiler.dialect.supports_sequences)) or
- c is stmt.table._autoincrement_column and
- (compiler.dialect.supports_sequences or
- compiler.dialect.
- preexecute_autoincrement_sequences)
+ (
+ # column has a Python-side default
+ c.default is not None and
+ (
+ # and it won't be a Sequence
+ not c.default.is_sequence or
+ compiler.dialect.supports_sequences
+ )
+ )
+ or
+ (
+ # column is the "autoincrement column"
+ c is stmt.table._autoincrement_column and
+ (
+ # and it's either a "sequence" or a
+ # pre-executable "autoincrement" sequence
+ compiler.dialect.supports_sequences or
+ compiler.dialect.preexecute_autoincrement_sequences
+ )
+ )
):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
+ elif c.default is None and c.server_default is None and not c.nullable:
+ # no .default, no .server_default, not autoincrement, we have
+ # no indication this primary key column will have any value
+ _raise_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
@@ -428,6 +493,7 @@ def _append_param_update(
else:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
+ stmt._return_defaults is not True and \
c in implicit_return_defaults:
compiler.returning.append(c)
@@ -554,3 +620,24 @@ def _get_returning_modifiers(compiler, stmt):
return need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid
+
+
+def _raise_pk_with_no_anticipated_value(c):
+ msg = (
+ "Column '%s.%s' is marked as a member of the "
+ "primary key for table '%s', "
+ "but has no Python-side or server-side default generator indicated, "
+ "nor does it indicate 'autoincrement=True' or 'nullable=True', "
+ "and no explicit value is passed. "
+ "Primary key columns typically may not store NULL."
+ %
+ (c.table.fullname, c.name, c.table.fullname))
+ if len(c.table.primary_key.columns) > 1:
+ msg += (
+ " Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
+ "indicated explicitly for composite (e.g. multicolumn) primary "
+ "keys if AUTO_INCREMENT/SERIAL/IDENTITY "
+ "behavior is expected for one of the columns in the primary key. "
+ "CREATE TABLE statements are impacted by this change as well on "
+ "most backends.")
+ raise exc.CompileError(msg)
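A sketch of the condition this new exception guards against, using a hypothetical composite-key table; as the message above notes, marking one column ``autoincrement=True`` and supplying values for the others avoids the error::

    from sqlalchemy import Table, Column, Integer, String, MetaData

    metadata = MetaData()

    # composite primary key: neither column is implicitly "autoincrement"
    # under the 1.1 rules
    widget = Table(
        'widget', metadata,
        Column('wid', Integer, primary_key=True, autoincrement=True),
        Column('version', Integer, primary_key=True),
        Column('name', String(50)),
    )

    # an INSERT omitting 'version' would raise CompileError, since that
    # column has no default, no server_default and is not nullable;
    # passing the value explicitly satisfies the check:
    stmt = widget.insert().values(version=1, name='w1')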
diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py
index e77ad765c..68ea5624e 100644
--- a/lib/sqlalchemy/sql/default_comparator.py
+++ b/lib/sqlalchemy/sql/default_comparator.py
@@ -14,7 +14,8 @@ from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
- collate, _is_literal, _literal_as_text, ClauseElement, and_, or_
+ collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
+ Slice, Visitable, _literal_as_binds
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
@@ -161,6 +162,34 @@ def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
negate=negate_op)
+def _getitem_impl(expr, op, other, **kw):
+ if isinstance(expr.type, type_api.INDEXABLE):
+ if isinstance(other, slice):
+ if expr.type.zero_indexes:
+ other = slice(
+ other.start + 1,
+ other.stop + 1,
+ other.step
+ )
+ other = Slice(
+ _literal_as_binds(
+ other.start, name=expr.key, type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ other.stop, name=expr.key, type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ other.step, name=expr.key, type_=type_api.INTEGERTYPE)
+ )
+ else:
+ if expr.type.zero_indexes:
+ other += 1
+
+ other = _literal_as_binds(
+ other, name=expr.key, type_=type_api.INTEGERTYPE)
+ return _binary_operate(expr, op, other, **kw)
+ else:
+ _unsupported_impl(expr, op, other, **kw)
+
+
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
@@ -260,7 +289,7 @@ operator_lookup = {
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
- "getitem": (_unsupported_impl,),
+ "getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
}
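A sketch of the indexing behavior that ``_getitem_impl()`` now provides, assuming the Postgresql ``ARRAY`` type (an INDEXABLE type that supports the ``zero_indexes`` flag)::

    from sqlalchemy import Integer
    from sqlalchemy.sql import column
    from sqlalchemy.dialects.postgresql import ARRAY

    data = column('data', ARRAY(Integer, zero_indexes=True))

    # single index: the Python zero-based index is shifted to the
    # one-based index Postgresql expects, e.g. data[4]
    expr_item = data[3]

    # slice: produces the new Slice construct, e.g. data[2:6]
    expr_slice = data[1:5]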
@@ -280,7 +309,7 @@ def _check_literal(expr, operator, other):
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
- elif not isinstance(other, (ColumnElement, TextClause)):
+ elif not isinstance(other, Visitable):
return expr._bind_param(operator, other)
else:
return other
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py
index 6756f1554..22c534153 100644
--- a/lib/sqlalchemy/sql/dml.py
+++ b/lib/sqlalchemy/sql/dml.py
@@ -27,6 +27,7 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
_execution_options = \
Executable._execution_options.union({'autocommit': True})
_hints = util.immutabledict()
+ _parameter_ordering = None
_prefixes = ()
def _process_colparams(self, parameters):
@@ -39,6 +40,16 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
else:
return p
+ if self._preserve_parameter_order and parameters is not None:
+ if not isinstance(parameters, list) or \
+ (parameters and not isinstance(parameters[0], tuple)):
+ raise ValueError(
+ "When preserve_parameter_order is True, "
+ "values() only accepts a list of 2-tuples")
+ self._parameter_ordering = [key for key, value in parameters]
+
+ return dict(parameters), False
+
if (isinstance(parameters, (list, tuple)) and parameters and
isinstance(parameters[0], (list, tuple, dict))):
@@ -178,6 +189,7 @@ class ValuesBase(UpdateBase):
_supports_multi_parameters = False
_has_multi_parameters = False
+ _preserve_parameter_order = False
select = None
def __init__(self, table, values, prefixes):
@@ -214,23 +226,32 @@ class ValuesBase(UpdateBase):
users.update().where(users.c.id==5).values(name="some name")
- :param \*args: Alternatively, a dictionary, tuple or list
- of dictionaries or tuples can be passed as a single positional
- argument in order to form the VALUES or
- SET clause of the statement. The single dictionary form
- works the same as the kwargs form::
+ :param \*args: As an alternative to passing key/value parameters,
+ a dictionary, tuple, or list of dictionaries or tuples can be passed
+ as a single positional argument in order to form the VALUES or
+ SET clause of the statement. The forms that are accepted vary
+ based on whether this is an :class:`.Insert` or an :class:`.Update`
+ construct.
+
+ For either an :class:`.Insert` or :class:`.Update` construct, a
+ single dictionary can be passed, which works the same as that of
+ the kwargs form::
users.insert().values({"name": "some name"})
- If a tuple is passed, the tuple should contain the same number
- of columns as the target :class:`.Table`::
+ users.update().values({"name": "some new name"})
+
+ Also for either form, but more typically for the :class:`.Insert`
+ construct, a tuple that contains an entry for every column in the
+ table is accepted::
users.insert().values((5, "some name"))
- The :class:`.Insert` construct also supports multiply-rendered VALUES
- construct, for those backends which support this SQL syntax
- (SQLite, Postgresql, MySQL). This mode is indicated by passing a
- list of one or more dictionaries/tuples::
+ The :class:`.Insert` construct also supports being passed a list
+ of dictionaries or full-table-tuples, which on the server will
+ render the less common SQL syntax of "multiple values" - this
+ syntax is supported on backends such as SQLite, Postgresql, MySQL,
+ but not necessarily others::
users.insert().values([
{"name": "some name"},
@@ -238,55 +259,61 @@ class ValuesBase(UpdateBase):
{"name": "yet another name"},
])
- In the case of an :class:`.Update`
- construct, only the single dictionary/tuple form is accepted,
- else an exception is raised. It is also an exception case to
- attempt to mix the single-/multiple- value styles together,
- either through multiple :meth:`.ValuesBase.values` calls
- or by sending a list + kwargs at the same time.
-
- .. note::
-
- Passing a multiple values list is *not* the same
- as passing a multiple values list to the
- :meth:`.Connection.execute` method. Passing a list of parameter
- sets to :meth:`.ValuesBase.values` produces a construct of this
- form::
-
- INSERT INTO table (col1, col2, col3) VALUES
- (col1_0, col2_0, col3_0),
- (col1_1, col2_1, col3_1),
- ...
-
- whereas a multiple list passed to :meth:`.Connection.execute`
- has the effect of using the DBAPI
- `executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
- method, which provides a high-performance system of invoking
- a single-row INSERT or single-criteria UPDATE or DELETE statement
- many times against a series
- of parameter sets. The "executemany" style is supported by
- all database backends, and works equally well for INSERT,
- UPDATE, and DELETE, as it does not depend on a special SQL
- syntax. See :ref:`execute_multiple` for an introduction to
- the traditional Core method of multiple parameter set invocation
- using this system.
-
- .. versionadded:: 0.8
- Support for multiple-VALUES INSERT statements.
-
- .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
- clause, even a list of length one,
- implies that the :paramref:`.Insert.inline` flag is set to
- True, indicating that the statement will not attempt to fetch
- the "last inserted primary key" or other defaults. The statement
- deals with an arbitrary number of rows, so the
- :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
-
- .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
- columns with Python side default values and callables in the
- same way as that of an "executemany" style of invocation; the
- callable is invoked for each row. See :ref:`bug_3288`
- for other details.
+ The above form would render a multiple VALUES statement similar to::
+
+ INSERT INTO users (name) VALUES
+ (:name_1),
+ (:name_2),
+ (:name_3)
+
+ It is essential to note that **passing multiple values is
+ NOT the same as using the traditional executemany() form**. The above
+ syntax is a **special** syntax not typically used. To emit an
+ INSERT statement against multiple rows, the normal method is
+ to pass a multiple values list to the :meth:`.Connection.execute`
+ method, which is supported by all database backends and is generally
+ more efficient for a very large number of parameters.
+
+ .. seealso::
+
+ :ref:`execute_multiple` - an introduction to
+ the traditional Core method of multiple parameter set
+ invocation for INSERTs and other statements.
+
+ .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
+ clause, even a list of length one,
+ implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The
+ statement deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not
+ apply.
+
+ .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
+ columns with Python side default values and callables in the
+ same way as that of an "executemany" style of invocation; the
+ callable is invoked for each row. See :ref:`bug_3288`
+ for other details.
+
+ The :class:`.Update` construct supports a special form which is a
+ list of 2-tuples, which when provided must be passed in conjunction
+ with the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ parameter.
+ This form causes the UPDATE statement to render the SET clauses
+ using the order of parameters given to :meth:`.Update.values`, rather
+ than the ordering of columns given in the :class:`.Table`.
+
+ .. versionadded:: 1.0.10 - added support for parameter-ordered
+ UPDATE statements via the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag.
+
+ .. seealso::
+
+ :ref:`updates_order_parameters` - full example of the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag
.. seealso::
@@ -582,6 +609,7 @@ class Update(ValuesBase):
prefixes=None,
returning=None,
return_defaults=False,
+ preserve_parameter_order=False,
**dialect_kw):
"""Construct an :class:`.Update` object.
@@ -644,6 +672,19 @@ class Update(ValuesBase):
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
+ :param preserve_parameter_order: if True, the update statement is
+ expected to receive parameters **only** via the :meth:`.Update.values`
+ method, and they must be passed as a Python ``list`` of 2-tuples.
+ The rendered UPDATE statement will emit the SET clause for each
+ referenced column maintaining this order.
+
+ .. versionadded:: 1.0.10
+
+ .. seealso::
+
+ :ref:`updates_order_parameters` - full example of the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag
+
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
@@ -685,6 +726,7 @@ class Update(ValuesBase):
"""
+ self._preserve_parameter_order = preserve_parameter_order
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
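A brief sketch of the parameter-ordered UPDATE described above, using a hypothetical ``stock`` table; the SET clause follows the order of the 2-tuples rather than the order of the table's columns::

    from sqlalchemy import func

    stmt = stock.update(preserve_parameter_order=True).values(
        [
            ('quantity', stock.c.quantity - 5),
            ('last_sold', func.now()),
        ]
    )
    # renders roughly:
    # UPDATE stock SET quantity=(stock.quantity - :quantity_1),
    #     last_sold=now()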
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py
index 27ecce2b0..70046c66b 100644
--- a/lib/sqlalchemy/sql/elements.py
+++ b/lib/sqlalchemy/sql/elements.py
@@ -124,67 +124,6 @@ def literal(value, type_=None):
return BindParameter(None, value, type_=type_, unique=True)
-def type_coerce(expression, type_):
- """Associate a SQL expression with a particular type, without rendering
- ``CAST``.
-
- E.g.::
-
- from sqlalchemy import type_coerce
-
- stmt = select([type_coerce(log_table.date_string, StringDateTime())])
-
- The above construct will produce SQL that is usually otherwise unaffected
- by the :func:`.type_coerce` call::
-
- SELECT date_string FROM log
-
- However, when result rows are fetched, the ``StringDateTime`` type
- will be applied to result rows on behalf of the ``date_string`` column.
-
- A type that features bound-value handling will also have that behavior
- take effect when literal values or :func:`.bindparam` constructs are
- passed to :func:`.type_coerce` as targets.
- For example, if a type implements the :meth:`.TypeEngine.bind_expression`
- method or :meth:`.TypeEngine.bind_processor` method or equivalent,
- these functions will take effect at statement compilation/execution time
- when a literal value is passed, as in::
-
- # bound-value handling of MyStringType will be applied to the
- # literal value "some string"
- stmt = select([type_coerce("some string", MyStringType)])
-
- :func:`.type_coerce` is similar to the :func:`.cast` function,
- except that it does not render the ``CAST`` expression in the resulting
- statement.
-
- :param expression: A SQL expression, such as a :class:`.ColumnElement`
- expression or a Python string which will be coerced into a bound literal
- value.
-
- :param type_: A :class:`.TypeEngine` class or instance indicating
- the type to which the expression is coerced.
-
- .. seealso::
-
- :func:`.cast`
-
- """
- type_ = type_api.to_instance(type_)
-
- if hasattr(expression, '__clause_element__'):
- return type_coerce(expression.__clause_element__(), type_)
- elif isinstance(expression, BindParameter):
- bp = expression._clone()
- bp.type = type_
- return bp
- elif not isinstance(expression, Visitable):
- if expression is None:
- return Null()
- else:
- return literal(expression, type_=type_)
- else:
- return Label(None, expression, type_=type_)
def outparam(key, type_=None):
@@ -700,6 +639,8 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
self.type._type_affinity
is type_api.BOOLEANTYPE._type_affinity):
return AsBoolean(self, operators.istrue, operators.isfalse)
+ elif (against in (operators.any_op, operators.all_op)):
+ return Grouping(self)
else:
return self
@@ -715,7 +656,14 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
@util.memoized_property
def comparator(self):
- return self.type.comparator_factory(self)
+ try:
+ comparator_factory = self.type.comparator_factory
+ except AttributeError:
+ raise TypeError(
+ "Object %r associated with '.type' attribute "
+ "is not a TypeEngine class or object" % self.type)
+ else:
+ return comparator_factory(self)
def __getattr__(self, key):
try:
@@ -837,6 +785,16 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
else:
return False
+ def cast(self, type_):
+ """Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
+
+ This is a shortcut to the :func:`~.expression.cast` function.
+
+ .. versionadded:: 1.0.7
+
+ """
+ return Cast(self, type_)
+
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
@@ -1128,8 +1086,7 @@ class BindParameter(ColumnElement):
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
- self.type = type_api._type_map.get(type(value),
- type_api.NULLTYPE)
+ self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
@@ -1144,8 +1101,7 @@ class BindParameter(ColumnElement):
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
- cloned.type = type_api._type_map.get(type(value),
- type_api.NULLTYPE)
+ cloned.type = type_api._resolve_value_to_type(value)
return cloned
@property
@@ -1840,9 +1796,12 @@ class BooleanClauseList(ClauseList, ColumnElement):
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
- clauses = util.coerce_generator_arg(clauses)
+ clauses = [
+ _expression_literal_as_text(clause)
+ for clause in
+ util.coerce_generator_arg(clauses)
+ ]
for clause in clauses:
- clause = _expression_literal_as_text(clause)
if isinstance(clause, continue_on):
continue
@@ -2327,6 +2286,109 @@ class Cast(ColumnElement):
return self.clause._from_objects
+class TypeCoerce(ColumnElement):
+ """Represent a Python-side type-coercion wrapper.
+
+ :class:`.TypeCoerce` supplies the :func:`.expression.type_coerce`
+ function; see that function for usage details.
+
+ .. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
+ a persistent :class:`.TypeCoerce` wrapper object rather than
+ translating the given object in place.
+
+ .. seealso::
+
+ :func:`.expression.type_coerce`
+
+ """
+
+ __visit_name__ = 'type_coerce'
+
+ def __init__(self, expression, type_):
+ """Associate a SQL expression with a particular type, without rendering
+ ``CAST``.
+
+ E.g.::
+
+ from sqlalchemy import type_coerce
+
+ stmt = select([
+ type_coerce(log_table.date_string, StringDateTime())
+ ])
+
+ The above construct will produce a :class:`.TypeCoerce` object, which
+ renders SQL that labels the expression, but otherwise does not
+ modify its value on the SQL side::
+
+ SELECT date_string AS anon_1 FROM log
+
+ When result rows are fetched, the ``StringDateTime`` type
+ will be applied to result rows on behalf of the ``date_string`` column.
+ The rationale for the "anon_1" label is so that the type-coerced
+ column remains separate in the list of result columns vs. other
+ type-coerced or direct values of the target column. In order to
+ provide a named label for the expression, use
+ :meth:`.ColumnElement.label`::
+
+ stmt = select([
+ type_coerce(
+ log_table.date_string, StringDateTime()).label('date')
+ ])
+
+
+ A type that features bound-value handling will also have that behavior
+ take effect when literal values or :func:`.bindparam` constructs are
+ passed to :func:`.type_coerce` as targets.
+ For example, if a type implements the
+ :meth:`.TypeEngine.bind_expression`
+ method or :meth:`.TypeEngine.bind_processor` method or equivalent,
+ these functions will take effect at statement compilation/execution
+ time when a literal value is passed, as in::
+
+ # bound-value handling of MyStringType will be applied to the
+ # literal value "some string"
+ stmt = select([type_coerce("some string", MyStringType)])
+
+ :func:`.type_coerce` is similar to the :func:`.cast` function,
+ except that it does not render the ``CAST`` expression in the resulting
+ statement.
+
+ :param expression: A SQL expression, such as a :class:`.ColumnElement`
+ expression or a Python string which will be coerced into a bound
+ literal value.
+
+ :param type_: A :class:`.TypeEngine` class or instance indicating
+ the type to which the expression is coerced.
+
+ .. seealso::
+
+ :func:`.cast`
+
+ """
+ self.type = type_api.to_instance(type_)
+ self.clause = _literal_as_binds(expression, type_=self.type)
+
+ def _copy_internals(self, clone=_clone, **kw):
+ self.clause = clone(self.clause, **kw)
+ self.__dict__.pop('typed_expression', None)
+
+ def get_children(self, **kwargs):
+ return self.clause,
+
+ @property
+ def _from_objects(self):
+ return self.clause._from_objects
+
+ @util.memoized_property
+ def typed_expression(self):
+ if isinstance(self.clause, BindParameter):
+ bp = self.clause._clone()
+ bp.type = self.type
+ return bp
+ else:
+ return self.clause
+
+
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
@@ -2668,6 +2730,91 @@ class UnaryExpression(ColumnElement):
return self
+class CollectionAggregate(UnaryExpression):
+ """Forms the basis for right-hand collection operator modifiers
+ ANY and ALL.
+
+ The ANY and ALL keywords are available in different ways on different
+ backends. On Postgresql, they only work for an ARRAY type. On
+ MySQL, they only work for subqueries.
+
+ """
+ @classmethod
+ def _create_any(cls, expr):
+ """Produce an ANY expression.
+
+ This may apply to an array type for some dialects (e.g. postgresql),
+ or to a subquery for others (e.g. mysql). e.g.::
+
+ # postgresql '5 = ANY (somearray)'
+ expr = 5 == any_(mytable.c.somearray)
+
+ # mysql '5 = ANY (SELECT value FROM table)'
+ expr = 5 == any_(select([table.c.value]))
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.expression.all_`
+
+ """
+
+ expr = _literal_as_binds(expr)
+
+ if expr.is_selectable and hasattr(expr, 'as_scalar'):
+ expr = expr.as_scalar()
+ expr = expr.self_group()
+ return CollectionAggregate(
+ expr, operator=operators.any_op,
+ type_=type_api.NULLTYPE, wraps_column_expression=False)
+
+ @classmethod
+ def _create_all(cls, expr):
+ """Produce an ALL expression.
+
+ This may apply to an array type for some dialects (e.g. postgresql),
+ or to a subquery for others (e.g. mysql). e.g.::
+
+ # postgresql '5 = ALL (somearray)'
+ expr = 5 == all_(mytable.c.somearray)
+
+ # mysql '5 = ALL (SELECT value FROM table)'
+ expr = 5 == all_(select([table.c.value]))
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.expression.any_`
+
+ """
+
+ expr = _literal_as_binds(expr)
+ if expr.is_selectable and hasattr(expr, 'as_scalar'):
+ expr = expr.as_scalar()
+ expr = expr.self_group()
+ return CollectionAggregate(
+ expr, operator=operators.all_op,
+ type_=type_api.NULLTYPE, wraps_column_expression=False)
+
+ # operate and reverse_operate are hardwired to
+ # dispatch onto the type comparator directly, so that we can
+ # ensure "reversed" behavior.
+ def operate(self, op, *other, **kwargs):
+ if not operators.is_comparison(op):
+ raise exc.ArgumentError(
+ "Only comparison operators may be used with ANY/ALL")
+ kwargs['reverse'] = True
+ return self.comparator.operate(operators.mirror(op), *other, **kwargs)
+
+ def reverse_operate(self, op, other, **kwargs):
+ # comparison operators should never call reverse_operate
+ assert not operators.is_comparison(op)
+ raise exc.ArgumentError(
+ "Only comparison operators may be used with ANY/ALL")
+
+
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
@@ -2779,6 +2926,32 @@ class BinaryExpression(ColumnElement):
return super(BinaryExpression, self)._negate()
+class Slice(ColumnElement):
+ """Represent SQL for a Python array-slice object.
+
+ This is not a specific SQL construct at this level, but
+ may be interpreted by specific dialects, e.g. Postgresql.
+
+ """
+ __visit_name__ = 'slice'
+
+ def __init__(self, start, stop, step):
+ self.start = start
+ self.stop = stop
+ self.step = step
+ self.type = type_api.NULLTYPE
+
+ def self_group(self, against=None):
+ assert against is operator.getitem
+ return self
+
+
+class IndexExpression(BinaryExpression):
+ """Represent the class of expressions that are like an "index" operation.
+ """
+ pass
+
+
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
@@ -2839,21 +3012,21 @@ class Over(ColumnElement):
order_by = None
partition_by = None
- def __init__(self, func, partition_by=None, order_by=None):
+ def __init__(self, element, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
- E.g.::
+ :func:`~.expression.over` is usually called using
+ the :meth:`.FunctionElement.over` method, e.g.::
- from sqlalchemy import over
- over(func.row_number(), order_by='x')
+ func.row_number().over(order_by='x')
- Would produce "ROW_NUMBER() OVER(ORDER BY x)".
+ Would produce ``ROW_NUMBER() OVER(ORDER BY x)``.
- :param func: a :class:`.FunctionElement` construct, typically
- generated by :data:`~.expression.func`.
+ :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
+ or other compatible construct.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
@@ -2866,8 +3039,14 @@ class Over(ColumnElement):
.. versionadded:: 0.7
+ .. seealso::
+
+ :data:`.expression.func`
+
+ :func:`.expression.within_group`
+
"""
- self.func = func
+ self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
@@ -2877,17 +3056,29 @@ class Over(ColumnElement):
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference)
+ @property
+ def func(self):
+ """the element referred to by this :class:`.Over`
+ clause.
+
+ .. deprecated:: 1.1 the ``func`` element has been renamed to
+ ``.element``. The two attributes are synonymous though
+ ``.func`` is read-only.
+
+ """
+ return self.element
+
@util.memoized_property
def type(self):
- return self.func.type
+ return self.element.type
def get_children(self, **kwargs):
return [c for c in
- (self.func, self.partition_by, self.order_by)
+ (self.element, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
- self.func = clone(self.func, **kw)
+ self.element = clone(self.element, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
@@ -2897,7 +3088,106 @@ class Over(ColumnElement):
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
- (self.func, self.partition_by, self.order_by)
+ (self.element, self.partition_by, self.order_by)
+ if c is not None]
+ ))
+
+
+class WithinGroup(ColumnElement):
+ """Represent a WITHIN GROUP (ORDER BY) clause.
+
+ This is a special operator against so-called "ordered set
+ aggregate" and "hypothetical
+ set aggregate" functions, including ``percentile_cont()``,
+ ``rank()``, ``dense_rank()``, etc.
+
+ It's supported only by certain database backends, such as PostgreSQL,
+ Oracle and MS SQL Server.
+
+ The :class:`.WithinGroup` construct extracts its type from the
+ method :meth:`.FunctionElement.within_group_type`. If this returns
+ ``None``, the function's ``.type`` is used.
+
+ """
+ __visit_name__ = 'withingroup'
+
+ order_by = None
+
+ def __init__(self, element, *order_by):
+ """Produce a :class:`.WithinGroup` object against a function.
+
+ Used against so-called "ordered set aggregate" and "hypothetical
+ set aggregate" functions, including :class:`.percentile_cont`,
+ :class:`.rank`, :class:`.dense_rank`, etc.
+
+ :func:`~.expression.within_group` is usually called using
+ the :meth:`.FunctionElement.within_group` method, e.g.::
+
+ from sqlalchemy import within_group
+ stmt = select([
+ department.c.id,
+ func.percentile_cont(0.5).within_group(
+ department.c.salary.desc()
+ )
+ ])
+
+ The above statement would produce SQL similar to
+ ``SELECT department.id, percentile_cont(0.5)
+ WITHIN GROUP (ORDER BY department.salary DESC)``.
+
+ :param element: a :class:`.FunctionElement` construct, typically
+ generated by :data:`~.expression.func`.
+ :param \*order_by: one or more column elements that will be used
+ as the ORDER BY clause of the WITHIN GROUP construct.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :data:`.expression.func`
+
+ :func:`.expression.over`
+
+ """
+ self.element = element
+ if order_by is not None:
+ self.order_by = ClauseList(
+ *util.to_list(order_by),
+ _literal_as_text=_literal_as_label_reference)
+
+ def over(self, partition_by=None, order_by=None):
+ """Produce an OVER clause against this :class:`.WithinGroup`
+ construct.
+
+ This function has the same signature as that of
+ :meth:`.FunctionElement.over`.
+
+ """
+ return Over(self, partition_by=partition_by, order_by=order_by)
+
+ @util.memoized_property
+ def type(self):
+ wgt = self.element.within_group_type(self)
+ if wgt is not None:
+ return wgt
+ else:
+ return self.element.type
+
+ def get_children(self, **kwargs):
+ return [c for c in
+ (self.element, self.order_by)
+ if c is not None]
+
+ def _copy_internals(self, clone=_clone, **kw):
+ self.element = clone(self.element, **kw)
+ if self.order_by is not None:
+ self.order_by = clone(self.order_by, **kw)
+
+ @property
+ def _from_objects(self):
+ return list(itertools.chain(
+ *[c._from_objects for c in
+ (self.element, self.order_by)
if c is not None]
))
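A short sketch of the comparison-against-collection operators defined by :class:`.CollectionAggregate`, assuming hypothetical ``mytable`` and ``other`` tables, with an array-typed column on Postgresql::

    from sqlalchemy import select
    from sqlalchemy.sql.expression import any_, all_

    # renders roughly "5 = ANY (mytable.somearray)" on Postgresql
    stmt = select([mytable]).where(5 == any_(mytable.c.somearray))

    # renders roughly "5 = ALL (SELECT other.value FROM other)"
    stmt = select([mytable]).where(5 == all_(select([other.c.value])))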
diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py
index 74b827d7e..27fae8ca4 100644
--- a/lib/sqlalchemy/sql/expression.py
+++ b/lib/sqlalchemy/sql/expression.py
@@ -15,7 +15,7 @@ class.
"""
__all__ = [
- 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
+ 'Alias', 'Any', 'All', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
@@ -24,19 +24,19 @@ __all__ = [
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
'table', 'text',
- 'tuple_', 'type_coerce', 'union', 'union_all', 'update']
+ 'tuple_', 'type_coerce', 'union', 'union_all', 'update', 'within_group']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement, Function
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
- BindParameter, UnaryExpression, BooleanClauseList, \
+ BindParameter, CollectionAggregate, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
- Grouping, not_, \
+ Grouping, WithinGroup, not_, \
collate, literal_column, between,\
- literal, outparam, type_coerce, ClauseList, FunctionFilter
+ literal, outparam, TypeCoerce, ClauseList, FunctionFilter
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
@@ -57,6 +57,8 @@ from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
+all_ = public_factory(CollectionAggregate._create_all, ".expression.all_")
+any_ = public_factory(CollectionAggregate._create_any, ".expression.any_")
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
bindparam = public_factory(BindParameter, ".expression.bindparam")
@@ -65,6 +67,7 @@ text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
+within_group = public_factory(WithinGroup, ".expression.within_group")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
@@ -89,6 +92,7 @@ asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(
UnaryExpression._create_distinct, ".expression.distinct")
+type_coerce = public_factory(TypeCoerce, ".expression.type_coerce")
true = public_factory(True_._instance, ".expression.true")
false = public_factory(False_._instance, ".expression.false")
null = public_factory(Null._instance, ".expression.null")
diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py
index 538a2c549..6cfbd12b3 100644
--- a/lib/sqlalchemy/sql/functions.py
+++ b/lib/sqlalchemy/sql/functions.py
@@ -12,9 +12,9 @@ from . import sqltypes, schema
from .base import Executable, ColumnCollection
from .elements import ClauseList, Cast, Extract, _literal_as_binds, \
literal_column, _type_from_args, ColumnElement, _clone,\
- Over, BindParameter, FunctionFilter
+ Over, BindParameter, FunctionFilter, Grouping, WithinGroup
from .selectable import FromClause, Select, Alias
-
+from . import util as sqlutil
from . import operators
from .visitors import VisitableType
from .. import util
@@ -116,6 +116,21 @@ class FunctionElement(Executable, ColumnElement, FromClause):
"""
return Over(self, partition_by=partition_by, order_by=order_by)
+ def within_group(self, *order_by):
+ """Produce a WITHIN GROUP (ORDER BY expr) clause against this function.
+
+ Used against so-called "ordered set aggregate" and "hypothetical
+ set aggregate" functions, including :class:`.percentile_cont`,
+ :class:`.rank`, :class:`.dense_rank`, etc.
+
+ See :func:`~.expression.within_group` for a full description.
+
+ .. versionadded:: 1.1
+
+
+ """
+ return WithinGroup(self, *order_by)
+
def filter(self, *criterion):
"""Produce a FILTER clause against this function.
@@ -157,6 +172,18 @@ class FunctionElement(Executable, ColumnElement, FromClause):
self._reset_exported()
FunctionElement.clauses._reset(self)
+ def within_group_type(self, within_group):
+ """For types that define their return type as based on the criteria
+ within a WITHIN GROUP (ORDER BY) expression, called by the
+ :class:`.WithinGroup` construct.
+
+ Returns None by default, in which case the function's normal ``.type``
+ is used.
+
+ """
+
+ return None
+
def alias(self, name=None, flat=False):
"""Produce a :class:`.Alias` construct against this
:class:`.FunctionElement`.
@@ -233,6 +260,16 @@ class FunctionElement(Executable, ColumnElement, FromClause):
return BindParameter(None, obj, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
+ def self_group(self, against=None):
+ # for the moment, we are parenthesizing all array-returning
+ # expressions against getitem. This may need to be made
+ # more portable if in the future we support other DBs
+ # besides postgresql.
+ if against is operators.getitem:
+ return Grouping(self)
+ else:
+ return super(FunctionElement, self).self_group(against=against)
+
class _FunctionGenerator(object):
"""Generate :class:`.Function` objects based on getattr calls."""
@@ -483,7 +520,7 @@ class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop('_parsed_args', None)
if parsed_args is None:
- parsed_args = [_literal_as_binds(c) for c in args]
+ parsed_args = [_literal_as_binds(c, self.name) for c in args]
self.packagenames = []
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
@@ -528,10 +565,10 @@ class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
- args = [_literal_as_binds(c) for c in args]
+ args = [_literal_as_binds(c, self.name) for c in args]
kwargs.setdefault('type_', _type_from_args(args))
kwargs['_parsed_args'] = args
- GenericFunction.__init__(self, *args, **kwargs)
+ super(ReturnTypeFromArgs, self).__init__(*args, **kwargs)
class coalesce(ReturnTypeFromArgs):
@@ -579,7 +616,7 @@ class count(GenericFunction):
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column('*')
- GenericFunction.__init__(self, expression, **kwargs)
+ super(count, self).__init__(expression, **kwargs)
class current_date(AnsiFunction):
@@ -616,3 +653,150 @@ class sysdate(AnsiFunction):
class user(AnsiFunction):
type = sqltypes.String
+
+
+class array_agg(GenericFunction):
+ """support for the ARRAY_AGG function.
+
+ The ``func.array_agg(expr)`` construct returns an expression of
+ type :class:`.Array`.
+
+ e.g.::
+
+ stmt = select([func.array_agg(table.c.values)[2:5]])
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.postgresql.array_agg` - PostgreSQL-specific version that
+ returns :class:`.ARRAY`, which has PG-specific operators added.
+
+ """
+
+ type = sqltypes.Array
+
+ def __init__(self, *args, **kwargs):
+ args = [_literal_as_binds(c) for c in args]
+ kwargs.setdefault('type_', self.type(_type_from_args(args)))
+ kwargs['_parsed_args'] = args
+ super(array_agg, self).__init__(*args, **kwargs)
+
+
+class OrderedSetAgg(GenericFunction):
+ """Define a function where the return type is based on the sort
+ expression type as defined by the expression passed to the
+ :meth:`.FunctionElement.within_group` method."""
+
+ array_for_multi_clause = False
+
+ def within_group_type(self, within_group):
+ func_clauses = self.clause_expr.element
+ order_by = sqlutil.unwrap_order_by(within_group.order_by)
+ if self.array_for_multi_clause and len(func_clauses.clauses) > 1:
+ return sqltypes.Array(order_by[0].type)
+ else:
+ return order_by[0].type
+
+
+class mode(OrderedSetAgg):
+ """implement the ``mode`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression.
+
+ .. versionadded:: 1.1
+
+ """
+
+
+class percentile_cont(OrderedSetAgg):
+ """implement the ``percentile_cont`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression,
+ or if the arguments are an array, an :class:`.Array` of the sort
+ expression's type.
+
+ .. versionadded:: 1.1
+
+ """
+
+ array_for_multi_clause = True
+
+
+class percentile_disc(OrderedSetAgg):
+ """implement the ``percentile_disc`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression,
+ or if the arguments are an array, an :class:`.Array` of the sort
+ expression's type.
+
+ .. versionadded:: 1.1
+
+ """
+
+ array_for_multi_clause = True
+
+
+class rank(GenericFunction):
+ """Implement the ``rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Integer`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Integer()
+
+
+class dense_rank(GenericFunction):
+ """Implement the ``dense_rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Integer`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Integer()
+
+
+class percent_rank(GenericFunction):
+ """Implement the ``percent_rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Numeric`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Numeric()
+
+
+class cume_dist(GenericFunction):
+ """Implement the ``cume_dist`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Numeric`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Numeric()
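A compact sketch of the new ordered-set aggregate helpers in use, assuming a hypothetical ``employees`` table; the return type is derived from the WITHIN GROUP sort expression as described above::

    from sqlalchemy import select, func

    # median salary; percentile_cont's type follows employees.c.salary
    stmt = select([
        func.percentile_cont(0.5).within_group(employees.c.salary)
    ])

    # most common department id, via the 'mode' ordered-set aggregate
    stmt = select([
        func.mode().within_group(employees.c.dep_id)
    ])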
diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py
index 51f162c98..da3576466 100644
--- a/lib/sqlalchemy/sql/operators.py
+++ b/lib/sqlalchemy/sql/operators.py
@@ -214,10 +214,13 @@ class custom_op(object):
"""
__name__ = 'custom_op'
- def __init__(self, opstring, precedence=0, is_comparison=False):
+ def __init__(
+ self, opstring, precedence=0, is_comparison=False,
+ natural_self_precedent=False):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
+ self.natural_self_precedent = natural_self_precedent
def __eq__(self, other):
return isinstance(other, custom_op) and \
@@ -597,6 +600,14 @@ class ColumnOperators(Operators):
"""
return self.reverse_operate(div, other)
+ def __rmod__(self, other):
+ """Implement the ``%`` operator in reverse.
+
+ See :meth:`.ColumnOperators.__mod__`.
+
+ """
+ return self.reverse_operate(mod, other)
+
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`~.expression.between` clause against
the parent object, given the lower and upper range.
@@ -611,6 +622,24 @@ class ColumnOperators(Operators):
"""
return self.operate(distinct_op)
+ def any_(self):
+ """Produce a :func:`~.expression.any_` clause against the
+ parent object.
+
+ .. versionadded:: 1.1
+
+ """
+ return self.operate(any_op)
+
+ def all_(self):
+ """Produce a :func:`~.expression.all_` clause against the
+ parent object.
+
+ .. versionadded:: 1.1
+
+ """
+ return self.operate(all_op)
+
def __add__(self, other):
"""Implement the ``+`` operator.
@@ -744,6 +773,14 @@ def distinct_op(a):
return a.distinct()
+def any_op(a):
+ return a.any_()
+
+
+def all_op(a):
+ return a.all_()
+
+
def startswith_op(a, b, escape=None):
return a.startswith(b, escape=escape)
@@ -818,6 +855,28 @@ def is_ordering_modifier(op):
return op in (asc_op, desc_op,
nullsfirst_op, nullslast_op)
+
+def is_natural_self_precedent(op):
+ return op in _natural_self_precedent or \
+ isinstance(op, custom_op) and op.natural_self_precedent
+
+_mirror = {
+ gt: lt,
+ ge: le,
+ lt: gt,
+ le: ge
+}
+
+
+def mirror(op):
+ """rotate a comparison operator 180 degrees.
+
+ Note this is not the same as negation.
+
+ """
+ return _mirror.get(op, op)
+
+
_associative = _commutative.union([concat_op, and_, or_])
_natural_self_precedent = _associative.union([getitem])
@@ -826,12 +885,15 @@ parenthesize (a op b).
"""
+
_asbool = util.symbol('_asbool', canonical=-10)
_smallest = util.symbol('_smallest', canonical=-100)
_largest = util.symbol('_largest', canonical=100)
_PRECEDENCE = {
from_: 15,
+ any_op: 15,
+ all_op: 15,
getitem: 15,
mul: 8,
truediv: 8,
@@ -885,7 +947,7 @@ _PRECEDENCE = {
def is_precedent(operator, against):
- if operator is against and operator in _natural_self_precedent:
+ if operator is against and is_natural_self_precedent(operator):
return False
else:
return (_PRECEDENCE.get(operator,
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index a8989627d..42dbe72b2 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -572,18 +572,9 @@ class Table(DialectKWArgs, SchemaItem, TableClause):
def _init_collections(self):
pass
- @util.memoized_property
+ @property
def _autoincrement_column(self):
- for col in self.primary_key:
- if (col.autoincrement and col.type._type_affinity is not None and
- issubclass(col.type._type_affinity,
- type_api.INTEGERTYPE._type_affinity) and
- (not col.foreign_keys or
- col.autoincrement == 'ignore_fk') and
- isinstance(col.default, (type(None), Sequence)) and
- (col.server_default is None or
- col.server_default.reflected)):
- return col
+ return self.primary_key._autoincrement_column
@property
def key(self):
@@ -913,17 +904,40 @@ class Column(SchemaItem, ColumnClause):
argument is available such as ``server_default``, ``default``
and ``unique``.
- :param autoincrement: This flag may be set to ``False`` to
- indicate an integer primary key column that should not be
- considered to be the "autoincrement" column, that is
- the integer primary key column which generates values
- implicitly upon INSERT and whose value is usually returned
- via the DBAPI cursor.lastrowid attribute. It defaults
- to ``True`` to satisfy the common use case of a table
- with a single integer primary key column. If the table
- has a composite primary key consisting of more than one
- integer column, set this flag to True only on the
- column that should be considered "autoincrement".
+ :param autoincrement: Set up "auto increment" semantics for an integer
+ primary key column. The default value is the string ``"auto"``
+ which indicates that a single-column primary key that is of
+ an INTEGER type with no stated client-side or python-side defaults
+ should receive auto increment semantics automatically;
+ all other varieties of primary key columns will not. This
+ includes that :term:`DDL` such as Postgresql SERIAL or MySQL
+ AUTO_INCREMENT will be emitted for this column during a table
+ create, as well as that the column is assumed to generate new
+ integer primary key values when an INSERT statement invokes which
+ will be retrieved by the dialect.
+
+ The flag may be set to ``True`` to indicate that a column which
+ is part of a composite (e.g. multi-column) primary key should
+ have autoincrement semantics, though note that only one column
+ within a primary key may have this setting. It can also
+ be set to ``True`` to indicate autoincrement semantics on a
+ column that has a client-side or server-side default configured;
+ note, however, that not all dialects can accommodate all styles
+ of default as an "autoincrement". It can also be
+ set to ``False`` on a single-column primary key that has a
+ datatype of INTEGER in order to disable auto increment semantics
+ for that column.
+
+ .. versionchanged:: 1.1 The autoincrement flag now defaults to
+ ``"auto"`` which indicates autoincrement semantics by default
+ for single-column integer primary keys only; for composite
+ (multi-column) primary keys, autoincrement is never implicitly
+ enabled; as always, ``autoincrement=True`` will allow for
+ at most one of those columns to be an "autoincrement" column.
+ ``autoincrement=True`` may also be set on a :class:`.Column`
+ that has an explicit client-side or server-side default,
+ subject to limitations of the backend database and dialect.
+
The setting *only* has an effect for columns which are:
@@ -940,11 +954,8 @@ class Column(SchemaItem, ColumnClause):
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled
- on such a column as its value intends to mirror that of a
- primary key column elsewhere.
-
- * have no server side or client side defaults (with the exception
- of Postgresql SERIAL).
+ on a column that refers to another via foreign key, as such a column
+ is required to refer to a value that originates from elsewhere.
The setting has these two effects on columns that meet the
above criteria:
@@ -961,20 +972,15 @@ class Column(SchemaItem, ColumnClause):
:ref:`sqlite_autoincrement`
- * The column will be considered to be available as
- cursor.lastrowid or equivalent, for those dialects which
- "post fetch" newly inserted identifiers after a row has
- been inserted (SQLite, MySQL, MS-SQL). It does not have
- any effect in this regard for databases that use sequences
- to generate primary key identifiers (i.e. Firebird, Postgresql,
- Oracle).
-
- .. versionchanged:: 0.7.4
- ``autoincrement`` accepts a special value ``'ignore_fk'``
- to indicate that autoincrementing status regardless of foreign
- key references. This applies to certain composite foreign key
- setups, such as the one demonstrated in the ORM documentation
- at :ref:`post_update`.
+ * The column will be considered to be available using an
+ "autoincrement" method specific to the backend database, such
+ as calling upon ``cursor.lastrowid``, using RETURNING in an
+ INSERT statement to get at a sequence-generated value, or using
+ special functions such as "SELECT scope_identity()".
+ These methods are highly specific to the DBAPIs and databases in
+ use and vary greatly, so care should be taken when associating
+ ``autoincrement=True`` with a custom default generation function.
+
:param default: A scalar, Python callable, or
:class:`.ColumnElement` expression representing the
@@ -984,8 +990,12 @@ class Column(SchemaItem, ColumnClause):
a positional argument; see that class for full detail on the
structure of the argument.
- Contrast this argument to ``server_default`` which creates a
- default generator on the database side.
+ Contrast this argument to :paramref:`.Column.server_default`
+ which creates a default generator on the database side.
+
+ .. seealso::
+
+ :ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
@@ -1051,6 +1061,10 @@ class Column(SchemaItem, ColumnClause):
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
+ .. seealso::
+
+ :ref:`server_defaults`
+
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function. This
indicates to SQLAlchemy that a newly generated value will be
@@ -1128,7 +1142,7 @@ class Column(SchemaItem, ColumnClause):
self.system = kwargs.pop('system', False)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
- self.autoincrement = kwargs.pop('autoincrement', True)
+ self.autoincrement = kwargs.pop('autoincrement', "auto")
self.constraints = set()
self.foreign_keys = set()
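An illustrative sketch of the new ``autoincrement="auto"`` default described in the reworked docstring above, with hypothetical tables::

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()

    # single integer primary key: "auto" resolves to autoincrement, so
    # CREATE TABLE emits e.g. SERIAL / AUTO_INCREMENT on capable backends
    t1 = Table('t1', metadata, Column('id', Integer, primary_key=True))

    # the same kind of column with autoincrement=False opts out entirely
    t2 = Table(
        't2', metadata,
        Column('id', Integer, primary_key=True, autoincrement=False))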
@@ -1263,12 +1277,12 @@ class Column(SchemaItem, ColumnClause):
if self.primary_key:
table.primary_key._replace(self)
- Table._autoincrement_column._reset(table)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
+
self.table = table
if self.index:
@@ -1981,13 +1995,14 @@ class ColumnDefault(DefaultGenerator):
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
- return lambda ctx: fn()
+ return util.wrap_callable(lambda ctx: fn(), fn)
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
- return lambda ctx: fn()
+ return util.wrap_callable(lambda ctx: fn(), fn)
+
elif positionals == 1:
return fn
else:
@@ -2040,8 +2055,9 @@ class Sequence(DefaultGenerator):
is_sequence = True
- def __init__(self, name, start=None, increment=None, schema=None,
- optional=False, quote=None, metadata=None,
+ def __init__(self, name, start=None, increment=None, minvalue=None,
+ maxvalue=None, nominvalue=None, nomaxvalue=None, cycle=None,
+ schema=None, optional=False, quote=None, metadata=None,
quote_schema=None,
for_update=False):
"""Construct a :class:`.Sequence` object.
@@ -2057,6 +2073,53 @@ class Sequence(DefaultGenerator):
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
+ :param minvalue: the minimum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "MINVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ minvalue of 1 and -2^63-1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param maxvalue: the maximum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "MAXVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ maxvalue of 2^63-1 and -1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param nominvalue: no minimum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "NO MINVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ minvalue of 1 and -2^63-1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param nomaxvalue: no maximum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "NO MAXVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ maxvalue of 2^63-1 and -1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param cycle: allows the sequence to wrap around when the maxvalue
+      or minvalue has been reached by an ascending or descending sequence,
+ respectively. This value is used when the CREATE SEQUENCE command
+ is emitted to the database as the "CYCLE" clause. If the limit is
+ reached, the next number generated will be the minvalue or maxvalue,
+      respectively. If cycle=False (the default), any calls to nextval
+ after the sequence has reached its maximum value will return an
+ error.
+
+ .. versionadded:: 1.0.7
+
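    [Editorial note] A hedged usage sketch of the new parameters (names here
    are illustrative, not taken from this changeset)::

        from sqlalchemy import Column, Integer, Sequence

        # ascending sequence bounded to 1..9999 that wraps around at the top
        ticket_seq = Sequence(
            "ticket_seq", start=1, increment=1,
            minvalue=1, maxvalue=9999, cycle=True)

        Column("ticket_no", Integer, ticket_seq, primary_key=True)
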
:param schema: Optional schema name for the sequence, if located
in a schema other than the default.
:param optional: boolean value, when ``True``, indicates that this
@@ -2101,6 +2164,11 @@ class Sequence(DefaultGenerator):
self.name = quoted_name(name, quote)
self.start = start
self.increment = increment
+ self.minvalue = minvalue
+ self.maxvalue = maxvalue
+ self.nominvalue = nominvalue
+ self.nomaxvalue = nomaxvalue
+ self.cycle = cycle
self.optional = optional
if metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
@@ -2972,11 +3040,77 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
self.columns.extend(columns)
+ PrimaryKeyConstraint._autoincrement_column._reset(self)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
+ PrimaryKeyConstraint._autoincrement_column._reset(self)
self.columns.replace(col)
+ @property
+ def columns_autoinc_first(self):
+ autoinc = self._autoincrement_column
+
+ if autoinc is not None:
+ return [autoinc] + [c for c in self.columns if c is not autoinc]
+ else:
+ return list(self.columns)
+
+ @util.memoized_property
+ def _autoincrement_column(self):
+
+ def _validate_autoinc(col, autoinc_true):
+ if col.type._type_affinity is None or not issubclass(
+ col.type._type_affinity,
+ type_api.INTEGERTYPE._type_affinity):
+ if autoinc_true:
+ raise exc.ArgumentError(
+ "Column type %s on column '%s' is not "
+ "compatible with autoincrement=True" % (
+ col.type,
+ col
+ ))
+ else:
+ return False
+ elif not isinstance(col.default, (type(None), Sequence)) and \
+ not autoinc_true:
+ return False
+ elif col.server_default is not None and not autoinc_true:
+ return False
+ elif (
+ col.foreign_keys and col.autoincrement
+ not in (True, 'ignore_fk')):
+ return False
+ return True
+
+ if len(self.columns) == 1:
+ col = list(self.columns)[0]
+
+ if col.autoincrement is True:
+ _validate_autoinc(col, True)
+ return col
+ elif (
+ col.autoincrement in ('auto', 'ignore_fk') and
+ _validate_autoinc(col, False)
+ ):
+ return col
+
+ else:
+ autoinc = None
+ for col in self.columns:
+ if col.autoincrement is True:
+ _validate_autoinc(col, True)
+ if autoinc is not None:
+ raise exc.ArgumentError(
+ "Only one Column may be marked "
+ "autoincrement=True, found both %s and %s." %
+ (col.name, autoinc.name)
+ )
+ else:
+ autoinc = col
+
+ return autoinc
+
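[Editorial note] A hedged sketch of the rule enforced above. The attribute
accessed here is internal; in practice the error surfaces when the table
resolves its autoincrement column, e.g. at INSERT compile time::

    from sqlalchemy import Table, Column, Integer, MetaData

    t = Table(
        "t", MetaData(),
        Column("a", Integer, primary_key=True, autoincrement=True),
        Column("b", Integer, primary_key=True, autoincrement=True))

    t.primary_key._autoincrement_column
    # ArgumentError: Only one Column may be marked autoincrement=True, ...
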
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py
index 245c54817..73341053d 100644
--- a/lib/sqlalchemy/sql/selectable.py
+++ b/lib/sqlalchemy/sql/selectable.py
@@ -224,7 +224,7 @@ class HasSuffixes(object):
stmt = select([col1, col2]).cte().suffix_with(
"cycle empno set y_cycle to 1 default 0", dialect="oracle")
- Multiple prefixes can be specified by multiple calls
+ Multiple suffixes can be specified by multiple calls
to :meth:`.suffix_with`.
:param \*expr: textual or :class:`.ClauseElement` construct which
@@ -1101,6 +1101,14 @@ class Alias(FromClause):
or 'anon'))
self.name = name
+ def self_group(self, target=None):
+ if isinstance(target, CompoundSelect) and \
+ isinstance(self.original, Select) and \
+ self.original._needs_parens_for_grouping():
+ return FromGrouping(self)
+
+ return super(Alias, self).self_group(target)
+
@property
def description(self):
if util.py3k:
@@ -3208,6 +3216,13 @@ class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
return None
return None
+ def _needs_parens_for_grouping(self):
+ return (
+ self._limit_clause is not None or
+ self._offset_clause is not None or
+ bool(self._order_by_clause.clauses)
+ )
+
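[Editorial note] A hedged illustration of what the new
``_needs_parens_for_grouping()`` check enables, assuming a Table ``t`` with
integer columns ``x`` and ``y``; SQL output is approximate, default dialect::

    from sqlalchemy import select, union

    s1 = select([t.c.x]).order_by(t.c.x).limit(1)
    s2 = select([t.c.y]).order_by(t.c.y).limit(1)
    print(union(s1, s2))
    # (SELECT t.x FROM t ORDER BY t.x LIMIT :param_1)
    # UNION
    # (SELECT t.y FROM t ORDER BY t.y LIMIT :param_2)
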
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
@@ -3217,7 +3232,8 @@ class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
expressions and should not require explicit use.
"""
- if isinstance(against, CompoundSelect):
+ if isinstance(against, CompoundSelect) and \
+ not self._needs_parens_for_grouping():
return self
return FromGrouping(self)
diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py
index 7e2e601e2..4abb9b15a 100644
--- a/lib/sqlalchemy/sql/sqltypes.py
+++ b/lib/sqlalchemy/sql/sqltypes.py
@@ -13,10 +13,11 @@ import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
-from .elements import quoted_name, type_coerce, _defer_name
+from .elements import quoted_name, TypeCoerce as type_coerce, _defer_name
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
+from .. import inspection
from .. import event
from ..util import pickle
import decimal
@@ -68,7 +69,39 @@ class Concatenable(object):
)):
return operators.concat_op, self.expr.type
else:
- return op, self.expr.type
+ return super(Concatenable.Comparator, self)._adapt_expression(
+ op, other_comparator)
+
+ comparator_factory = Comparator
+
+
+class Indexable(object):
+ """A mixin that marks a type as supporting indexing operations,
+ such as array or JSON structures.
+
+
+ .. versionadded:: 1.1.0
+
+
+ """
+
+ zero_indexes = False
+ """if True, Python zero-based indexes should be interpreted as one-based
+ on the SQL expression side."""
+
+ class Comparator(TypeEngine.Comparator):
+
+ def _setup_getitem(self, index):
+ raise NotImplementedError()
+
+ def __getitem__(self, index):
+ operator, adjusted_right_expr, result_type = \
+ self._setup_getitem(index)
+ return self.operate(
+ operator,
+ adjusted_right_expr,
+ result_type=result_type
+ )
comparator_factory = Comparator
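[Editorial note] The ``_setup_getitem()`` hook above is the one concrete types
override; a minimal hypothetical sketch using names defined in this module
(``__visit_name__`` and result handling omitted)::

    class MyIndexedType(Indexable, TypeEngine):

        class Comparator(Indexable.Comparator):

            def _setup_getitem(self, index):
                # return (operator, right-hand expression, result type)
                return operators.getitem, index, self.type

        comparator_factory = Comparator
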
@@ -215,9 +248,6 @@ class String(Concatenable, TypeEngine):
self.convert_unicode != 'force_nocheck'
)
if needs_convert:
- to_unicode = processors.to_unicode_processor_factory(
- dialect.encoding, self.unicode_error)
-
if needs_isinstance:
return processors.to_conditional_unicode_processor_factory(
dialect.encoding, self.unicode_error)
@@ -1466,6 +1496,246 @@ class Interval(_DateAffinity, TypeDecorator):
return self.impl.coerce_compared_value(op, value)
+class Array(Indexable, Concatenable, TypeEngine):
+ """Represent a SQL Array type.
+
+ .. note:: This type serves as the basis for all ARRAY operations.
+ However, currently **only the Postgresql backend has support
+ for SQL arrays in SQLAlchemy**. It is recommended to use the
+ :class:`.postgresql.ARRAY` type directly when using ARRAY types
+ with PostgreSQL, as it provides additional operators specific
+ to that backend.
+
+ :class:`.Array` is part of the Core in support of various SQL standard
+ functions such as :class:`.array_agg` which explicitly involve arrays;
+ however, with the exception of the PostgreSQL backend and possibly
+ some third-party dialects, no other SQLAlchemy built-in dialect has
+ support for this type.
+
+ An :class:`.Array` type is constructed given the "type"
+ of element::
+
+ mytable = Table("mytable", metadata,
+ Column("data", Array(Integer))
+ )
+
+ The above type represents an N-dimensional array,
+ meaning a supporting backend such as Postgresql will interpret values
+ with any number of dimensions automatically. To produce an INSERT
+ construct that passes in a 1-dimensional array of integers::
+
+ connection.execute(
+ mytable.insert(),
+ data=[1,2,3]
+ )
+
+ The :class:`.Array` type can be constructed given a fixed number
+ of dimensions::
+
+ mytable = Table("mytable", metadata,
+ Column("data", Array(Integer, dimensions=2))
+ )
+
+ Sending a number of dimensions is optional, but recommended if the
+ datatype is to represent arrays of more than one dimension. This number
+ is used:
+
+ * When emitting the type declaration itself to the database, e.g.
+ ``INTEGER[][]``
+
+ * When translating Python values to database values, and vice versa, e.g.
+ an ARRAY of :class:`.Unicode` objects uses this number to efficiently
+ access the string values inside of array structures without resorting
+ to per-row type inspection
+
+ * When used with the Python ``getitem`` accessor, the number of dimensions
+ serves to define the kind of type that the ``[]`` operator should
+ return, e.g. for an ARRAY of INTEGER with two dimensions::
+
+ >>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
+ >>> expr = expr[6] # returns Integer
+
+ For 1-dimensional arrays, an :class:`.Array` instance with no
+ dimension parameter will generally assume single-dimensional behaviors.
+
+ SQL expressions of type :class:`.Array` have support for "index" and
+ "slice" behavior. The Python ``[]`` operator works normally here, given
+ integer indexes or slices. Arrays default to 1-based indexing.
+ The operator produces binary expression
+ constructs which will produce the appropriate SQL, both for
+ SELECT statements::
+
+ select([mytable.c.data[5], mytable.c.data[2:7]])
+
+ as well as UPDATE statements when the :meth:`.Update.values` method
+ is used::
+
+ mytable.update().values({
+ mytable.c.data[5]: 7,
+ mytable.c.data[2:7]: [1, 2, 3]
+ })
+
+ The :class:`.Array` type also provides for the operators
+ :meth:`.Array.Comparator.any` and :meth:`.Array.Comparator.all`.
+ The PostgreSQL-specific version of :class:`.Array` also provides additional
+ operators.
+
+ .. versionadded:: 1.1.0
+
+ .. seealso::
+
+ :class:`.postgresql.ARRAY`
+
+ """
+ __visit_name__ = 'ARRAY'
+
+ class Comparator(Indexable.Comparator, Concatenable.Comparator):
+
+ """Define comparison operations for :class:`.Array`.
+
+ More operators are available on the dialect-specific form
+ of this type. See :class:`.postgresql.ARRAY.Comparator`.
+
+ """
+
+ def _setup_getitem(self, index):
+ if isinstance(index, slice):
+ return_type = self.type
+ elif self.type.dimensions is None or self.type.dimensions == 1:
+ return_type = self.type.item_type
+ else:
+ adapt_kw = {'dimensions': self.type.dimensions - 1}
+ return_type = self.type.adapt(self.type.__class__, **adapt_kw)
+
+ return operators.getitem, index, return_type
+
+ @util.dependencies("sqlalchemy.sql.elements")
+ def any(self, elements, other, operator=None):
+ """Return ``other operator ANY (array)`` clause.
+
+            Argument places are switched, because ANY requires the array
+            expression to be on the right-hand side.
+
+ E.g.::
+
+ from sqlalchemy.sql import operators
+
+ conn.execute(
+ select([table.c.data]).where(
+ table.c.data.any(7, operator=operators.lt)
+ )
+ )
+
+ :param other: expression to be compared
+ :param operator: an operator object from the
+ :mod:`sqlalchemy.sql.operators`
+ package, defaults to :func:`.operators.eq`.
+
+ .. seealso::
+
+ :func:`.sql.expression.any_`
+
+ :meth:`.Array.Comparator.all`
+
+ """
+ operator = operator if operator else operators.eq
+ return operator(
+ elements._literal_as_binds(other),
+ elements.CollectionAggregate._create_any(self.expr)
+ )
+
+ @util.dependencies("sqlalchemy.sql.elements")
+ def all(self, elements, other, operator=None):
+ """Return ``other operator ALL (array)`` clause.
+
+            Argument places are switched, because ALL requires the array
+            expression to be on the right-hand side.
+
+ E.g.::
+
+ from sqlalchemy.sql import operators
+
+ conn.execute(
+ select([table.c.data]).where(
+ table.c.data.all(7, operator=operators.lt)
+ )
+ )
+
+ :param other: expression to be compared
+ :param operator: an operator object from the
+ :mod:`sqlalchemy.sql.operators`
+ package, defaults to :func:`.operators.eq`.
+
+ .. seealso::
+
+ :func:`.sql.expression.all_`
+
+ :meth:`.Array.Comparator.any`
+
+ """
+ operator = operator if operator else operators.eq
+ return operator(
+ elements._literal_as_binds(other),
+ elements.CollectionAggregate._create_all(self.expr)
+ )
+
+ comparator_factory = Comparator
+
+ def __init__(self, item_type, as_tuple=False, dimensions=None,
+ zero_indexes=False):
+ """Construct an :class:`.Array`.
+
+ E.g.::
+
+ Column('myarray', Array(Integer))
+
+ Arguments are:
+
+ :param item_type: The data type of items of this array. Note that
+ dimensionality is irrelevant here, so multi-dimensional arrays like
+          ``INTEGER[][]`` are constructed as ``Array(Integer)``, not as
+ ``Array(Array(Integer))`` or such.
+
+ :param as_tuple=False: Specify whether return results
+ should be converted to tuples from lists. This parameter is
+ not generally needed as a Python list corresponds well
+ to a SQL array.
+
+ :param dimensions: if non-None, the ARRAY will assume a fixed
+ number of dimensions. This impacts how the array is declared
+ on the database, how it goes about interpreting Python and
+ result values, as well as how expression behavior in conjunction
+ with the "getitem" operator works. See the description at
+ :class:`.Array` for additional detail.
+
+ :param zero_indexes=False: when True, index values will be converted
+ between Python zero-based and SQL one-based indexes, e.g.
+ a value of one will be added to all index values before passing
+ to the database.
+
+ """
+ if isinstance(item_type, Array):
+ raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
+ "handles multi-dimensional arrays of basetype")
+ if isinstance(item_type, type):
+ item_type = item_type()
+ self.item_type = item_type
+ self.as_tuple = as_tuple
+ self.dimensions = dimensions
+ self.zero_indexes = zero_indexes
+
+ @property
+ def hashable(self):
+ return self.as_tuple
+
+ @property
+ def python_type(self):
+ return list
+
+ def compare_values(self, x, y):
+ return x == y
+
+
class REAL(Float):
"""The SQL REAL type."""
@@ -1648,6 +1918,8 @@ class NullType(TypeEngine):
_isnull = True
+ hashable = False
+
def literal_processor(self, dialect):
def process(value):
return "NULL"
@@ -1704,6 +1976,26 @@ else:
_type_map[unicode] = Unicode()
_type_map[str] = String()
+_type_map_get = _type_map.get
+
+
+def _resolve_value_to_type(value):
+ _result_type = _type_map_get(type(value), False)
+ if _result_type is False:
+ # use inspect() to detect SQLAlchemy built-in
+ # objects.
+ insp = inspection.inspect(value, False)
+ if (
+ insp is not None and
+ # foil mock.Mock() and other impostors by ensuring
+ # the inspection target itself self-inspects
+ insp.__class__ in inspection._registrars
+ ):
+ raise exc.ArgumentError(
+ "Object %r is not legal as a SQL literal value" % value)
+ return NULLTYPE
+ else:
+ return _result_type
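[Editorial note] A hedged sketch of the resolution behavior added here, using
names defined in this module; ``some_table`` is an assumed ``Table`` object::

    _resolve_value_to_type(5)           # -> Integer()
    _resolve_value_to_type(object())    # -> NULLTYPE (unknown plain value)
    _resolve_value_to_type(some_table)  # raises ArgumentError: a Table
                                        # self-inspects, so it is rejected
                                        # as a SQL literal value
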
# back-assign to type_api
from . import type_api
@@ -1712,6 +2004,6 @@ type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
-type_api._type_map = _type_map
-
+type_api.INDEXABLE = Indexable
+type_api._resolve_value_to_type = _resolve_value_to_type
TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE
diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py
index a55eed981..c367bc73e 100644
--- a/lib/sqlalchemy/sql/type_api.py
+++ b/lib/sqlalchemy/sql/type_api.py
@@ -13,6 +13,7 @@
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
+from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
@@ -20,6 +21,8 @@ INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
+INDEXABLE = None
+_resolve_value_to_type = None
class TypeEngine(Visitable):
@@ -90,7 +93,7 @@ class TypeEngine(Visitable):
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
- return op, other_comparator.type
+ return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
@@ -128,6 +131,76 @@ class TypeEngine(Visitable):
"""
+ should_evaluate_none = False
+ """If True, the Python constant ``None`` is considered to be handled
+ explicitly by this type.
+
+ The ORM uses this flag to indicate that a positive value of ``None``
+ is passed to the column in an INSERT statement, rather than omitting
+ the column from the INSERT statement which has the effect of firing
+ off column-level defaults. It also allows types which have special
+ behavior for Python None, such as a JSON type, to indicate that
+ they'd like to handle the None value explicitly.
+
+ To set this flag on an existing type, use the
+ :meth:`.TypeEngine.evaluates_none` method.
+
+ .. seealso::
+
+ :meth:`.TypeEngine.evaluates_none`
+
+ .. versionadded:: 1.1
+
+
+ """
+
+ def evaluates_none(self):
+ """Return a copy of this type which has the :attr:`.should_evaluate_none`
+ flag set to True.
+
+ E.g.::
+
+ Table(
+ 'some_table', metadata,
+ Column(
+ String(50).evaluates_none(),
+ nullable=True,
+ server_default='no value')
+ )
+
+ The ORM uses this flag to indicate that a positive value of ``None``
+ is passed to the column in an INSERT statement, rather than omitting
+ the column from the INSERT statement which has the effect of firing
+ off column-level defaults. It also allows for types which have
+ special behavior associated with the Python None value to indicate
+ that the value doesn't necessarily translate into SQL NULL; a
+ prime example of this is a JSON type which may wish to persist the
+ JSON value ``'null'``.
+
+        In all cases, the actual NULL SQL value can always be
+ persisted in any column by using
+ the :obj:`~.expression.null` SQL construct in an INSERT statement
+ or associated with an ORM-mapped attribute.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_forcing_null` - in the ORM documentation
+
+ :paramref:`.postgresql.JSON.none_as_null` - Postgresql JSON
+ interaction with this flag.
+
+ :attr:`.TypeEngine.should_evaluate_none` - class-level flag
+
+ """
+ typ = self.copy()
+ typ.should_evaluate_none = True
+ return typ
+
+ def copy(self, **kw):
+ return self.adapt(self.__class__)
+
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
@@ -440,7 +513,7 @@ class TypeEngine(Visitable):
end-user customization of this behavior.
"""
- _coerced_type = _type_map.get(type(value), NULLTYPE)
+ _coerced_type = _resolve_value_to_type(value)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
@@ -577,7 +650,7 @@ class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
return self
-class TypeDecorator(TypeEngine):
+class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
@@ -602,7 +675,7 @@ class TypeDecorator(TypeEngine):
def process_result_value(self, value, dialect):
return value[7:]
- def copy(self):
+ def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
@@ -656,6 +729,26 @@ class TypeDecorator(TypeEngine):
else:
return self
+ .. warning::
+
+ Note that the **behavior of coerce_compared_value is not inherited
+ by default from that of the base type**.
+ If the :class:`.TypeDecorator` is augmenting a
+ type that requires special logic for certain types of operators,
+ this method **must** be overridden. A key example is when decorating
+ the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
+ the default rules of :meth:`.TypeEngine.coerce_compared_value` should
+ be used in order to deal with operators like index operations::
+
+ class MyJsonType(TypeDecorator):
+ impl = postgresql.JSON
+
+ def coerce_compared_value(self, op, value):
+ return self.impl.coerce_compared_value(op, value)
+
+ Without the above step, index operations such as ``mycol['foo']``
+ will cause the index value ``'foo'`` to be JSON encoded.
+
"""
__visit_name__ = "type_decorator"
@@ -757,6 +850,18 @@ class TypeDecorator(TypeEngine):
"""
return self.impl._type_affinity
+ def _set_parent(self, column):
+        """Support SchemaEventTarget"""
+
+ if isinstance(self.impl, SchemaEventTarget):
+ self.impl._set_parent(column)
+
+ def _set_parent_with_dispatch(self, parent):
+        """Support SchemaEventTarget"""
+
+ if isinstance(self.impl, SchemaEventTarget):
+ self.impl._set_parent_with_dispatch(parent)
+
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
@@ -1031,7 +1136,7 @@ class TypeDecorator(TypeEngine):
"""
return self
- def copy(self):
+ def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py
index 8f502fc86..f5aa9f228 100644
--- a/lib/sqlalchemy/sql/util.py
+++ b/lib/sqlalchemy/sql/util.py
@@ -154,6 +154,7 @@ def unwrap_order_by(clause):
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
+ result = []
stack = deque([clause])
while stack:
t = stack.popleft()
@@ -166,11 +167,13 @@ def unwrap_order_by(clause):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
- cols.add(t)
+ if t not in cols:
+ cols.add(t)
+ result.append(t)
else:
for c in t.get_children():
stack.append(c)
- return cols
+ return result
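[Editorial note] A hedged sketch of the changed contract; previously the
function returned an unordered ``column_set``. ``t`` is an assumed Table::

    stmt = select([t]).order_by(t.c.b.desc(), t.c.a, t.c.b)
    unwrap_order_by(stmt._order_by_clause)
    # -> [t.c.b, t.c.a]   (de-duplicated, original order preserved)
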
def clause_is_present(clause, search):
@@ -200,6 +203,21 @@ def surface_selectables(clause):
stack.append(elem.element)
+def surface_column_elements(clause):
+ """traverse and yield only outer-exposed column elements, such as would
+ be addressable in the WHERE clause of a SELECT if this element were
+ in the columns clause."""
+
+ stack = deque([clause])
+ while stack:
+ elem = stack.popleft()
+ yield elem
+ for sub in elem.get_children():
+ if isinstance(sub, FromGrouping):
+ continue
+ stack.append(sub)
+
+
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
@@ -433,7 +451,6 @@ def criterion_as_pairs(expression, consider_as_foreign_keys=None,
return pairs
-
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py
index 7482e32a1..bd6377eb7 100644
--- a/lib/sqlalchemy/testing/__init__.py
+++ b/lib/sqlalchemy/testing/__init__.py
@@ -21,7 +21,8 @@ def against(*queries):
from .assertions import emits_warning, emits_warning_on, uses_deprecated, \
eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \
assert_raises_message, AssertsCompiledSQL, ComparesTables, \
- AssertsExecutionResults, expect_deprecated, expect_warnings
+ AssertsExecutionResults, expect_deprecated, expect_warnings, \
+ in_, not_in_
from .util import run_as_contextmanager, rowset, fail, \
provide_metadata, adict, force_drop_names, \
diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py
index 01fa0b8a9..63667654d 100644
--- a/lib/sqlalchemy/testing/assertions.py
+++ b/lib/sqlalchemy/testing/assertions.py
@@ -121,7 +121,7 @@ def uses_deprecated(*messages):
def _expect_warnings(exc_cls, messages, regex=True, assert_=True):
if regex:
- filters = [re.compile(msg, re.I) for msg in messages]
+ filters = [re.compile(msg, re.I | re.S) for msg in messages]
else:
filters = messages
@@ -229,6 +229,16 @@ def is_not_(a, b, msg=None):
assert a is not b, msg or "%r is %r" % (a, b)
+def in_(a, b, msg=None):
+ """Assert a in b, with repr messaging on failure."""
+ assert a in b, msg or "%r not in %r" % (a, b)
+
+
+def not_in_(a, b, msg=None):
+    """Assert a not in b, with repr messaging on failure."""
+ assert a not in b, msg or "%r is in %r" % (a, b)
+
+
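[Editorial note] A brief hedged usage sketch of the new assertions::

    in_("x", ["x", "y"])       # passes
    not_in_("z", ["x", "y"])   # passes
    in_("z", ["x", "y"])       # AssertionError: 'z' not in ['x', 'y']
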
def startswith_(a, fragment, msg=None):
"""Assert a.startswith(fragment), with repr messaging on failure."""
assert a.startswith(fragment), msg or "%r does not start with %r" % (
diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py
index 243493607..39d078985 100644
--- a/lib/sqlalchemy/testing/assertsql.py
+++ b/lib/sqlalchemy/testing/assertsql.py
@@ -13,6 +13,7 @@ import contextlib
from .. import event
from sqlalchemy.schema import _DDLCompiles
from sqlalchemy.engine.util import _distill_params
+from sqlalchemy.engine import url
class AssertRule(object):
@@ -58,16 +59,25 @@ class CursorSQL(SQLMatchRule):
class CompiledSQL(SQLMatchRule):
- def __init__(self, statement, params=None):
+ def __init__(self, statement, params=None, dialect='default'):
self.statement = statement
self.params = params
+ self.dialect = dialect
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r'[\n\t]', '', self.statement)
return received_statement == stmt
def _compile_dialect(self, execute_observed):
- return DefaultDialect()
+ if self.dialect == 'default':
+ return DefaultDialect()
+ else:
+ # ugh
+ if self.dialect == 'postgresql':
+ params = {'implicit_returning': True}
+ else:
+ params = {}
+ return url.URL(self.dialect).get_dialect()(**params)
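[Editorial note] A hedged sketch of how the new ``dialect`` parameter might be
used in a test assertion; ``asserter`` stands in for whatever
execution-assertion fixture the test employs and is an assumption here::

    asserter.assert_(
        CompiledSQL(
            "INSERT INTO t (x) VALUES (%(x)s) RETURNING t.id",
            [{"x": 5}],
            dialect="postgresql"))
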
def _received_statement(self, execute_observed):
"""reconstruct the statement and params in terms
@@ -159,7 +169,7 @@ class CompiledSQL(SQLMatchRule):
'Testing for compiled statement %r partial params %r, '
'received %%(received_statement)r with params '
'%%(received_parameters)r' % (
- self.statement, expected_params
+ self.statement.replace('%', '%%'), expected_params
)
)
@@ -170,6 +180,7 @@ class RegexSQL(CompiledSQL):
self.regex = re.compile(regex)
self.orig_regex = regex
self.params = params
+ self.dialect = 'default'
def _failure_message(self, expected_params):
return (
diff --git a/lib/sqlalchemy/testing/distutils_run.py b/lib/sqlalchemy/testing/distutils_run.py
deleted file mode 100644
index 38de8872c..000000000
--- a/lib/sqlalchemy/testing/distutils_run.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""Quick and easy way to get setup.py test to run py.test without any
-custom setuptools/distutils code.
-
-"""
-import unittest
-import pytest
-
-
-class TestSuite(unittest.TestCase):
- def test_sqlalchemy(self):
- pytest.main(["-n", "4", "-q"])
diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py
index 972dec3a9..5d7baeb9c 100644
--- a/lib/sqlalchemy/testing/exclusions.py
+++ b/lib/sqlalchemy/testing/exclusions.py
@@ -12,6 +12,7 @@ from . import config
from .. import util
import inspect
import contextlib
+from sqlalchemy.util.compat import inspect_getargspec
def skip_if(predicate, reason=None):
@@ -295,7 +296,7 @@ class SpecPredicate(Predicate):
class LambdaPredicate(Predicate):
def __init__(self, lambda_, description=None, args=None, kw=None):
- spec = inspect.getargspec(lambda_)
+ spec = inspect_getargspec(lambda_)
if not spec[0]:
self.lambda_ = lambda db: lambda_()
else:
@@ -397,8 +398,8 @@ def closed():
return skip_if(BooleanPredicate(True, "marked as skip"))
-def fails():
- return fails_if(BooleanPredicate(True, "expected to fail"))
+def fails(reason=None):
+ return fails_if(BooleanPredicate(True, reason or "expected to fail"))
@decorator
@@ -407,19 +408,19 @@ def future(fn, *arg):
def fails_on(db, reason=None):
- return fails_if(SpecPredicate(db), reason)
+ return fails_if(Predicate.as_predicate(db), reason)
def fails_on_everything_except(*dbs):
return succeeds_if(
OrPredicate([
- SpecPredicate(db) for db in dbs
+ Predicate.as_predicate(db) for db in dbs
])
)
def skip(db, reason=None):
- return skip_if(SpecPredicate(db), reason)
+ return skip_if(Predicate.as_predicate(db), reason)
def only_on(dbs, reason=None):
diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py
index e16bc77c0..5cd0244ef 100644
--- a/lib/sqlalchemy/testing/fixtures.py
+++ b/lib/sqlalchemy/testing/fixtures.py
@@ -275,12 +275,14 @@ class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
def setup(self):
self._setup_each_tables()
+ self._setup_each_classes()
self._setup_each_mappers()
self._setup_each_inserts()
def teardown(self):
sa.orm.session.Session.close_all()
self._teardown_each_mappers()
+ self._teardown_each_classes()
self._teardown_each_tables()
@classmethod
@@ -302,6 +304,10 @@ class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
if self.run_setup_mappers == 'each':
self._with_register_classes(self.setup_mappers)
+ def _setup_each_classes(self):
+ if self.run_setup_classes == 'each':
+ self._with_register_classes(self.setup_classes)
+
@classmethod
def _with_register_classes(cls, fn):
"""Run a setup method, framing the operation with a Base class
@@ -336,6 +342,10 @@ class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
if self.run_setup_mappers != 'once':
sa.orm.clear_mappers()
+ def _teardown_each_classes(self):
+ if self.run_setup_classes != 'once':
+ self.classes.clear()
+
@classmethod
def setup_classes(cls):
pass
diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py
index ef304afa6..6cdec05ad 100644
--- a/lib/sqlalchemy/testing/plugin/plugin_base.py
+++ b/lib/sqlalchemy/testing/plugin/plugin_base.py
@@ -40,7 +40,6 @@ file_config = None
logging = None
-db_opts = {}
include_tags = set()
exclude_tags = set()
options = None
@@ -115,7 +114,6 @@ def memoize_important_follower_config(dict_):
"""
dict_['memoized_config'] = {
- 'db_opts': db_opts,
'include_tags': include_tags,
'exclude_tags': exclude_tags
}
@@ -127,8 +125,7 @@ def restore_important_follower_config(dict_):
This invokes in the follower process.
"""
- global db_opts, include_tags, exclude_tags
- db_opts.update(dict_['memoized_config']['db_opts'])
+ global include_tags, exclude_tags
include_tags.update(dict_['memoized_config']['include_tags'])
exclude_tags.update(dict_['memoized_config']['exclude_tags'])
@@ -268,7 +265,7 @@ def _engine_uri(options, file_config):
for db_url in db_urls:
cfg = provision.setup_config(
- db_url, db_opts, options, file_config, provision.FOLLOWER_IDENT)
+ db_url, options, file_config, provision.FOLLOWER_IDENT)
if not config._current:
cfg.set_as_current(cfg, testing)
diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py
index 8469a0658..3f9ddae73 100644
--- a/lib/sqlalchemy/testing/provision.py
+++ b/lib/sqlalchemy/testing/provision.py
@@ -2,7 +2,7 @@ from sqlalchemy.engine import url as sa_url
from sqlalchemy import text
from sqlalchemy.util import compat
from . import config, engines
-
+import os
FOLLOWER_IDENT = None
@@ -46,11 +46,13 @@ def configure_follower(follower_ident):
_configure_follower(cfg, follower_ident)
-def setup_config(db_url, db_opts, options, file_config, follower_ident):
+def setup_config(db_url, options, file_config, follower_ident):
if follower_ident:
db_url = _follower_url_from_main(db_url, follower_ident)
+ db_opts = {}
_update_db_opts(db_url, db_opts)
eng = engines.testing_engine(db_url, db_opts)
+ _post_configure_engine(db_url, eng, follower_ident)
eng.connect().close()
cfg = config.Config.register(eng, db_opts, options, file_config)
if follower_ident:
@@ -105,6 +107,11 @@ def _configure_follower(cfg, ident):
@register.init
+def _post_configure_engine(url, engine, follower_ident):
+ pass
+
+
+@register.init
def _follower_url_from_main(url, ident):
url = sa_url.make_url(url)
url.database = ident
@@ -125,6 +132,23 @@ def _sqlite_follower_url_from_main(url, ident):
return sa_url.make_url("sqlite:///%s.db" % ident)
+@_post_configure_engine.for_db("sqlite")
+def _sqlite_post_configure_engine(url, engine, follower_ident):
+ from sqlalchemy import event
+
+ @event.listens_for(engine, "connect")
+ def connect(dbapi_connection, connection_record):
+ # use file DBs in all cases, memory acts kind of strangely
+            # as an attached database
+ if not follower_ident:
+ dbapi_connection.execute(
+ 'ATTACH DATABASE "test_schema.db" AS test_schema')
+ else:
+ dbapi_connection.execute(
+ 'ATTACH DATABASE "%s_test_schema.db" AS test_schema'
+ % follower_ident)
+
+
@_create_db.for_db("postgresql")
def _pg_create_db(cfg, eng, ident):
with eng.connect().execution_options(
@@ -175,8 +199,10 @@ def _pg_drop_db(cfg, eng, ident):
@_drop_db.for_db("sqlite")
def _sqlite_drop_db(cfg, eng, ident):
- pass
- #os.remove("%s.db" % ident)
+ if ident:
+ os.remove("%s_test_schema.db" % ident)
+ else:
+ os.remove("%s.db" % ident)
@_drop_db.for_db("mysql")
diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py
index e8b3a995f..15bfad831 100644
--- a/lib/sqlalchemy/testing/requirements.py
+++ b/lib/sqlalchemy/testing/requirements.py
@@ -111,6 +111,32 @@ class SuiteRequirements(Requirements):
return exclusions.open()
@property
+ def parens_in_union_contained_select_w_limit_offset(self):
+ """Target database must support parenthesized SELECT in UNION
+ when LIMIT/OFFSET is specifically present.
+
+        E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
+
+ This is known to fail on SQLite.
+
+ """
+ return exclusions.open()
+
+ @property
+ def parens_in_union_contained_select_wo_limit_offset(self):
+ """Target database must support parenthesized SELECT in UNION
+ when OFFSET/LIMIT is specifically not present.
+
+        E.g. (SELECT ...) UNION (SELECT ..)
+
+ This is known to fail on SQLite. It also fails on Oracle
+ because without LIMIT/OFFSET, there is currently no step that
+ creates an additional subquery.
+
+ """
+ return exclusions.open()
+
+ @property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py
index 93b52ad58..257578668 100644
--- a/lib/sqlalchemy/testing/schema.py
+++ b/lib/sqlalchemy/testing/schema.py
@@ -71,9 +71,12 @@ def Column(*args, **kw):
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
- if 'test_needs_autoincrement' in test_opts and \
+ if test_opts.get('test_needs_autoincrement', False) and \
kw.get('primary_key', False):
+ if col.default is None and col.server_default is None:
+ col.autoincrement = True
+
# allow any test suite to pick up on this
col.info['test_needs_autoincrement'] = True
diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py
index 3edbdeb8c..288a85973 100644
--- a/lib/sqlalchemy/testing/suite/test_reflection.py
+++ b/lib/sqlalchemy/testing/suite/test_reflection.py
@@ -531,12 +531,20 @@ class ComponentReflectionTest(fixtures.TablesTest):
@testing.provide_metadata
def _test_get_unique_constraints(self, schema=None):
+ # SQLite dialect needs to parse the names of the constraints
+ # separately from what it gets from PRAGMA index_list(), and
+        # then matches them up, so the same set of column_names in two
+ # constraints will confuse it. Perhaps we should no longer
+ # bother with index_list() here since we have the whole
+ # CREATE TABLE?
uniques = sorted(
[
{'name': 'unique_a', 'column_names': ['a']},
{'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']},
{'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']},
{'name': 'unique_asc_key', 'column_names': ['asc', 'key']},
+ {'name': 'i.have.dots', 'column_names': ['b']},
+ {'name': 'i have spaces', 'column_names': ['c']},
],
key=operator.itemgetter('name')
)
diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py
index d4bf63b55..e7de356b8 100644
--- a/lib/sqlalchemy/testing/suite/test_select.py
+++ b/lib/sqlalchemy/testing/suite/test_select.py
@@ -2,7 +2,7 @@ from .. import fixtures, config
from ..assertions import eq_
from sqlalchemy import util
-from sqlalchemy import Integer, String, select, func, bindparam
+from sqlalchemy import Integer, String, select, func, bindparam, union
from sqlalchemy import testing
from ..schema import Table, Column
@@ -146,7 +146,7 @@ class LimitOffsetTest(fixtures.TablesTest):
select([table]).order_by(table.c.id).limit(2).offset(1),
[(2, 2, 3), (3, 3, 4)]
)
-
+
@testing.requires.offset
def test_limit_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
@@ -190,3 +190,123 @@ class LimitOffsetTest(fixtures.TablesTest):
[(2, 2, 3), (3, 3, 4)],
params={"l": 2, "o": 1}
)
+
+
+class CompoundSelectTest(fixtures.TablesTest):
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table("some_table", metadata,
+ Column('id', Integer, primary_key=True),
+ Column('x', Integer),
+ Column('y', Integer))
+
+ @classmethod
+ def insert_data(cls):
+ config.db.execute(
+ cls.tables.some_table.insert(),
+ [
+ {"id": 1, "x": 1, "y": 2},
+ {"id": 2, "x": 2, "y": 3},
+ {"id": 3, "x": 3, "y": 4},
+ {"id": 4, "x": 4, "y": 5},
+ ]
+ )
+
+ def _assert_result(self, select, result, params=()):
+ eq_(
+ config.db.execute(select, params).fetchall(),
+ result
+ )
+
+ def test_plain_union(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2)
+ s2 = select([table]).where(table.c.id == 3)
+
+ u1 = union(s1, s2)
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ def test_select_from_plain_union(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2)
+ s2 = select([table]).where(table.c.id == 3)
+
+ u1 = union(s1, s2).alias().select()
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ @testing.requires.parens_in_union_contained_select_w_limit_offset
+ def test_limit_offset_selectable_in_unions(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2).\
+ limit(1).order_by(table.c.id)
+ s2 = select([table]).where(table.c.id == 3).\
+ limit(1).order_by(table.c.id)
+
+ u1 = union(s1, s2).limit(2)
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ @testing.requires.parens_in_union_contained_select_wo_limit_offset
+ def test_order_by_selectable_in_unions(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2).\
+ order_by(table.c.id)
+ s2 = select([table]).where(table.c.id == 3).\
+ order_by(table.c.id)
+
+ u1 = union(s1, s2).limit(2)
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ def test_distinct_selectable_in_unions(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2).\
+ distinct()
+ s2 = select([table]).where(table.c.id == 3).\
+ distinct()
+
+ u1 = union(s1, s2).limit(2)
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ @testing.requires.parens_in_union_contained_select_w_limit_offset
+ def test_limit_offset_in_unions_from_alias(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2).\
+ limit(1).order_by(table.c.id)
+ s2 = select([table]).where(table.c.id == 3).\
+ limit(1).order_by(table.c.id)
+
+ # this necessarily has double parens
+ u1 = union(s1, s2).alias()
+ self._assert_result(
+ u1.select().limit(2).order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
+
+ def test_limit_offset_aliased_selectable_in_unions(self):
+ table = self.tables.some_table
+ s1 = select([table]).where(table.c.id == 2).\
+ limit(1).order_by(table.c.id).alias().select()
+ s2 = select([table]).where(table.c.id == 3).\
+ limit(1).order_by(table.c.id).alias().select()
+
+ u1 = union(s1, s2).limit(2)
+ self._assert_result(
+ u1.order_by(u1.c.id),
+ [(2, 2, 3), (3, 3, 4)]
+ )
diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py
index 9ab92e90b..d82e683d9 100644
--- a/lib/sqlalchemy/types.py
+++ b/lib/sqlalchemy/types.py
@@ -16,7 +16,8 @@ __all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType',
'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer',
'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime',
'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode',
- 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum']
+ 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum',
+ 'Indexable', 'Array']
from .sql.type_api import (
adapt_type,
@@ -27,6 +28,7 @@ from .sql.type_api import (
UserDefinedType
)
from .sql.sqltypes import (
+ Array,
BIGINT,
BINARY,
BLOB,
@@ -46,6 +48,7 @@ from .sql.sqltypes import (
Enum,
FLOAT,
Float,
+ Indexable,
INT,
INTEGER,
Integer,
@@ -74,5 +77,4 @@ from .sql.sqltypes import (
UnicodeText,
VARBINARY,
VARCHAR,
- _type_map
)
diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py
index ed968f168..a15ca8efa 100644
--- a/lib/sqlalchemy/util/__init__.py
+++ b/lib/sqlalchemy/util/__init__.py
@@ -6,7 +6,7 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .compat import callable, cmp, reduce, \
- threading, py3k, py33, py2k, jython, pypy, cpython, win32, \
+ threading, py3k, py33, py36, py2k, jython, pypy, cpython, win32, \
pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \
raise_from_cause, text_type, safe_kwarg, string_types, int_types, \
binary_type, nested, \
@@ -36,7 +36,7 @@ from .langhelpers import iterate_attributes, class_hierarchy, \
generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \
safe_reraise,\
get_callable_argspec, only_once, attrsetter, ellipses_string, \
- warn_limited, map_bits, MemoizedSlots, EnsureKWArgType
+ warn_limited, map_bits, MemoizedSlots, EnsureKWArgType, wrap_callable
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation, inject_docstring_text
diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py
index 5b6f691f1..25c88c662 100644
--- a/lib/sqlalchemy/util/compat.py
+++ b/lib/sqlalchemy/util/compat.py
@@ -14,6 +14,7 @@ try:
except ImportError:
import dummy_threading as threading
+py36 = sys.version_info >= (3, 6)
py33 = sys.version_info >= (3, 3)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py
index 499515142..11aa9384d 100644
--- a/lib/sqlalchemy/util/langhelpers.py
+++ b/lib/sqlalchemy/util/langhelpers.py
@@ -426,7 +426,7 @@ def getargspec_init(method):
"""
try:
- return inspect.getargspec(method)
+ return compat.inspect_getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
@@ -464,7 +464,7 @@ def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
- inspect.getargspec(insp.__init__)
+ compat.inspect_getargspec(insp.__init__)
except TypeError:
continue
else:
@@ -625,7 +625,7 @@ def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
except AttributeError:
continue
try:
- spec = inspect.getargspec(fn)
+ spec = compat.inspect_getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
@@ -805,6 +805,8 @@ class MemoizedSlots(object):
"""
+ __slots__ = ()
+
def _fallback_getattr(self, key):
raise AttributeError(key)
@@ -1017,7 +1019,9 @@ def constructor_copy(obj, cls, *args, **kw):
"""
names = get_cls_kwargs(cls)
- kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__)
+ kw.update(
+ (k, obj.__dict__[k]) for k in names.difference(kw)
+ if k in obj.__dict__)
return cls(*args, **kw)
@@ -1361,7 +1365,7 @@ class EnsureKWArgType(type):
m = re.match(fn_reg, key)
if m:
fn = clsdict[key]
- spec = inspect.getargspec(fn)
+ spec = compat.inspect_getargspec(fn)
if not spec.keywords:
clsdict[key] = wrapped = cls._wrap_w_kw(fn)
setattr(cls, key, wrapped)
@@ -1373,3 +1377,25 @@ class EnsureKWArgType(type):
return fn(*arg)
return update_wrapper(wrap, fn)
+
+def wrap_callable(wrapper, fn):
+ """Augment functools.update_wrapper() to work with objects with
+ a ``__call__()`` method.
+
+ :param fn:
+ object with __call__ method
+
+ """
+ if hasattr(fn, '__name__'):
+ return update_wrapper(wrapper, fn)
+ else:
+ _f = wrapper
+ _f.__name__ = fn.__class__.__name__
+ _f.__module__ = fn.__module__
+
+ if hasattr(fn.__call__, '__doc__') and fn.__call__.__doc__:
+ _f.__doc__ = fn.__call__.__doc__
+ elif fn.__doc__:
+ _f.__doc__ = fn.__doc__
+
+ return _f
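[Editorial note] A hedged usage sketch of the helper, mirroring how
``ColumnDefault`` applies it earlier in this changeset::

    class TenDefault(object):
        "produce the number ten"

        def __call__(self):
            return 10

    default = TenDefault()
    wrapped = wrap_callable(lambda ctx: default(), default)
    wrapped.__name__   # 'TenDefault'
    wrapped.__doc__    # 'produce the number ten'
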
diff --git a/setup.cfg b/setup.cfg
index dc10877f7..2d203f1ed 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -43,7 +43,7 @@ pg8000=postgresql+pg8000://scott:tiger@127.0.0.1:5432/test
postgres=postgresql://scott:tiger@127.0.0.1:5432/test
postgresql_jython=postgresql+zxjdbc://scott:tiger@127.0.0.1:5432/test
postgresql_psycopg2cffi=postgresql+psycopg2cffi://scott:tiger@127.0.0.1:5432/test
-mysql=mysql://scott:tiger@127.0.0.1:3306/test
+mysql=mysql://scott:tiger@127.0.0.1:3306/test?charset=utf8&use_unicode=0
mysqlconnector=mysql+mysqlconnector://scott:tiger@127.0.0.1:3306/test
mssql=mssql+pyodbc://scott:tiger@ms_2008
oursql=mysql+oursql://scott:tiger@127.0.0.1:3306/test
diff --git a/setup.py b/setup.py
index 09b524cd2..5b97cb9fe 100644
--- a/setup.py
+++ b/setup.py
@@ -1,40 +1,20 @@
-"""setup.py
-
-Please see README for basic installation instructions.
-
-"""
-
import os
+import platform
import re
import sys
from distutils.command.build_ext import build_ext
-from distutils.errors import (CCompilerError, DistutilsExecError,
- DistutilsPlatformError)
-
-has_feature = False
-try:
- from setuptools import setup, Extension
- try:
- # see
- # https://bitbucket.org/pypa/setuptools/issue/65/deprecate-and-remove-features,
- # where they may remove Feature.
- from setuptools import Feature
- has_feature = True
- except ImportError:
- pass
-except ImportError:
- from distutils.core import setup, Extension
-
-py3k = False
+from distutils.errors import CCompilerError
+from distutils.errors import DistutilsExecError
+from distutils.errors import DistutilsPlatformError
+from setuptools import Distribution as _Distribution, Extension
+from setuptools import setup
+from setuptools import find_packages
+from setuptools.command.test import test as TestCommand
cmdclass = {}
-extra = {}
if sys.version_info < (2, 6):
raise Exception("SQLAlchemy requires Python 2.6 or higher.")
-elif sys.version_info >= (3, 0):
- py3k = True
-import platform
cpython = platform.python_implementation() == 'CPython'
ext_modules = [
@@ -44,7 +24,7 @@ ext_modules = [
sources=['lib/sqlalchemy/cextension/resultproxy.c']),
Extension('sqlalchemy.cutils',
sources=['lib/sqlalchemy/cextension/utils.c'])
- ]
+]
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
@@ -82,6 +62,44 @@ class ve_build_ext(build_ext):
cmdclass['build_ext'] = ve_build_ext
+class Distribution(_Distribution):
+
+ def has_ext_modules(self):
+ # We want to always claim that we have ext_modules. This will be fine
+ # if we don't actually have them (such as on PyPy) because nothing
+        # will get built; however, we don't want to provide an overly broad
+ # Wheel package when building a wheel without C support. This will
+ # ensure that Wheel knows to treat us as if the build output is
+ # platform specific.
+ return True
+
+
+class PyTest(TestCommand):
+ # from https://pytest.org/latest/goodpractises.html\
+ # #integration-with-setuptools-test-commands
+ user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
+
+ default_options = ["-n", "4", "-q"]
+
+ def initialize_options(self):
+ TestCommand.initialize_options(self)
+ self.pytest_args = ""
+
+ def finalize_options(self):
+ TestCommand.finalize_options(self)
+ self.test_args = []
+ self.test_suite = True
+
+ def run_tests(self):
+ # import here, cause outside the eggs aren't loaded
+ import pytest
+ errno = pytest.main(
+ " ".join(self.default_options) + " " + self.pytest_args)
+ sys.exit(errno)
+
+cmdclass['test'] = PyTest
+
+
def status_msgs(*msgs):
print('*' * 75)
for msg in msgs:
@@ -89,66 +107,53 @@ def status_msgs(*msgs):
print('*' * 75)
-def find_packages(location):
- packages = []
- for pkg in ['sqlalchemy']:
- for _dir, subdirectories, files in (
- os.walk(os.path.join(location, pkg))):
- if '__init__.py' in files:
- tokens = _dir.split(os.sep)[len(location.split(os.sep)):]
- packages.append(".".join(tokens))
- return packages
+with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ 'lib', 'sqlalchemy', '__init__.py')) as v_file:
+ VERSION = re.compile(
+ r".*__version__ = '(.*?)'",
+ re.S).match(v_file.read()).group(1)
-v_file = open(os.path.join(os.path.dirname(__file__),
- 'lib', 'sqlalchemy', '__init__.py'))
-VERSION = re.compile(r".*__version__ = '(.*?)'",
- re.S).match(v_file.read()).group(1)
-v_file.close()
-
-r_file = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
-readme = r_file.read()
-r_file.close()
+with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
+ readme = r_file.read()
def run_setup(with_cext):
- kwargs = extra.copy()
+ kwargs = {}
if with_cext:
- if has_feature:
- kwargs['features'] = {'cextensions': Feature(
- "optional C speed-enhancements",
- standard=True,
- ext_modules=ext_modules
- )}
- else:
- kwargs['ext_modules'] = ext_modules
-
- setup(name="SQLAlchemy",
- version=VERSION,
- description="Database Abstraction Library",
- author="Mike Bayer",
- author_email="mike_mp@zzzcomputing.com",
- url="http://www.sqlalchemy.org",
- packages=find_packages('lib'),
- package_dir={'': 'lib'},
- license="MIT License",
- cmdclass=cmdclass,
- tests_require=['pytest >= 2.5.2', 'mock', 'pytest-xdist'],
- test_suite="sqlalchemy.testing.distutils_run",
- long_description=readme,
- classifiers=[
- "Development Status :: 5 - Production/Stable",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: MIT License",
- "Programming Language :: Python",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: Implementation :: CPython",
- "Programming Language :: Python :: Implementation :: Jython",
- "Programming Language :: Python :: Implementation :: PyPy",
- "Topic :: Database :: Front-Ends",
- "Operating System :: OS Independent",
- ],
- **kwargs
- )
+ kwargs['ext_modules'] = ext_modules
+ else:
+ kwargs['ext_modules'] = []
+
+ setup(
+ name="SQLAlchemy",
+ version=VERSION,
+ description="Database Abstraction Library",
+ author="Mike Bayer",
+ author_email="mike_mp@zzzcomputing.com",
+ url="http://www.sqlalchemy.org",
+ packages=find_packages('lib'),
+ package_dir={'': 'lib'},
+ license="MIT License",
+ cmdclass=cmdclass,
+ tests_require=['pytest >= 2.5.2', 'mock', 'pytest-xdist'],
+ long_description=readme,
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: Jython",
+ "Programming Language :: Python :: Implementation :: PyPy",
+ "Topic :: Database :: Front-Ends",
+ "Operating System :: OS Independent",
+ ],
+ distclass=Distribution,
+ **kwargs
+ )
if not cpython:
run_setup(False)
diff --git a/test/aaa_profiling/test_compiler.py b/test/aaa_profiling/test_compiler.py
index 5eece4602..5095be103 100644
--- a/test/aaa_profiling/test_compiler.py
+++ b/test/aaa_profiling/test_compiler.py
@@ -32,8 +32,8 @@ class CompileTest(fixtures.TestBase, AssertsExecutionResults):
for t in (t1, t2):
for c in t.c:
c.type._type_affinity
- from sqlalchemy import types
- for t in list(types._type_map.values()):
+ from sqlalchemy.sql import sqltypes
+ for t in list(sqltypes._type_map.values()):
t._type_affinity
cls.dialect = default.DefaultDialect()
diff --git a/test/base/test_tutorials.py b/test/base/test_tutorials.py
new file mode 100644
index 000000000..73dcbb524
--- /dev/null
+++ b/test/base/test_tutorials.py
@@ -0,0 +1,144 @@
+from __future__ import print_function
+from sqlalchemy.testing import fixtures
+from sqlalchemy.testing import config
+import doctest
+import logging
+import sys
+import re
+import os
+
+
+class DocTest(fixtures.TestBase):
+ def _setup_logger(self):
+ rootlogger = logging.getLogger('sqlalchemy.engine.base.Engine')
+
+ class MyStream(object):
+ def write(self, string):
+ sys.stdout.write(string)
+ sys.stdout.flush()
+
+ def flush(self):
+ pass
+
+ self._handler = handler = logging.StreamHandler(MyStream())
+ handler.setFormatter(logging.Formatter('%(message)s'))
+ rootlogger.addHandler(handler)
+
+ def _teardown_logger(self):
+ rootlogger = logging.getLogger('sqlalchemy.engine.base.Engine')
+ rootlogger.removeHandler(self._handler)
+
+ def _setup_create_table_patcher(self):
+ from sqlalchemy.sql import ddl
+ self.orig_sort = ddl.sort_tables_and_constraints
+
+ def our_sort(tables, **kw):
+ return self.orig_sort(
+ sorted(tables, key=lambda t: t.key), **kw
+ )
+ ddl.sort_tables_and_constraints = our_sort
+
+ def _teardown_create_table_patcher(self):
+ from sqlalchemy.sql import ddl
+ ddl.sort_tables_and_constraints = self.orig_sort
+
+ def setup(self):
+ self._setup_logger()
+ self._setup_create_table_patcher()
+
+ def teardown(self):
+ self._teardown_create_table_patcher()
+ self._teardown_logger()
+
+
+ def _run_doctest_for_content(self, name, content):
+ optionflags = (
+ doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE |
+ _get_allow_unicode_flag()
+ )
+ runner = doctest.DocTestRunner(
+ verbose=None, optionflags=optionflags,
+ checker=_get_unicode_checker())
+ globs = {
+ 'print_function': print_function}
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(content, globs, name, name, 0)
+ runner.run(test)
+ runner.summarize()
+ assert not runner.failures
+
+ def _run_doctest(self, fname):
+ here = os.path.dirname(__file__)
+ sqla_base = os.path.normpath(os.path.join(here, "..", ".."))
+ path = os.path.join(sqla_base, "doc/build", fname)
+ if not os.path.exists(path):
+ config.skip_test("Can't find documentation file %r" % path)
+ with open(path) as file_:
+ content = file_.read()
+ content = re.sub(r'{(?:stop|sql|opensql)}', '', content)
+ self._run_doctest_for_content(fname, content)
+
+ def test_orm(self):
+ self._run_doctest("orm/tutorial.rst")
+
+ def test_core(self):
+ self._run_doctest("core/tutorial.rst")
+
+
+# unicode checker courtesy py.test
+
+
+def _get_unicode_checker():
+ """
+    Returns a doctest.OutputChecker subclass that takes into account the
+ ALLOW_UNICODE option to ignore u'' prefixes in strings. Useful
+ when the same doctest should run in Python 2 and Python 3.
+
+ An inner class is used to avoid importing "doctest" at the module
+ level.
+ """
+ if hasattr(_get_unicode_checker, 'UnicodeOutputChecker'):
+ return _get_unicode_checker.UnicodeOutputChecker()
+
+ import doctest
+ import re
+
+ class UnicodeOutputChecker(doctest.OutputChecker):
+ """
+ Copied from doctest_nose_plugin.py from the nltk project:
+ https://github.com/nltk/nltk
+ """
+
+ _literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+
+ def check_output(self, want, got, optionflags):
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if res:
+ return True
+
+ if not (optionflags & _get_allow_unicode_flag()):
+ return False
+
+ else: # pragma: no cover
+ # the code below will end up executed only in Python 2 in
+ # our tests, and our coverage check runs in Python 3 only
+ def remove_u_prefixes(txt):
+ return re.sub(self._literal_re, r'\1\2', txt)
+
+ want = remove_u_prefixes(want)
+ got = remove_u_prefixes(got)
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ return res
+
+ _get_unicode_checker.UnicodeOutputChecker = UnicodeOutputChecker
+ return _get_unicode_checker.UnicodeOutputChecker()
+
+
+def _get_allow_unicode_flag():
+ """
+ Registers and returns the ALLOW_UNICODE flag.
+ """
+ import doctest
+ return doctest.register_optionflag('ALLOW_UNICODE')
diff --git a/test/base/test_utils.py b/test/base/test_utils.py
index 256f52850..4370d612b 100644
--- a/test/base/test_utils.py
+++ b/test/base/test_utils.py
@@ -2,13 +2,14 @@ import copy
from sqlalchemy import util, sql, exc, testing
from sqlalchemy.testing import assert_raises, assert_raises_message, fixtures
-from sqlalchemy.testing import eq_, is_, ne_, fails_if
+from sqlalchemy.testing import eq_, is_, ne_, fails_if, mock
from sqlalchemy.testing.util import picklers, gc_collect
from sqlalchemy.util import classproperty, WeakSequence, get_callable_argspec
from sqlalchemy.sql import column
from sqlalchemy.util import langhelpers
import inspect
+
class _KeyedTupleTest(object):
def _fixture(self, values, labels):
@@ -284,6 +285,102 @@ class MemoizedAttrTest(fixtures.TestBase):
eq_(f1.bar(), 20)
eq_(val[0], 21)
+ def test_memoized_slots(self):
+ canary = mock.Mock()
+
+ class Foob(util.MemoizedSlots):
+ __slots__ = ('foo_bar', 'gogo')
+
+ def _memoized_method_gogo(self):
+ canary.method()
+ return "gogo"
+
+ def _memoized_attr_foo_bar(self):
+ canary.attr()
+ return "foobar"
+
+ f1 = Foob()
+ assert_raises(AttributeError, setattr, f1, "bar", "bat")
+
+ eq_(f1.foo_bar, "foobar")
+
+ eq_(f1.foo_bar, "foobar")
+
+ eq_(f1.gogo(), "gogo")
+
+ eq_(f1.gogo(), "gogo")
+
+ eq_(canary.mock_calls, [mock.call.attr(), mock.call.method()])
+
+
+class WrapCallableTest(fixtures.TestBase):
+ def test_wrapping_update_wrapper_fn(self):
+ def my_fancy_default():
+ """run the fancy default"""
+ return 10
+
+ c = util.wrap_callable(lambda: my_fancy_default, my_fancy_default)
+
+ eq_(c.__name__, "my_fancy_default")
+ eq_(c.__doc__, "run the fancy default")
+
+ def test_wrapping_update_wrapper_fn_nodocstring(self):
+ def my_fancy_default():
+ return 10
+
+ c = util.wrap_callable(lambda: my_fancy_default, my_fancy_default)
+ eq_(c.__name__, "my_fancy_default")
+ eq_(c.__doc__, None)
+
+ def test_wrapping_update_wrapper_cls(self):
+ class MyFancyDefault(object):
+ """a fancy default"""
+
+ def __call__(self):
+ """run the fancy default"""
+ return 10
+
+ def_ = MyFancyDefault()
+ c = util.wrap_callable(lambda: def_(), def_)
+
+ eq_(c.__name__, "MyFancyDefault")
+ eq_(c.__doc__, "run the fancy default")
+
+ def test_wrapping_update_wrapper_cls_noclsdocstring(self):
+ class MyFancyDefault(object):
+
+ def __call__(self):
+ """run the fancy default"""
+ return 10
+
+ def_ = MyFancyDefault()
+ c = util.wrap_callable(lambda: def_(), def_)
+ eq_(c.__name__, "MyFancyDefault")
+ eq_(c.__doc__, "run the fancy default")
+
+ def test_wrapping_update_wrapper_cls_nomethdocstring(self):
+ class MyFancyDefault(object):
+ """a fancy default"""
+
+ def __call__(self):
+ return 10
+
+ def_ = MyFancyDefault()
+ c = util.wrap_callable(lambda: def_(), def_)
+ eq_(c.__name__, "MyFancyDefault")
+ eq_(c.__doc__, "a fancy default")
+
+ def test_wrapping_update_wrapper_cls_noclsdocstring_nomethdocstring(self):
+ class MyFancyDefault(object):
+
+ def __call__(self):
+ return 10
+
+ def_ = MyFancyDefault()
+ c = util.wrap_callable(lambda: def_(), def_)
+ eq_(c.__name__, "MyFancyDefault")
+ eq_(c.__doc__, None)
+
class ToListTest(fixtures.TestBase):
def test_from_string(self):
@@ -1103,7 +1200,10 @@ class IdentitySetTest(fixtures.TestBase):
return super_, sub_, twin1, twin2, unique1, unique2
def _assert_unorderable_types(self, callable_):
- if util.py3k:
+ if util.py36:
+ assert_raises_message(
+ TypeError, 'not supported between instances of', callable_)
+ elif util.py3k:
assert_raises_message(
TypeError, 'unorderable types', callable_)
else:
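
A rough standalone sketch of the memoization pattern that test_memoized_slots above exercises; this is illustrative only, not SQLAlchemy's actual MemoizedSlots implementation:

    class MemoizedSlotsSketch(object):
        """Compute-once attributes for __slots__ classes.

        Subclasses declare the memoized names in __slots__ and supply
        _memoized_attr_<name> / _memoized_method_<name> factories.
        """

        __slots__ = ()

        def __getattr__(self, key):
            if key.startswith('_memoized_'):
                raise AttributeError(key)
            attr_fn = getattr(self, '_memoized_attr_%s' % key, None)
            if attr_fn is not None:
                value = attr_fn()            # computed only on first access
                setattr(self, key, value)    # stored in the declared slot
                return value
            meth_fn = getattr(self, '_memoized_method_%s' % key, None)
            if meth_fn is not None:
                memo = []

                def call(*arg, **kw):
                    if not memo:
                        memo.append(meth_fn(*arg, **kw))
                    return memo[0]
                setattr(self, key, call)     # later lookups skip __getattr__
                return call
            raise AttributeError(key)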
diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py
index 9d89f040b..80be9f67d 100644
--- a/test/dialect/mssql/test_compiler.py
+++ b/test/dialect/mssql/test_compiler.py
@@ -12,7 +12,7 @@ from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
- __dialect__ = mssql.dialect(legacy_schema_aliasing=False)
+ __dialect__ = mssql.dialect()
def test_true_false(self):
self.assert_compile(
diff --git a/test/dialect/mssql/test_query.py b/test/dialect/mssql/test_query.py
index 61ae32ef4..32edfd7eb 100644
--- a/test/dialect/mssql/test_query.py
+++ b/test/dialect/mssql/test_query.py
@@ -41,17 +41,15 @@ class LegacySchemaAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
)
def _assert_sql(self, element, legacy_sql, modern_sql=None):
- dialect = mssql.dialect()
+ dialect = mssql.dialect(legacy_schema_aliasing=True)
- with assertions.expect_warnings(
- "legacy_schema_aliasing flag is defaulted to True.*"):
- self.assert_compile(
- element,
- legacy_sql,
- dialect=dialect
- )
+ self.assert_compile(
+ element,
+ legacy_sql,
+ dialect=dialect
+ )
- dialect = mssql.dialect(legacy_schema_aliasing=False)
+ dialect = mssql.dialect()
self.assert_compile(
element,
modern_sql or "foob",
diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py
index bee441586..e016a6e41 100644
--- a/test/dialect/mssql/test_reflection.py
+++ b/test/dialect/mssql/test_reflection.py
@@ -1,5 +1,5 @@
# -*- encoding: utf-8
-from sqlalchemy.testing import eq_
+from sqlalchemy.testing import eq_, is_, in_
from sqlalchemy import *
from sqlalchemy import types, schema, event
from sqlalchemy.databases import mssql
@@ -24,14 +24,14 @@ class ReflectionTest(fixtures.TestBase, ComparesTables):
Column('user_name', types.VARCHAR(20), nullable=False),
Column('test1', types.CHAR(5), nullable=False),
Column('test2', types.Float(5), nullable=False),
- Column('test3', types.Text('max')),
+ Column('test3', types.Text()),
Column('test4', types.Numeric, nullable=False),
Column('test5', types.DateTime),
Column('parent_user_id', types.Integer,
ForeignKey('engine_users.user_id')),
Column('test6', types.DateTime, nullable=False),
- Column('test7', types.Text('max')),
- Column('test8', types.LargeBinary('max')),
+ Column('test7', types.Text()),
+ Column('test8', types.LargeBinary()),
Column('test_passivedefault2', types.Integer,
server_default='5'),
Column('test9', types.BINARY(100)),
@@ -171,6 +171,32 @@ class ReflectionTest(fixtures.TestBase, ComparesTables):
set([t2.c['x col'], t2.c.y])
)
+ @testing.provide_metadata
+ def test_max_ident_in_varchar_not_present(self):
+ """test [ticket:3504].
+
+ Here we are testing not just that the "max" token comes back
+ as None, but also that these types accept "max" as the value
+ of "length" on construction, which isn't a directly documented
+ pattern, but is likely in common use.
+
+ """
+ metadata = self.metadata
+
+ Table(
+ 't', metadata,
+ Column('t1', types.String),
+ Column('t2', types.Text('max')),
+ Column('t3', types.Text('max')),
+ Column('t4', types.LargeBinary('max')),
+ Column('t5', types.VARBINARY('max')),
+ )
+ metadata.create_all()
+ for col in inspect(testing.db).get_columns('t'):
+ is_(col['type'].length, None)
+ in_('max', str(col['type'].compile(dialect=testing.db.dialect)))
+
+
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode, tables
from sqlalchemy.dialects.mssql import base
@@ -187,7 +213,7 @@ class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
stmt = tables.c.table_name == 'somename'
self.assert_compile(
stmt,
- "[TABLES_1].[TABLE_NAME] = :table_name_1",
+ "[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect
)
@@ -197,7 +223,7 @@ class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
stmt = tables.c.table_name == 'somename'
self.assert_compile(
stmt,
- "[TABLES_1].[TABLE_NAME] = CAST(:table_name_1 AS NVARCHAR(max))",
+ "[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect
)
diff --git a/test/dialect/mssql/test_types.py b/test/dialect/mssql/test_types.py
index 17ceb6b61..dad86c60a 100644
--- a/test/dialect/mssql/test_types.py
+++ b/test/dialect/mssql/test_types.py
@@ -1,5 +1,5 @@
# -*- encoding: utf-8
-from sqlalchemy.testing import eq_, engines, pickleable
+from sqlalchemy.testing import eq_, engines, pickleable, assert_raises_message
import datetime
import os
from sqlalchemy import Table, Column, MetaData, Float, \
@@ -8,7 +8,8 @@ from sqlalchemy import Table, Column, MetaData, Float, \
UnicodeText, LargeBinary
from sqlalchemy import types, schema
from sqlalchemy.databases import mssql
-from sqlalchemy.dialects.mssql.base import TIME
+from sqlalchemy.dialects.mssql.base import TIME, _MSDate
+from sqlalchemy.dialects.mssql.base import MS_2005_VERSION, MS_2008_VERSION
from sqlalchemy.testing import fixtures, \
AssertsExecutionResults, ComparesTables
from sqlalchemy import testing
@@ -33,6 +34,36 @@ class TimeTypeTest(fixtures.TestBase):
result_processor = mssql_time_type.result_processor(None, None)
eq_(expected, result_processor(value))
+ def test_result_processor_invalid(self):
+ mssql_time_type = TIME()
+ result_processor = mssql_time_type.result_processor(None, None)
+ assert_raises_message(
+ ValueError,
+ "could not parse 'abc' as a time value",
+ result_processor, 'abc'
+ )
+
+
+class MSDateTypeTest(fixtures.TestBase):
+
+ def test_result_processor(self):
+ expected = datetime.date(2000, 1, 2)
+ self._assert_result_processor(expected, '2000-01-02')
+
+ def _assert_result_processor(self, expected, value):
+ mssql_date_type = _MSDate()
+ result_processor = mssql_date_type.result_processor(None, None)
+ eq_(expected, result_processor(value))
+
+ def test_result_processor_invalid(self):
+ mssql_date_type = _MSDate()
+ result_processor = mssql_date_type.result_processor(None, None)
+ assert_raises_message(
+ ValueError,
+ "could not parse 'abc' as a date value",
+ result_processor, 'abc'
+ )
+
class TypeDDLTest(fixtures.TestBase):
@@ -173,6 +204,91 @@ class TypeDDLTest(fixtures.TestBase):
"%s %s" % (col.name, columns[index][3]))
self.assert_(repr(col))
+ def test_dates(self):
+ "Exercise type specification for date types."
+
+ columns = [
+ # column type, args, kwargs, expected ddl, server version
+ (mssql.MSDateTime, [], {},
+ 'DATETIME', None),
+
+ (types.DATE, [], {},
+ 'DATE', None),
+ (types.Date, [], {},
+ 'DATE', None),
+ (types.Date, [], {},
+ 'DATETIME', MS_2005_VERSION),
+ (mssql.MSDate, [], {},
+ 'DATE', None),
+ (mssql.MSDate, [], {},
+ 'DATETIME', MS_2005_VERSION),
+
+ (types.TIME, [], {},
+ 'TIME', None),
+ (types.Time, [], {},
+ 'TIME', None),
+ (mssql.MSTime, [], {},
+ 'TIME', None),
+ (mssql.MSTime, [1], {},
+ 'TIME(1)', None),
+ (types.Time, [], {},
+ 'DATETIME', MS_2005_VERSION),
+ (mssql.MSTime, [], {},
+ 'TIME', None),
+
+ (mssql.MSSmallDateTime, [], {},
+ 'SMALLDATETIME', None),
+
+ (mssql.MSDateTimeOffset, [], {},
+ 'DATETIMEOFFSET', None),
+ (mssql.MSDateTimeOffset, [1], {},
+ 'DATETIMEOFFSET(1)', None),
+
+ (mssql.MSDateTime2, [], {},
+ 'DATETIME2', None),
+ (mssql.MSDateTime2, [0], {},
+ 'DATETIME2(0)', None),
+ (mssql.MSDateTime2, [1], {},
+ 'DATETIME2(1)', None),
+
+ (mssql.MSTime, [0], {},
+ 'TIME(0)', None),
+
+ (mssql.MSDateTimeOffset, [0], {},
+ 'DATETIMEOFFSET(0)', None),
+
+ ]
+
+ metadata = MetaData()
+ table_args = ['test_mssql_dates', metadata]
+ for index, spec in enumerate(columns):
+ type_, args, kw, res, server_version = spec
+ table_args.append(
+ Column('c%s' % index, type_(*args, **kw), nullable=None))
+
+ date_table = Table(*table_args)
+ dialect = mssql.dialect()
+ dialect.server_version_info = MS_2008_VERSION
+ ms_2005_dialect = mssql.dialect()
+ ms_2005_dialect.server_version_info = MS_2005_VERSION
+ gen = dialect.ddl_compiler(dialect, schema.CreateTable(date_table))
+ gen2005 = ms_2005_dialect.ddl_compiler(
+ ms_2005_dialect, schema.CreateTable(date_table))
+
+ for col in date_table.c:
+ index = int(col.name[1:])
+ server_version = columns[index][4]
+ if not server_version:
+ testing.eq_(
+ gen.get_column_specification(col),
+ "%s %s" % (col.name, columns[index][3]))
+ else:
+ testing.eq_(
+ gen2005.get_column_specification(col),
+ "%s %s" % (col.name, columns[index][3]))
+
+ self.assert_(repr(col))
+
def test_large_type_deprecation(self):
d1 = mssql.dialect(deprecate_large_types=True)
d2 = mssql.dialect(deprecate_large_types=False)
@@ -313,9 +429,7 @@ class TypeRoundTripTest(
def teardown(self):
metadata.drop_all()
- @testing.fails_on_everything_except(
- 'mssql+pyodbc',
- 'this is some pyodbc-specific feature')
+ @testing.fails_on_everything_except('mssql+pyodbc')
def test_decimal_notation(self):
numeric_table = Table(
'numeric_table', metadata,
@@ -466,6 +580,8 @@ class TypeRoundTripTest(
(mssql.MSDateTime2, [], {},
'DATETIME2', ['>=', (10,)]),
+ (mssql.MSDateTime2, [0], {},
+ 'DATETIME2(0)', ['>=', (10,)]),
(mssql.MSDateTime2, [1], {},
'DATETIME2(1)', ['>=', (10,)]),
diff --git a/test/dialect/mysql/test_compiler.py b/test/dialect/mysql/test_compiler.py
index 304c31012..60af82bab 100644
--- a/test/dialect/mysql/test_compiler.py
+++ b/test/dialect/mysql/test_compiler.py
@@ -511,9 +511,8 @@ class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
self.assert_compile(schema.CreateTable(t1),
'CREATE TABLE sometable (assigned_id '
'INTEGER NOT NULL, id INTEGER NOT NULL '
- 'AUTO_INCREMENT, PRIMARY KEY (assigned_id, '
- 'id), KEY idx_autoinc_id (id))ENGINE=Inn'
- 'oDB')
+ 'AUTO_INCREMENT, PRIMARY KEY (id, assigned_id)'
+ ')ENGINE=InnoDB')
t1 = Table('sometable', MetaData(),
Column('assigned_id', Integer(), primary_key=True,
@@ -537,8 +536,7 @@ class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
'CREATE TABLE sometable ('
'id INTEGER NOT NULL, '
'`order` INTEGER NOT NULL AUTO_INCREMENT, '
- 'PRIMARY KEY (id, `order`), '
- 'KEY idx_autoinc_order (`order`)'
+ 'PRIMARY KEY (`order`, id)'
')ENGINE=InnoDB')
def test_create_table_with_partition(self):
diff --git a/test/dialect/mysql/test_query.py b/test/dialect/mysql/test_query.py
index f19177c2a..85513167c 100644
--- a/test/dialect/mysql/test_query.py
+++ b/test/dialect/mysql/test_query.py
@@ -5,7 +5,6 @@ from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import testing
-
class IdiosyncrasyTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mysql'
__backend__ = True
@@ -177,3 +176,57 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
eq_([1, 3, 5], [r.id for r in results])
+class AnyAllTest(fixtures.TablesTest, AssertsCompiledSQL):
+ __only_on__ = 'mysql'
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'stuff', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('value', Integer)
+ )
+
+ @classmethod
+ def insert_data(cls):
+ stuff = cls.tables.stuff
+ testing.db.execute(
+ stuff.insert(),
+ [
+ {'id': 1, 'value': 1},
+ {'id': 2, 'value': 2},
+ {'id': 3, 'value': 3},
+ {'id': 4, 'value': 4},
+ {'id': 5, 'value': 5},
+ ]
+ )
+
+ def test_any_w_comparator(self):
+ stuff = self.tables.stuff
+ stmt = select([stuff.c.id]).where(
+ stuff.c.value > any_(select([stuff.c.value])))
+
+ eq_(
+ testing.db.execute(stmt).fetchall(),
+ [(2,), (3,), (4,), (5,)]
+ )
+
+ def test_all_w_comparator(self):
+ stuff = self.tables.stuff
+ stmt = select([stuff.c.id]).where(
+ stuff.c.value >= all_(select([stuff.c.value])))
+
+ eq_(
+ testing.db.execute(stmt).fetchall(),
+ [(5,)]
+ )
+
+ def test_any_literal(self):
+ stuff = self.tables.stuff
+ stmt = select([4 == any_(select([stuff.c.value]))])
+
+ is_(
+ testing.db.execute(stmt).scalar(), True
+ )
+
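
For reference, a standalone sketch of what the any_() comparison used above renders as; the table definition here is local to the example:

    from sqlalchemy import MetaData, Table, Column, Integer, select, any_

    m = MetaData()
    stuff = Table(
        'stuff', m,
        Column('id', Integer, primary_key=True),
        Column('value', Integer))

    stmt = select([stuff.c.id]).where(
        stuff.c.value > any_(select([stuff.c.value])))

    # str(stmt) renders roughly:
    #   SELECT stuff.id FROM stuff
    #   WHERE stuff.value > ANY (SELECT stuff.value FROM stuff)
    print(stmt)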
diff --git a/test/dialect/mysql/test_reflection.py b/test/dialect/mysql/test_reflection.py
index 39b39e006..a28876262 100644
--- a/test/dialect/mysql/test_reflection.py
+++ b/test/dialect/mysql/test_reflection.py
@@ -1,13 +1,195 @@
# coding: utf-8
-from sqlalchemy.testing import eq_
-from sqlalchemy import *
+from sqlalchemy.testing import eq_, is_
+from sqlalchemy import Column, Table, DDL, MetaData, TIMESTAMP, \
+ DefaultClause, String, Integer, Text, UnicodeText, SmallInteger,\
+ NCHAR, LargeBinary, DateTime, select, UniqueConstraint, Unicode,\
+ BigInteger
+from sqlalchemy import event
from sqlalchemy import sql
+from sqlalchemy import inspect
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.testing import fixtures, AssertsExecutionResults
from sqlalchemy import testing
+class TypeReflectionTest(fixtures.TestBase):
+ __only_on__ = 'mysql'
+ __backend__ = True
+
+ @testing.provide_metadata
+ def _run_test(self, specs, attributes):
+ columns = [Column('c%i' % (i + 1), t[0]) for i, t in enumerate(specs)]
+
+ # Early 5.0 releases seem to report more "general" types for columns
+ # in a view, e.g. char -> varchar, tinyblob -> mediumblob
+ use_views = testing.db.dialect.server_version_info > (5, 0, 10)
+
+ m = self.metadata
+ Table('mysql_types', m, *columns)
+
+ if use_views:
+ event.listen(
+ m, 'after_create',
+ DDL(
+ 'CREATE OR REPLACE VIEW mysql_types_v '
+ 'AS SELECT * from mysql_types')
+ )
+ event.listen(
+ m, 'before_drop',
+ DDL("DROP VIEW IF EXISTS mysql_types_v")
+ )
+ m.create_all()
+
+ m2 = MetaData(testing.db)
+ tables = [
+ Table('mysql_types', m2, autoload=True)
+ ]
+ if use_views:
+ tables.append(Table('mysql_types_v', m2, autoload=True))
+
+ for table in tables:
+ for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
+ expected_spec = spec[1]
+ reflected_type = reflected_col.type
+ is_(type(reflected_type), type(expected_spec))
+
+ for attr in attributes:
+ eq_(
+ getattr(reflected_type, attr),
+ getattr(expected_spec, attr),
+ "Column %s: Attribute %s value of %s does not "
+ "match %s for type %s" % (
+ "c%i" % (i + 1),
+ attr,
+ getattr(reflected_type, attr),
+ getattr(expected_spec, attr),
+ spec[0]
+ )
+ )
+
+ def test_time_types(self):
+ specs = []
+
+ if testing.requires.mysql_fsp.enabled:
+ fsps = [None, 0, 5]
+ else:
+ fsps = [None]
+
+ for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
+ # MySQL defaults fsp to 0, and does not report it when it is 0.
+ # we don't actually render 0 in DDL right now, but even if we did,
+ # it would come back blank
+ for fsp in fsps:
+ if fsp:
+ specs.append((type_(fsp=fsp), type_(fsp=fsp)))
+ else:
+ specs.append((type_(), type_()))
+
+ specs.extend([
+ (TIMESTAMP(), mysql.TIMESTAMP()),
+ (DateTime(), mysql.DATETIME()),
+ ])
+
+ # note 'timezone' should always be None on both
+ self._run_test(specs, ['fsp', 'timezone'])
+
+ def test_year_types(self):
+ specs = [
+ (mysql.YEAR(), mysql.YEAR(display_width=4)),
+ (mysql.YEAR(display_width=2), mysql.YEAR(display_width=2)),
+ (mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
+ ]
+
+ self._run_test(specs, ['display_width'])
+
+ def test_string_types(self):
+ specs = [
+ (String(1), mysql.MSString(1)),
+ (String(3), mysql.MSString(3)),
+ (Text(), mysql.MSText()),
+ (Unicode(1), mysql.MSString(1)),
+ (Unicode(3), mysql.MSString(3)),
+ (UnicodeText(), mysql.MSText()),
+ (mysql.MSChar(1), mysql.MSChar(1)),
+ (mysql.MSChar(3), mysql.MSChar(3)),
+ (NCHAR(2), mysql.MSChar(2)),
+ (mysql.MSNChar(2), mysql.MSChar(2)),
+ (mysql.MSNVarChar(22), mysql.MSString(22),),
+ ]
+ self._run_test(specs, ['length'])
+
+ def test_integer_types(self):
+ specs = []
+ for type_ in [
+ mysql.TINYINT, mysql.SMALLINT,
+ mysql.MEDIUMINT, mysql.INTEGER, mysql.BIGINT]:
+ for display_width in [None, 4, 7]:
+ for unsigned in [False, True]:
+ for zerofill in [None, True]:
+ kw = {}
+ if display_width:
+ kw['display_width'] = display_width
+ if unsigned is not None:
+ kw['unsigned'] = unsigned
+ if zerofill is not None:
+ kw['zerofill'] = zerofill
+
+ zerofill = bool(zerofill)
+ source_type = type_(**kw)
+
+ if display_width is None:
+ display_width = {
+ mysql.MEDIUMINT: 9,
+ mysql.SMALLINT: 6,
+ mysql.TINYINT: 4,
+ mysql.INTEGER: 11,
+ mysql.BIGINT: 20
+ }[type_]
+
+ if zerofill:
+ unsigned = True
+
+ expected_type = type_(
+ display_width=display_width,
+ unsigned=unsigned,
+ zerofill=zerofill
+ )
+ specs.append(
+ (source_type, expected_type)
+ )
+
+ specs.extend([
+ (SmallInteger(), mysql.SMALLINT(display_width=6)),
+ (Integer(), mysql.INTEGER(display_width=11)),
+ (BigInteger, mysql.BIGINT(display_width=20))
+ ])
+ self._run_test(specs, ['display_width', 'unsigned', 'zerofill'])
+
+ def test_binary_types(self):
+ specs = [
+ (LargeBinary(3), mysql.TINYBLOB(), ),
+ (LargeBinary(), mysql.BLOB()),
+ (mysql.MSBinary(3), mysql.MSBinary(3), ),
+ (mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
+ (mysql.MSTinyBlob(), mysql.MSTinyBlob()),
+ (mysql.MSBlob(), mysql.MSBlob()),
+ (mysql.MSBlob(1234), mysql.MSBlob()),
+ (mysql.MSMediumBlob(), mysql.MSMediumBlob()),
+ (mysql.MSLongBlob(), mysql.MSLongBlob()),
+ ]
+ self._run_test(specs, [])
+
+ @testing.uses_deprecated('Manually quoting ENUM value literals')
+ def test_legacy_enum_types(self):
+
+ specs = [
+ (mysql.ENUM("''","'fleem'"), mysql.ENUM("''","'fleem'")), # noqa
+ ]
+
+ self._run_test(specs, ['enums'])
+
+
class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'mysql'
@@ -75,7 +257,8 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
def test_reflection_with_table_options(self):
comment = r"""Comment types type speedily ' " \ '' Fun!"""
- def_table = Table('mysql_def', MetaData(testing.db),
+ def_table = Table(
+ 'mysql_def', MetaData(testing.db),
Column('c1', Integer()),
mysql_engine='MEMORY',
mysql_comment=comment,
@@ -88,8 +271,9 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
def_table.create()
try:
- reflected = Table('mysql_def', MetaData(testing.db),
- autoload=True)
+ reflected = Table(
+ 'mysql_def', MetaData(testing.db),
+ autoload=True)
finally:
def_table.drop()
@@ -108,15 +292,16 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
assert reflected.kwargs['mysql_connection'] == 'fish'
# This field doesn't seem to be returned by mysql itself.
- #assert reflected.kwargs['mysql_password'] == 'secret'
+ # assert reflected.kwargs['mysql_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
- #assert reflected.kwargs['mysql_auto_increment'] == '5'
+ # assert reflected.kwargs['mysql_auto_increment'] == '5'
def test_reflection_on_include_columns(self):
"""Test reflection of include_columns to be sure they respect case."""
- case_table = Table('mysql_case', MetaData(testing.db),
+ case_table = Table(
+ 'mysql_case', MetaData(testing.db),
Column('c1', String(10)),
Column('C2', String(10)),
Column('C3', String(10)))
@@ -128,132 +313,68 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
for t in case_table, reflected:
assert 'c1' in t.c.keys()
assert 'C2' in t.c.keys()
- reflected2 = Table('mysql_case', MetaData(testing.db),
- autoload=True, include_columns=['c1', 'c2'])
+ reflected2 = Table(
+ 'mysql_case', MetaData(testing.db),
+ autoload=True, include_columns=['c1', 'c2'])
assert 'c1' in reflected2.c.keys()
for c in ['c2', 'C2', 'C3']:
assert c not in reflected2.c.keys()
finally:
case_table.drop()
- @testing.exclude('mysql', '<', (5, 0, 0), 'early types are squirrely')
- @testing.uses_deprecated('Using String type with no length')
- @testing.uses_deprecated('Manually quoting ENUM value literals')
- def test_type_reflection(self):
- # (ask_for, roundtripped_as_if_different)
- specs = [(String(1), mysql.MSString(1), ),
- (String(3), mysql.MSString(3), ),
- (Text(), mysql.MSText(), ),
- (Unicode(1), mysql.MSString(1), ),
- (Unicode(3), mysql.MSString(3), ),
- (UnicodeText(), mysql.MSText(), ),
- (mysql.MSChar(1), ),
- (mysql.MSChar(3), ),
- (NCHAR(2), mysql.MSChar(2), ),
- (mysql.MSNChar(2), mysql.MSChar(2), ), # N is CREATE only
- (mysql.MSNVarChar(22), mysql.MSString(22), ),
- (SmallInteger(), mysql.MSSmallInteger(), ),
- (SmallInteger(), mysql.MSSmallInteger(4), ),
- (mysql.MSSmallInteger(), ),
- (mysql.MSSmallInteger(4), mysql.MSSmallInteger(4), ),
- (mysql.MSMediumInteger(), mysql.MSMediumInteger(), ),
- (mysql.MSMediumInteger(8), mysql.MSMediumInteger(8), ),
- (LargeBinary(3), mysql.TINYBLOB(), ),
- (LargeBinary(), mysql.BLOB() ),
- (mysql.MSBinary(3), mysql.MSBinary(3), ),
- (mysql.MSVarBinary(3),),
- (mysql.MSTinyBlob(),),
- (mysql.MSBlob(),),
- (mysql.MSBlob(1234), mysql.MSBlob()),
- (mysql.MSMediumBlob(),),
- (mysql.MSLongBlob(),),
- (mysql.ENUM("''","'fleem'"), ),
- ]
-
- columns = [Column('c%i' % (i + 1), t[0]) for i, t in enumerate(specs)]
-
- db = testing.db
- m = MetaData(db)
- t_table = Table('mysql_types', m, *columns)
- try:
- m.create_all()
-
- m2 = MetaData(db)
- rt = Table('mysql_types', m2, autoload=True)
- try:
- db.execute('CREATE OR REPLACE VIEW mysql_types_v '
- 'AS SELECT * from mysql_types')
- rv = Table('mysql_types_v', m2, autoload=True)
-
- expected = [len(c) > 1 and c[1] or c[0] for c in specs]
-
- # Early 5.0 releases seem to report more "general" for columns
- # in a view, e.g. char -> varchar, tinyblob -> mediumblob
- #
- # Not sure exactly which point version has the fix.
- if db.dialect.server_version_info < (5, 0, 11):
- tables = rt,
- else:
- tables = rt, rv
-
- for table in tables:
- for i, reflected in enumerate(table.c):
- assert isinstance(reflected.type,
- type(expected[i])), \
- 'element %d: %r not instance of %r' % (i,
- reflected.type, type(expected[i]))
- finally:
- db.execute('DROP VIEW mysql_types_v')
- finally:
- m.drop_all()
-
def test_autoincrement(self):
meta = MetaData(testing.db)
try:
Table('ai_1', meta,
- Column('int_y', Integer, primary_key=True),
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True),
- mysql_engine='MyISAM')
+ mysql_engine='MyISAM')
Table('ai_2', meta,
- Column('int_y', Integer, primary_key=True),
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True),
- mysql_engine='MyISAM')
+ mysql_engine='MyISAM')
Table('ai_3', meta,
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
- Column('int_y', Integer, primary_key=True),
- mysql_engine='MyISAM')
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
+ mysql_engine='MyISAM')
Table('ai_4', meta,
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
Column('int_n2', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
- mysql_engine='MyISAM')
+ mysql_engine='MyISAM')
Table('ai_5', meta,
- Column('int_y', Integer, primary_key=True),
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
- mysql_engine='MyISAM')
+ mysql_engine='MyISAM')
Table('ai_6', meta,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
- Column('int_y', Integer, primary_key=True),
- mysql_engine='MyISAM')
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
+ mysql_engine='MyISAM')
Table('ai_7', meta,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
Column('o2', String(1), DefaultClause('x'),
primary_key=True),
- Column('int_y', Integer, primary_key=True),
- mysql_engine='MyISAM')
+ Column('int_y', Integer, primary_key=True,
+ autoincrement=True),
+ mysql_engine='MyISAM')
Table('ai_8', meta,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
Column('o2', String(1), DefaultClause('x'),
primary_key=True),
- mysql_engine='MyISAM')
+ mysql_engine='MyISAM')
meta.create_all()
table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4',
@@ -309,7 +430,7 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
["t TIMESTAMP"],
["u TIMESTAMP DEFAULT CURRENT_TIMESTAMP"]
]):
- Table("nn_t%d" % idx, meta) # to allow DROP
+ Table("nn_t%d" % idx, meta) # to allow DROP
testing.db.execute("""
CREATE TABLE nn_t%d (
@@ -380,7 +501,8 @@ class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
class RawReflectionTest(fixtures.TestBase):
def setup(self):
dialect = mysql.dialect()
- self.parser = mysql.MySQLTableDefinitionParser(dialect, dialect.identifier_preparer)
+ self.parser = mysql.MySQLTableDefinitionParser(
+ dialect, dialect.identifier_preparer)
def test_key_reflection(self):
regex = self.parser._re_key
@@ -391,10 +513,14 @@ class RawReflectionTest(fixtures.TestBase):
assert regex.match(' PRIMARY KEY (`id`)')
assert regex.match(' PRIMARY KEY USING BTREE (`id`)')
assert regex.match(' PRIMARY KEY (`id`) USING BTREE')
- assert regex.match(' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16')
- assert regex.match(' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16')
- assert regex.match(' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16')
- assert not regex.match(' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16')
+ assert regex.match(
+ ' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16')
+ assert regex.match(
+ ' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16')
+ assert regex.match(
+ ' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16')
+ assert not regex.match(
+ ' PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16')
def test_fk_reflection(self):
regex = self.parser._re_constraint
diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py
index 9fa5c9804..71d8fa3e5 100644
--- a/test/dialect/postgresql/test_compiler.py
+++ b/test/dialect/postgresql/test_compiler.py
@@ -9,11 +9,13 @@ from sqlalchemy import Sequence, Table, Column, Integer, update, String,\
Text
from sqlalchemy.dialects.postgresql import ExcludeConstraint, array
from sqlalchemy import exc, schema
-from sqlalchemy.dialects.postgresql import base as postgresql
+from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.orm import mapper, aliased, Session
-from sqlalchemy.sql import table, column, operators
+from sqlalchemy.sql import table, column, operators, literal_column
+from sqlalchemy.sql import util as sql_util
from sqlalchemy.util import u
+from sqlalchemy.dialects.postgresql import aggregate_order_by
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
@@ -21,7 +23,7 @@ class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
def test_format(self):
seq = Sequence('my_seq_no_schema')
- dialect = postgresql.PGDialect()
+ dialect = postgresql.dialect()
assert dialect.identifier_preparer.format_sequence(seq) \
== 'my_seq_no_schema'
seq = Sequence('my_seq', schema='some_schema')
@@ -508,6 +510,19 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
'(CAST("Room" AS TEXT) WITH =)'
)
+ def test_exclude_constraint_when(self):
+ m = MetaData()
+ tbl = Table(
+ 'testtbl', m,
+ Column('room', String)
+ )
+ cons = ExcludeConstraint(('room', '='), where=tbl.c.room.in_(['12']))
+ tbl.append_constraint(cons)
+ self.assert_compile(schema.AddConstraint(cons),
+ 'ALTER TABLE testtbl ADD EXCLUDE USING gist '
+ '(room WITH =) WHERE (testtbl.room IN (\'12\'))',
+ dialect=postgresql.dialect())
+
def test_substring(self):
self.assert_compile(func.substring('abc', 1, 2),
'SUBSTRING(%(substring_1)s FROM %(substring_2)s '
@@ -578,6 +593,22 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
"WHERE mytable_1.myid = %(myid_1)s FOR UPDATE OF mytable_1"
)
+ def test_for_update_with_schema(self):
+ m = MetaData()
+ table1 = Table(
+ 'mytable', m,
+ Column('myid'),
+ Column('name'),
+ schema='testschema'
+ )
+
+ self.assert_compile(
+ table1.select(table1.c.myid == 7).with_for_update(of=table1),
+ "SELECT testschema.mytable.myid, testschema.mytable.name "
+ "FROM testschema.mytable "
+ "WHERE testschema.mytable.myid = %(myid_1)s "
+ "FOR UPDATE OF mytable")
+
def test_reserved_words(self):
table = Table("pg_table", MetaData(),
Column("col1", Integer),
@@ -693,7 +724,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
self._test_array_zero_indexes(False)
def test_array_literal_type(self):
- is_(postgresql.array([1, 2]).type._type_affinity, postgresql.ARRAY)
+ isinstance(postgresql.array([1, 2]).type, postgresql.ARRAY)
is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer)
is_(postgresql.array([1, 2], type_=String).
@@ -800,6 +831,48 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
dialect=postgresql.dialect()
)
+ def test_aggregate_order_by_one(self):
+ m = MetaData()
+ table = Table('table1', m, Column('a', Integer), Column('b', Integer))
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+ # note this tests that the object exports FROM objects
+ # correctly
+ self.assert_compile(
+ stmt,
+ "SELECT array_agg(table1.a ORDER BY table1.b DESC) "
+ "AS array_agg_1 FROM table1"
+ )
+
+ def test_aggregate_order_by_two(self):
+ m = MetaData()
+ table = Table('table1', m, Column('a', Integer), Column('b', Integer))
+ expr = func.string_agg(
+ table.c.a,
+ aggregate_order_by(literal_column("','"), table.c.a)
+ )
+ stmt = select([expr])
+
+ self.assert_compile(
+ stmt,
+ "SELECT string_agg(table1.a, ',' ORDER BY table1.a) "
+ "AS string_agg_1 FROM table1"
+ )
+
+ def test_aggregate_order_by_adapt(self):
+ m = MetaData()
+ table = Table('table1', m, Column('a', Integer), Column('b', Integer))
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+ a1 = table.alias('foo')
+ stmt2 = sql_util.ClauseAdapter(a1).traverse(stmt)
+ self.assert_compile(
+ stmt2,
+ "SELECT array_agg(foo.a ORDER BY foo.b DESC) AS array_agg_1 FROM table1 AS foo"
+ )
+
class DistinctOnTest(fixtures.TestBase, AssertsCompiledSQL):
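
A standalone sketch of the aggregate_order_by construct exercised above, compiled against the PostgreSQL dialect; the table and column names are local to the example:

    from sqlalchemy import MetaData, Table, Column, Integer, select, func
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.dialects.postgresql import aggregate_order_by

    m = MetaData()
    t = Table('table1', m, Column('a', Integer), Column('b', Integer))

    stmt = select(
        [func.array_agg(aggregate_order_by(t.c.a, t.c.b.desc()))])

    # compiles on the PostgreSQL dialect to roughly:
    #   SELECT array_agg(table1.a ORDER BY table1.b DESC) AS array_agg_1
    #   FROM table1
    print(stmt.compile(dialect=postgresql.dialect()))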
diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py
index 4a33644e0..9f92a7830 100644
--- a/test/dialect/postgresql/test_query.py
+++ b/test/dialect/postgresql/test_query.py
@@ -12,7 +12,7 @@ from sqlalchemy import exc
from sqlalchemy.dialects import postgresql
import datetime
-metadata = matchtable = cattable = None
+matchtable = cattable = None
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
@@ -22,23 +22,19 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
- global metadata
- cls.engine = testing.db
- metadata = MetaData(testing.db)
+ cls.metadata = MetaData(testing.db)
def teardown(self):
- metadata.drop_all()
- metadata.clear()
- if self.engine is not testing.db:
- self.engine.dispose()
+ self.metadata.drop_all()
+ self.metadata.clear()
def test_compiled_insert(self):
table = Table(
- 'testtable', metadata, Column(
+ 'testtable', self.metadata, Column(
'id', Integer, primary_key=True),
Column(
'data', String(30)))
- metadata.create_all()
+ self.metadata.create_all()
ins = table.insert(
inline=True,
values={'data': bindparam('x')}).compile()
@@ -49,17 +45,18 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
)
def test_foreignkey_missing_insert(self):
- t1 = Table('t1', metadata, Column('id', Integer,
- primary_key=True))
+ Table(
+ 't1', self.metadata,
+ Column('id', Integer, primary_key=True))
t2 = Table(
't2',
- metadata,
+ self.metadata,
Column(
'id',
Integer,
ForeignKey('t1.id'),
primary_key=True))
- metadata.create_all()
+ self.metadata.create_all()
# want to ensure that "null value in column "id" violates not-
# null constraint" is raised (IntegrityError on psycopg2, but
@@ -72,14 +69,16 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
engines.testing_engine(options={'implicit_returning': False}),
engines.testing_engine(options={'implicit_returning': True})
]:
- assert_raises_message(exc.DBAPIError,
- 'violates not-null constraint',
- eng.execute, t2.insert())
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ eng.execute, t2.insert()
+ )
def test_sequence_insert(self):
table = Table(
'testtable',
- metadata,
+ self.metadata,
Column(
'id',
Integer,
@@ -88,14 +87,14 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
Column(
'data',
String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_with_sequence(table, 'my_seq')
@testing.requires.returning
def test_sequence_returning_insert(self):
table = Table(
'testtable',
- metadata,
+ self.metadata,
Column(
'id',
Integer,
@@ -104,57 +103,57 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
Column(
'data',
String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_with_sequence_returning(table, 'my_seq')
def test_opt_sequence_insert(self):
table = Table(
- 'testtable', metadata,
+ 'testtable', self.metadata,
Column(
'id', Integer, Sequence(
'my_seq', optional=True), primary_key=True),
Column(
'data', String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_opt_sequence_returning_insert(self):
table = Table(
- 'testtable', metadata,
+ 'testtable', self.metadata,
Column(
'id', Integer, Sequence(
'my_seq', optional=True), primary_key=True),
Column(
'data', String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_autoincrement_insert(self):
table = Table(
- 'testtable', metadata,
+ 'testtable', self.metadata,
Column(
'id', Integer, primary_key=True),
Column(
'data', String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_autoincrement_returning_insert(self):
table = Table(
- 'testtable', metadata,
+ 'testtable', self.metadata,
Column(
'id', Integer, primary_key=True),
Column(
'data', String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_noautoincrement_insert(self):
table = Table(
'testtable',
- metadata,
+ self.metadata,
Column(
'id',
Integer,
@@ -163,42 +162,45 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
Column(
'data',
String(30)))
- metadata.create_all()
+ self.metadata.create_all()
self._assert_data_noautoincrement(table)
def _assert_data_autoincrement(self, table):
- self.engine = \
+ engine = \
engines.testing_engine(options={'implicit_returning': False})
- metadata.bind = self.engine
- with self.sql_execution_asserter(self.engine) as asserter:
+ with self.sql_execution_asserter(engine) as asserter:
- # execute with explicit id
+ with engine.connect() as conn:
+ # execute with explicit id
- r = table.insert().execute({'id': 30, 'data': 'd1'})
- assert r.inserted_primary_key == [30]
+ r = conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ eq_(r.inserted_primary_key, [30])
- # execute with prefetch id
+ # execute with prefetch id
- r = table.insert().execute({'data': 'd2'})
- assert r.inserted_primary_key == [1]
+ r = conn.execute(table.insert(), {'data': 'd2'})
+ eq_(r.inserted_primary_key, [1])
- # executemany with explicit ids
+ # executemany with explicit ids
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
- # executemany, uses SERIAL
+ # executemany, uses SERIAL
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
- # single execute, explicit id, inline
+ # single execute, explicit id, inline
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
+ conn.execute(
+ table.insert(inline=True),
+ {'id': 33, 'data': 'd7'})
- # single execute, inline, uses SERIAL
+ # single execute, inline, uses SERIAL
- table.insert(inline=True).execute({'data': 'd8'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL(
@@ -221,37 +223,41 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
[{'data': 'd8'}]),
)
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (1, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (2, 'd5'),
- (3, 'd6'),
- (33, 'd7'),
- (4, 'd8'),
- ]
- )
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (1, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (2, 'd5'),
+ (3, 'd6'),
+ (33, 'd7'),
+ (4, 'd8'),
+ ]
+ )
- table.delete().execute()
+ conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
- m2 = MetaData(self.engine)
+ m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
- with self.sql_execution_asserter(self.engine) as asserter:
- table.insert().execute({'id': 30, 'data': 'd1'})
- r = table.insert().execute({'data': 'd2'})
- assert r.inserted_primary_key == [5]
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
- table.insert(inline=True).execute({'data': 'd8'})
+ with self.sql_execution_asserter(engine) as asserter:
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ r = conn.execute(table.insert(), {'data': 'd2'})
+ eq_(r.inserted_primary_key, [5])
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
+ conn.execute(
+ table.insert(inline=True), {'id': 33, 'data': 'd7'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL(
@@ -273,278 +279,305 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
'INSERT INTO testtable (data) VALUES (:data)',
[{'data': 'd8'}]),
)
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (5, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (6, 'd5'),
- (7, 'd6'),
- (33, 'd7'),
- (8, 'd8'),
- ]
- )
- table.delete().execute()
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (5, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (6, 'd5'),
+ (7, 'd6'),
+ (33, 'd7'),
+ (8, 'd8'),
+ ]
+ )
+ conn.execute(table.delete())
def _assert_data_autoincrement_returning(self, table):
- self.engine = \
+ engine = \
engines.testing_engine(options={'implicit_returning': True})
- metadata.bind = self.engine
- with self.sql_execution_asserter(self.engine) as asserter:
+ with self.sql_execution_asserter(engine) as asserter:
+ with engine.connect() as conn:
- # execute with explicit id
+ # execute with explicit id
- r = table.insert().execute({'id': 30, 'data': 'd1'})
- assert r.inserted_primary_key == [30]
+ r = conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ eq_(r.inserted_primary_key, [30])
- # execute with prefetch id
+ # execute with prefetch id
- r = table.insert().execute({'data': 'd2'})
- assert r.inserted_primary_key == [1]
+ r = conn.execute(table.insert(), {'data': 'd2'})
+ eq_(r.inserted_primary_key, [1])
- # executemany with explicit ids
+ # executemany with explicit ids
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
- # executemany, uses SERIAL
+ # executemany, uses SERIAL
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
- # single execute, explicit id, inline
+ # single execute, explicit id, inline
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
+ conn.execute(
+ table.insert(inline=True), {'id': 33, 'data': 'd7'})
- # single execute, inline, uses SERIAL
+ # single execute, inline, uses SERIAL
- table.insert(inline=True).execute({'data': 'd8'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- {'id': 30, 'data': 'd1'}),
+ {'id': 30, 'data': 'd1'}),
DialectSQL('INSERT INTO testtable (data) VALUES (:data) RETURNING '
- 'testtable.id', {'data': 'd2'}),
+ 'testtable.id', {'data': 'd2'}),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
+ [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
- [{'data': 'd5'}, {'data': 'd6'}]),
+ [{'data': 'd5'}, {'data': 'd6'}]),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 33, 'data': 'd7'}]),
+ [{'id': 33, 'data': 'd7'}]),
DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
- [{'data': 'd8'}]),
+ [{'data': 'd8'}]),
)
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (1, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (2, 'd5'),
- (3, 'd6'),
- (33, 'd7'),
- (4, 'd8'),
- ]
- )
- table.delete().execute()
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (1, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (2, 'd5'),
+ (3, 'd6'),
+ (33, 'd7'),
+ (4, 'd8'),
+ ]
+ )
+ conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
- m2 = MetaData(self.engine)
+ m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
- with self.sql_execution_asserter(self.engine) as asserter:
- table.insert().execute({'id': 30, 'data': 'd1'})
- r = table.insert().execute({'data': 'd2'})
- assert r.inserted_primary_key == [5]
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
- table.insert(inline=True).execute({'data': 'd8'})
+ with self.sql_execution_asserter(engine) as asserter:
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ r = conn.execute(table.insert(), {'data': 'd2'})
+ eq_(r.inserted_primary_key, [5])
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
+ conn.execute(
+ table.insert(inline=True), {'id': 33, 'data': 'd7'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- {'id': 30, 'data': 'd1'}),
+ {'id': 30, 'data': 'd1'}),
DialectSQL('INSERT INTO testtable (data) VALUES (:data) RETURNING '
- 'testtable.id', {'data': 'd2'}),
+ 'testtable.id', {'data': 'd2'}),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
+ [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
- [{'data': 'd5'}, {'data': 'd6'}]),
+ [{'data': 'd5'}, {'data': 'd6'}]),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 33, 'data': 'd7'}]),
- DialectSQL('INSERT INTO testtable (data) VALUES (:data)', [{'data': 'd8'}]),
- )
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (5, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (6, 'd5'),
- (7, 'd6'),
- (33, 'd7'),
- (8, 'd8'),
- ]
+ [{'id': 33, 'data': 'd7'}]),
+ DialectSQL(
+ 'INSERT INTO testtable (data) VALUES (:data)',
+ [{'data': 'd8'}]),
)
- table.delete().execute()
+
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (5, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (6, 'd5'),
+ (7, 'd6'),
+ (33, 'd7'),
+ (8, 'd8'),
+ ]
+ )
+ conn.execute(table.delete())
def _assert_data_with_sequence(self, table, seqname):
- self.engine = \
+ engine = \
engines.testing_engine(options={'implicit_returning': False})
- metadata.bind = self.engine
- with self.sql_execution_asserter(self.engine) as asserter:
- table.insert().execute({'id': 30, 'data': 'd1'})
- table.insert().execute({'data': 'd2'})
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
- table.insert(inline=True).execute({'data': 'd8'})
+ with self.sql_execution_asserter(engine) as asserter:
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ conn.execute(table.insert(), {'data': 'd2'})
+ conn.execute(table.insert(),
+ {'id': 31, 'data': 'd3'},
+ {'id': 32, 'data': 'd4'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
+ conn.execute(table.insert(inline=True),
+ {'id': 33, 'data': 'd7'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- {'id': 30, 'data': 'd1'}),
+ {'id': 30, 'data': 'd1'}),
CursorSQL("select nextval('my_seq')"),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- {'id': 1, 'data': 'd2'}),
+ {'id': 1, 'data': 'd2'}),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
- DialectSQL("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
- ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
+ [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
+ DialectSQL(
+ "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
+ ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 33, 'data': 'd7'}]),
- DialectSQL("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
- ":data)" % seqname, [{'data': 'd8'}]),
- )
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (1, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (2, 'd5'),
- (3, 'd6'),
- (33, 'd7'),
- (4, 'd8'),
- ]
+ [{'id': 33, 'data': 'd7'}]),
+ DialectSQL(
+ "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
+ ":data)" % seqname, [{'data': 'd8'}]),
)
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (1, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (2, 'd5'),
+ (3, 'd6'),
+ (33, 'd7'),
+ (4, 'd8'),
+ ]
+ )
# cant test reflection here since the Sequence must be
# explicitly specified
def _assert_data_with_sequence_returning(self, table, seqname):
- self.engine = \
+ engine = \
engines.testing_engine(options={'implicit_returning': True})
- metadata.bind = self.engine
- with self.sql_execution_asserter(self.engine) as asserter:
- table.insert().execute({'id': 30, 'data': 'd1'})
- table.insert().execute({'data': 'd2'})
- table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
- 'data': 'd4'})
- table.insert().execute({'data': 'd5'}, {'data': 'd6'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
- table.insert(inline=True).execute({'data': 'd8'})
+ with self.sql_execution_asserter(engine) as asserter:
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ conn.execute(table.insert(), {'data': 'd2'})
+ conn.execute(table.insert(),
+ {'id': 31, 'data': 'd3'},
+ {'id': 32, 'data': 'd4'})
+ conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
+ conn.execute(
+ table.insert(inline=True), {'id': 33, 'data': 'd7'})
+ conn.execute(table.insert(inline=True), {'data': 'd8'})
asserter.assert_(
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- {'id': 30, 'data': 'd1'}),
+ {'id': 30, 'data': 'd1'}),
DialectSQL("INSERT INTO testtable (id, data) VALUES "
- "(nextval('my_seq'), :data) RETURNING testtable.id",
- {'data': 'd2'}),
+ "(nextval('my_seq'), :data) RETURNING testtable.id",
+ {'data': 'd2'}),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
- DialectSQL("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
- ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
+ [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
+ DialectSQL(
+ "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
+ ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
- [{'id': 33, 'data': 'd7'}]),
- DialectSQL("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
- ":data)" % seqname, [{'data': 'd8'}]),
+ [{'id': 33, 'data': 'd7'}]),
+ DialectSQL(
+ "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
+ ":data)" % seqname, [{'data': 'd8'}]),
)
- eq_(
- table.select().execute().fetchall(),
- [
- (30, 'd1'),
- (1, 'd2'),
- (31, 'd3'),
- (32, 'd4'),
- (2, 'd5'),
- (3, 'd6'),
- (33, 'd7'),
- (4, 'd8'),
- ]
- )
+ with engine.connect() as conn:
+ eq_(
+ conn.execute(table.select()).fetchall(),
+ [
+ (30, 'd1'),
+ (1, 'd2'),
+ (31, 'd3'),
+ (32, 'd4'),
+ (2, 'd5'),
+ (3, 'd6'),
+ (33, 'd7'),
+ (4, 'd8'),
+ ]
+ )
- # cant test reflection here since the Sequence must be
- # explicitly specified
+ # can't test reflection here since the Sequence must be
+ # explicitly specified
def _assert_data_noautoincrement(self, table):
- self.engine = \
+ engine = \
engines.testing_engine(options={'implicit_returning': False})
- metadata.bind = self.engine
- table.insert().execute({'id': 30, 'data': 'd1'})
- if self.engine.driver == 'pg8000':
- exception_cls = exc.ProgrammingError
- elif self.engine.driver == 'pypostgresql':
- exception_cls = Exception
- else:
- exception_cls = exc.IntegrityError
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'})
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'},
- {'data': 'd3'})
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'})
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'},
- {'data': 'd3'})
- table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32,
- 'data': 'd3'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd4'})
- assert table.select().execute().fetchall() == [
- (30, 'd1'),
- (31, 'd2'),
- (32, 'd3'),
- (33, 'd4')]
- table.delete().execute()
+
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'})
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'},
+ {'data': 'd3'})
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'})
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'},
+ {'data': 'd3'})
+
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'})
+ conn.execute(table.insert(inline=True), {'id': 33, 'data': 'd4'})
+ eq_(conn.execute(table.select()).fetchall(), [
+ (30, 'd1'),
+ (31, 'd2'),
+ (32, 'd3'),
+ (33, 'd4')])
+ conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
- m2 = MetaData(self.engine)
+ m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
- table.insert().execute({'id': 30, 'data': 'd1'})
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'})
- assert_raises_message(exception_cls,
- 'violates not-null constraint',
- table.insert().execute, {'data': 'd2'},
- {'data': 'd3'})
- table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32,
- 'data': 'd3'})
- table.insert(inline=True).execute({'id': 33, 'data': 'd4'})
- assert table.select().execute().fetchall() == [
- (30, 'd1'),
- (31, 'd2'),
- (32, 'd3'),
- (33, 'd4')]
+ with engine.connect() as conn:
+ conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'})
+ assert_raises_message(
+ exc.CompileError,
+ ".*has no Python-side or server-side default.*",
+ conn.execute, table.insert(), {'data': 'd2'},
+ {'data': 'd3'})
+ conn.execute(
+ table.insert(),
+ {'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'})
+ conn.execute(table.insert(inline=True), {'id': 33, 'data': 'd4'})
+ eq_(conn.execute(table.select()).fetchall(), [
+ (30, 'd1'),
+ (31, 'd2'),
+ (32, 'd3'),
+ (33, 'd4')])
class ServerSideCursorsTest(fixtures.TestBase, AssertsExecutionResults):
@@ -837,6 +870,19 @@ class ExtractTest(fixtures.TablesTest):
run_deletes = None
@classmethod
+ def setup_bind(cls):
+ from sqlalchemy import event
+ eng = engines.testing_engine()
+
+ @event.listens_for(eng, "connect")
+ def connect(dbapi_conn, rec):
+ cursor = dbapi_conn.cursor()
+ cursor.execute("SET SESSION TIME ZONE 0")
+ cursor.close()
+
+ return eng
+
+ @classmethod
def define_tables(cls, metadata):
Table('t', metadata,
Column('id', Integer, primary_key=True),
@@ -856,23 +902,17 @@ class ExtractTest(fixtures.TablesTest):
def utcoffset(self, dt):
return datetime.timedelta(hours=4)
- with testing.db.connect() as conn:
-
- # we aren't resetting this at the moment but we don't have
- # any other tests that are TZ specific
- conn.execute("SET SESSION TIME ZONE 0")
- conn.execute(
- cls.tables.t.insert(),
- {
- 'dtme': datetime.datetime(2012, 5, 10, 12, 15, 25),
- 'dt': datetime.date(2012, 5, 10),
- 'tm': datetime.time(12, 15, 25),
- 'intv': datetime.timedelta(seconds=570),
- 'dttz':
- datetime.datetime(2012, 5, 10, 12, 15, 25,
- tzinfo=TZ())
- },
- )
+ cls.bind.execute(
+ cls.tables.t.insert(),
+ {
+ 'dtme': datetime.datetime(2012, 5, 10, 12, 15, 25),
+ 'dt': datetime.date(2012, 5, 10),
+ 'tm': datetime.time(12, 15, 25),
+ 'intv': datetime.timedelta(seconds=570),
+ 'dttz': datetime.datetime(2012, 5, 10, 12, 15, 25,
+ tzinfo=TZ())
+ },
+ )
def _test(self, expr, field="all", overrides=None):
t = self.tables.t
@@ -898,7 +938,7 @@ class ExtractTest(fixtures.TablesTest):
fields.update(overrides)
for field in fields:
- result = testing.db.scalar(
+ result = self.bind.scalar(
select([extract(field, expr)]).select_from(t))
eq_(result, fields[field])
@@ -912,9 +952,9 @@ class ExtractTest(fixtures.TablesTest):
overrides={"epoch": 1336652695.0, "minute": 24})
def test_three(self):
- t = self.tables.t
+ self.tables.t
- actual_ts = testing.db.scalar(func.current_timestamp()) - \
+ actual_ts = self.bind.scalar(func.current_timestamp()) - \
datetime.timedelta(days=5)
self._test(func.current_timestamp() - datetime.timedelta(days=5),
{"hour": actual_ts.hour, "year": actual_ts.year,
@@ -963,7 +1003,7 @@ class ExtractTest(fixtures.TablesTest):
def test_twelve(self):
t = self.tables.t
- actual_ts = testing.db.scalar(
+ actual_ts = self.bind.scalar(
func.current_timestamp()).replace(tzinfo=None) - \
datetime.datetime(2012, 5, 10, 12, 15, 25)
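The ExtractTest changes above replace a one-off "SET SESSION TIME ZONE 0" issued on a single connection with a per-connection "connect" event on a dedicated testing engine, so every DBAPI connection handed out by the pool carries the setting. A minimal sketch of that pattern, assuming a hypothetical PostgreSQL URL:

    from sqlalchemy import create_engine, event

    engine = create_engine("postgresql://scott:tiger@localhost/test")  # hypothetical URL

    @event.listens_for(engine, "connect")
    def set_time_zone(dbapi_conn, connection_record):
        # runs once for each new DBAPI connection created by the pool
        cursor = dbapi_conn.cursor()
        cursor.execute("SET SESSION TIME ZONE 0")
        cursor.close()

Any connection subsequently checked out from the engine, e.g. via engine.connect(), then sees the session time zone already applied.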
diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py
index 0354fa436..851facd2a 100644
--- a/test/dialect/postgresql/test_reflection.py
+++ b/test/dialect/postgresql/test_reflection.py
@@ -13,6 +13,7 @@ from sqlalchemy import exc
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import ARRAY
+import re
class ForeignTableReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
@@ -130,6 +131,15 @@ class MaterializedViewReflectionTest(
insp = inspect(testing.db)
eq_(set(insp.get_view_names()), set(['test_mview', 'test_regview']))
+ def test_get_view_definition(self):
+ insp = inspect(testing.db)
+ eq_(
+ re.sub(
+ r'[\n\t ]+', ' ',
+ insp.get_view_definition("test_mview").strip()),
+ "SELECT testtable.id, testtable.data FROM testtable;"
+ )
+
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
"""Test PostgreSQL domains"""
@@ -673,6 +683,7 @@ class ReflectionTest(fixtures.TestBase):
eq_(ind, [{'unique': False, 'column_names': ['y'], 'name': 'idx1'}])
conn.close()
+ @testing.fails_if("postgresql < 8.2", "reloptions not supported")
@testing.provide_metadata
def test_index_reflection_with_storage_options(self):
"""reflect indexes with storage options set"""
diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py
index fac0f2df8..49a8cfabd 100644
--- a/test/dialect/postgresql/test_types.py
+++ b/test/dialect/postgresql/test_types.py
@@ -7,11 +7,11 @@ from sqlalchemy import testing
import datetime
from sqlalchemy import Table, MetaData, Column, Integer, Enum, Float, select, \
func, DateTime, Numeric, exc, String, cast, REAL, TypeDecorator, Unicode, \
- Text, null, text
+ Text, null, text, column, Array, any_, all_
from sqlalchemy.sql import operators
from sqlalchemy import types
import sqlalchemy as sa
-from sqlalchemy.dialects.postgresql import base as postgresql
+from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import HSTORE, hstore, array, \
INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, TSTZRANGE, \
JSON, JSONB
@@ -20,6 +20,8 @@ from sqlalchemy import util
from sqlalchemy.testing.util import round_decimal
from sqlalchemy import inspect
from sqlalchemy import event
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import Session
tztable = notztable = metadata = table = None
@@ -497,6 +499,34 @@ class EnumTest(fixtures.TestBase, AssertsExecutionResults):
finally:
metadata.drop_all()
+ @testing.provide_metadata
+ def test_custom_subclass(self):
+ class MyEnum(TypeDecorator):
+ impl = Enum('oneHI', 'twoHI', 'threeHI', name='myenum')
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value += "HI"
+ return value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value += "THERE"
+ return value
+
+ t1 = Table(
+ 'table1', self.metadata,
+ Column('data', MyEnum())
+ )
+ self.metadata.create_all(testing.db)
+
+ with testing.db.connect() as conn:
+ conn.execute(t1.insert(), {"data": "two"})
+ eq_(
+ conn.scalar(select([t1.c.data])),
+ "twoHITHERE"
+ )
+
class OIDTest(fixtures.TestBase):
__only_on__ = 'postgresql'
@@ -559,6 +589,14 @@ class NumericInterpretationTest(fixtures.TestBase):
)
+class PythonTypeTest(fixtures.TestBase):
+ def test_interval(self):
+ is_(
+ postgresql.INTERVAL().python_type,
+ datetime.timedelta
+ )
+
+
class TimezoneTest(fixtures.TestBase):
__backend__ = True
@@ -698,7 +736,178 @@ class TimePrecisionTest(fixtures.TestBase, AssertsCompiledSQL):
eq_(t2.c.c6.type.timezone, True)
-class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
+class ArrayTest(AssertsCompiledSQL, fixtures.TestBase):
+ __dialect__ = 'postgresql'
+
+ def test_array_int_index(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col[3]]),
+ "SELECT x[%(x_1)s] AS anon_1",
+ checkparams={'x_1': 3}
+ )
+
+ def test_array_any(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col.any(7, operator=operators.lt)]),
+ "SELECT %(param_1)s < ANY (x) AS anon_1",
+ checkparams={'param_1': 7}
+ )
+
+ def test_array_all(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col.all(7, operator=operators.lt)]),
+ "SELECT %(param_1)s < ALL (x) AS anon_1",
+ checkparams={'param_1': 7}
+ )
+
+ def test_array_contains(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col.contains(array([4, 5, 6]))]),
+ "SELECT x @> ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
+ "AS anon_1",
+ checkparams={'param_1': 4, 'param_3': 6, 'param_2': 5}
+ )
+
+ def test_array_contained_by(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col.contained_by(array([4, 5, 6]))]),
+ "SELECT x <@ ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
+ "AS anon_1",
+ checkparams={'param_1': 4, 'param_3': 6, 'param_2': 5}
+ )
+
+ def test_array_overlap(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col.overlap(array([4, 5, 6]))]),
+ "SELECT x && ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
+ "AS anon_1",
+ checkparams={'param_1': 4, 'param_3': 6, 'param_2': 5}
+ )
+
+ def test_array_slice_index(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ self.assert_compile(
+ select([col[5:10]]),
+ "SELECT x[%(x_1)s:%(x_2)s] AS anon_1",
+ checkparams={'x_2': 10, 'x_1': 5}
+ )
+
+ def test_array_dim_index(self):
+ col = column('x', postgresql.ARRAY(Integer, dimensions=2))
+ self.assert_compile(
+ select([col[3][5]]),
+ "SELECT x[%(x_1)s][%(param_1)s] AS anon_1",
+ checkparams={'x_1': 3, 'param_1': 5}
+ )
+
+ def test_array_concat(self):
+ col = column('x', postgresql.ARRAY(Integer))
+ literal = array([4, 5])
+
+ self.assert_compile(
+ select([col + literal]),
+ "SELECT x || ARRAY[%(param_1)s, %(param_2)s] AS anon_1",
+ checkparams={'param_1': 4, 'param_2': 5}
+ )
+
+ def test_array_index_map_dimensions(self):
+ col = column('x', postgresql.ARRAY(Integer, dimensions=3))
+ is_(
+ col[5].type._type_affinity, Array
+ )
+ assert isinstance(
+ col[5].type, postgresql.ARRAY
+ )
+ eq_(
+ col[5].type.dimensions, 2
+ )
+ is_(
+ col[5][6].type._type_affinity, Array
+ )
+ assert isinstance(
+ col[5][6].type, postgresql.ARRAY
+ )
+ eq_(
+ col[5][6].type.dimensions, 1
+ )
+ is_(
+ col[5][6][7].type._type_affinity, Integer
+ )
+
+ def test_array_getitem_single_type(self):
+ m = MetaData()
+ arrtable = Table(
+ 'arrtable', m,
+ Column('intarr', postgresql.ARRAY(Integer)),
+ Column('strarr', postgresql.ARRAY(String)),
+ )
+ is_(arrtable.c.intarr[1].type._type_affinity, Integer)
+ is_(arrtable.c.strarr[1].type._type_affinity, String)
+
+ def test_array_getitem_slice_type(self):
+ m = MetaData()
+ arrtable = Table(
+ 'arrtable', m,
+ Column('intarr', postgresql.ARRAY(Integer)),
+ Column('strarr', postgresql.ARRAY(String)),
+ )
+
+ # type affinity is Array...
+ is_(arrtable.c.intarr[1:3].type._type_affinity, Array)
+ is_(arrtable.c.strarr[1:3].type._type_affinity, Array)
+
+ # but the slice returns the actual type
+ assert isinstance(arrtable.c.intarr[1:3].type, postgresql.ARRAY)
+ assert isinstance(arrtable.c.strarr[1:3].type, postgresql.ARRAY)
+
+ def test_array_functions_plus_getitem(self):
+ """test parenthesizing of functions plus indexing, which seems
+ to be required by Postgresql.
+
+ """
+ stmt = select([
+ func.array_cat(
+ array([1, 2, 3]),
+ array([4, 5, 6]),
+ type_=postgresql.ARRAY(Integer)
+ )[2:5]
+ ])
+ self.assert_compile(
+ stmt,
+ "SELECT (array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], "
+ "ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))"
+ "[%(param_7)s:%(param_8)s] AS anon_1"
+ )
+
+ self.assert_compile(
+ func.array_cat(
+ array([1, 2, 3]),
+ array([4, 5, 6]),
+ type_=postgresql.ARRAY(Integer)
+ )[3],
+ "(array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], "
+ "ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))[%(param_7)s]"
+ )
+
+ def test_array_agg_generic(self):
+ expr = func.array_agg(column('q', Integer))
+ is_(expr.type.__class__, types.Array)
+ is_(expr.type.item_type.__class__, Integer)
+
+ def test_array_agg_specific(self):
+ from sqlalchemy.dialects.postgresql import array_agg
+ expr = array_agg(column('q', Integer))
+ is_(expr.type.__class__, postgresql.ARRAY)
+ is_(expr.type.item_type.__class__, Integer)
+
+
+class ArrayRoundTripTest(fixtures.TablesTest, AssertsExecutionResults):
__only_on__ = 'postgresql'
__backend__ = True
@@ -754,6 +963,89 @@ class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
assert isinstance(tbl.c.intarr.type.item_type, Integer)
assert isinstance(tbl.c.strarr.type.item_type, String)
+ @testing.provide_metadata
+ def test_array_agg(self):
+ values_table = Table('values', self.metadata, Column('value', Integer))
+ self.metadata.create_all(testing.db)
+ testing.db.execute(
+ values_table.insert(),
+ [{'value': i} for i in range(1, 10)]
+ )
+
+ stmt = select([func.array_agg(values_table.c.value)])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ list(range(1, 10))
+ )
+
+ stmt = select([func.array_agg(values_table.c.value)[3]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ 3
+ )
+
+ stmt = select([func.array_agg(values_table.c.value)[2:4]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ [2, 3, 4]
+ )
+
+ def test_array_index_slice_exprs(self):
+ """test a variety of expressions that sometimes need parenthesizing"""
+
+ stmt = select([array([1, 2, 3, 4])[2:3]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ [2, 3]
+ )
+
+ stmt = select([array([1, 2, 3, 4])[2]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ 2
+ )
+
+ stmt = select([(array([1, 2]) + array([3, 4]))[2:3]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ [2, 3]
+ )
+
+ stmt = select([array([1, 2]) + array([3, 4])[2:3]])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ [1, 2, 4]
+ )
+
+ stmt = select([array([1, 2])[2:3] + array([3, 4])])
+ eq_(
+ testing.db.execute(stmt).scalar(),
+ [2, 3, 4]
+ )
+
+ stmt = select([
+ func.array_cat(
+ array([1, 2, 3]),
+ array([4, 5, 6]),
+ type_=postgresql.ARRAY(Integer)
+ )[2:5]
+ ])
+ eq_(
+ testing.db.execute(stmt).scalar(), [2, 3, 4, 5]
+ )
+
+ def test_any_all_exprs(self):
+ stmt = select([
+ 3 == any_(func.array_cat(
+ array([1, 2, 3]),
+ array([4, 5, 6]),
+ type_=postgresql.ARRAY(Integer)
+ ))
+ ])
+ eq_(
+ testing.db.execute(stmt).scalar(), True
+ )
+
def test_insert_array(self):
arrtable = self.tables.arrtable
arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'),
@@ -828,16 +1120,6 @@ class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
), True
)
- def test_array_getitem_single_type(self):
- arrtable = self.tables.arrtable
- is_(arrtable.c.intarr[1].type._type_affinity, Integer)
- is_(arrtable.c.strarr[1].type._type_affinity, String)
-
- def test_array_getitem_slice_type(self):
- arrtable = self.tables.arrtable
- is_(arrtable.c.intarr[1:3].type._type_affinity, postgresql.ARRAY)
- is_(arrtable.c.strarr[1:3].type._type_affinity, postgresql.ARRAY)
-
def test_array_getitem_single_exec(self):
arrtable = self.tables.arrtable
self._fixture_456(arrtable)
@@ -926,6 +1208,14 @@ class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
lambda elem: (
x for x in elem))
+ def test_multi_dim_roundtrip(self):
+ arrtable = self.tables.arrtable
+ testing.db.execute(arrtable.insert(), dimarr=[[1, 2, 3], [4, 5, 6]])
+ eq_(
+ testing.db.scalar(select([arrtable.c.dimarr])),
+ [[-1, 0, 1], [2, 3, 4]]
+ )
+
def test_array_contained_by_exec(self):
arrtable = self.tables.arrtable
with testing.db.connect() as conn:
@@ -1030,12 +1320,98 @@ class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
set([('1', '2', '3'), ('4', '5', '6'), (('4', '5'), ('6', '7'))])
)
- def test_dimension(self):
- arrtable = self.tables.arrtable
- testing.db.execute(arrtable.insert(), dimarr=[[1, 2, 3], [4, 5, 6]])
+ def test_array_plus_native_enum_create(self):
+ m = MetaData()
+ t = Table(
+ 't', m,
+ Column(
+ 'data_1',
+ postgresql.ARRAY(
+ postgresql.ENUM('a', 'b', 'c', name='my_enum_1')
+ )
+ ),
+ Column(
+ 'data_2',
+ postgresql.ARRAY(
+ types.Enum('a', 'b', 'c', name='my_enum_2')
+ )
+ )
+ )
+
+ t.create(testing.db)
eq_(
- testing.db.scalar(select([arrtable.c.dimarr])),
- [[-1, 0, 1], [2, 3, 4]]
+ set(e['name'] for e in inspect(testing.db).get_enums()),
+ set(['my_enum_1', 'my_enum_2'])
+ )
+ t.drop(testing.db)
+ eq_(inspect(testing.db).get_enums(), [])
+
+
+class HashableFlagORMTest(fixtures.TestBase):
+ """test the various 'collection' types that they flip the 'hashable' flag
+ appropriately. [ticket:3499]"""
+
+ __only_on__ = 'postgresql'
+
+ def _test(self, type_, data):
+ Base = declarative_base(metadata=self.metadata)
+
+ class A(Base):
+ __tablename__ = 'a1'
+ id = Column(Integer, primary_key=True)
+ data = Column(type_)
+ Base.metadata.create_all(testing.db)
+ s = Session(testing.db)
+ s.add_all([
+ A(data=elem) for elem in data
+ ])
+ s.commit()
+
+ eq_(
+ [(obj.A.id, obj.data) for obj in
+ s.query(A, A.data).order_by(A.id)],
+ list(enumerate(data, 1))
+ )
+
+ @testing.provide_metadata
+ def test_array(self):
+ self._test(
+ postgresql.ARRAY(Text()),
+ [['a', 'b', 'c'], ['d', 'e', 'f']]
+ )
+
+ @testing.requires.hstore
+ @testing.provide_metadata
+ def test_hstore(self):
+ self._test(
+ postgresql.HSTORE(),
+ [
+ {'a': '1', 'b': '2', 'c': '3'},
+ {'d': '4', 'e': '5', 'f': '6'}
+ ]
+ )
+
+ @testing.provide_metadata
+ def test_json(self):
+ self._test(
+ postgresql.JSON(),
+ [
+ {'a': '1', 'b': '2', 'c': '3'},
+ {'d': '4', 'e': {'e1': '5', 'e2': '6'},
+ 'f': {'f1': [9, 10, 11]}}
+ ]
+ )
+
+ @testing.requires.postgresql_jsonb
+ @testing.provide_metadata
+ def test_jsonb(self):
+ self._test(
+ postgresql.JSONB(),
+ [
+ {'a': '1', 'b': '2', 'c': '3'},
+ {'d': '4', 'e': {'e1': '5', 'e2': '6'},
+ 'f': {'f1': [9, 10, 11]}}
+ ]
)
@@ -1051,6 +1427,16 @@ class TimestampTest(fixtures.TestBase, AssertsExecutionResults):
result = connection.execute(s).first()
eq_(result[0], datetime.datetime(2007, 12, 25, 0, 0))
+ def test_interval_arithmetic(self):
+ # basically testing that we get timedelta back for an INTERVAL
+ # result. more of a driver assertion.
+ engine = testing.db
+ connection = engine.connect()
+
+ s = select([text("timestamp '2007-12-25' - timestamp '2007-11-15'")])
+ result = connection.execute(s).first()
+ eq_(result[0], datetime.timedelta(40))
+
class SpecialTypesTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
@@ -1372,6 +1758,19 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
{"key1": "value1", "key2": "value2"}
)
+ def test_ret_type_text(self):
+ col = column('x', HSTORE())
+
+ is_(col['foo'].type.__class__, Text)
+
+ def test_ret_type_custom(self):
+ class MyType(types.UserDefinedType):
+ pass
+
+ col = column('x', HSTORE(text_type=MyType))
+
+ is_(col['foo'].type.__class__, MyType)
+
def test_where_has_key(self):
self._test_where(
# hide from 2to3
@@ -1394,7 +1793,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
def test_where_defined(self):
self._test_where(
self.hashcol.defined('foo'),
- "defined(test_table.hash, %(param_1)s)"
+ "defined(test_table.hash, %(defined_1)s)"
)
def test_where_contains(self):
@@ -1425,7 +1824,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
def test_cols_delete_single_key(self):
self._test_cols(
self.hashcol.delete('foo'),
- "delete(test_table.hash, %(param_1)s) AS delete_1",
+ "delete(test_table.hash, %(delete_2)s) AS delete_1",
True
)
@@ -1440,7 +1839,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
def test_cols_delete_matching_pairs(self):
self._test_cols(
self.hashcol.delete(hstore('1', '2')),
- ("delete(test_table.hash, hstore(%(param_1)s, %(param_2)s)) "
+ ("delete(test_table.hash, hstore(%(hstore_1)s, %(hstore_2)s)) "
"AS delete_1"),
True
)
@@ -1456,7 +1855,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
def test_cols_hstore_pair_text(self):
self._test_cols(
hstore('foo', '3')['foo'],
- "hstore(%(param_1)s, %(param_2)s) -> %(hstore_1)s AS anon_1",
+ "hstore(%(hstore_1)s, %(hstore_2)s) -> %(hstore_3)s AS anon_1",
False
)
@@ -1481,14 +1880,14 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
self._test_cols(
self.hashcol.concat(hstore(cast(self.test_table.c.id, Text), '3')),
("test_table.hash || hstore(CAST(test_table.id AS TEXT), "
- "%(param_1)s) AS anon_1"),
+ "%(hstore_1)s) AS anon_1"),
True
)
def test_cols_concat_op(self):
self._test_cols(
hstore('foo', 'bar') + self.hashcol,
- "hstore(%(param_1)s, %(param_2)s) || test_table.hash AS anon_1",
+ "hstore(%(hstore_1)s, %(hstore_2)s) || test_table.hash AS anon_1",
True
)
@@ -2093,19 +2492,59 @@ class JSONTest(AssertsCompiledSQL, fixtures.TestBase):
"(test_table.test_column #> %(test_column_1)s) IS NULL"
)
+ def test_path_typing(self):
+ col = column('x', JSON())
+ is_(
+ col['q'].type._type_affinity, JSON
+ )
+ is_(
+ col[('q', )].type._type_affinity, JSON
+ )
+ is_(
+ col['q']['p'].type._type_affinity, JSON
+ )
+ is_(
+ col[('q', 'p')].type._type_affinity, JSON
+ )
+
+ def test_custom_astext_type(self):
+ class MyType(types.UserDefinedType):
+ pass
+
+ col = column('x', JSON(astext_type=MyType))
+
+ is_(
+ col['q'].astext.type.__class__, MyType
+ )
+
+ is_(
+ col[('q', 'p')].astext.type.__class__, MyType
+ )
+
+ is_(
+ col['q']['p'].astext.type.__class__, MyType
+ )
+
def test_where_getitem_as_text(self):
self._test_where(
self.jsoncol['bar'].astext == None,
"(test_table.test_column ->> %(test_column_1)s) IS NULL"
)
- def test_where_getitem_as_cast(self):
+ def test_where_getitem_astext_cast(self):
self._test_where(
- self.jsoncol['bar'].cast(Integer) == 5,
+ self.jsoncol['bar'].astext.cast(Integer) == 5,
"CAST(test_table.test_column ->> %(test_column_1)s AS INTEGER) "
"= %(param_1)s"
)
+ def test_where_getitem_json_cast(self):
+ self._test_where(
+ self.jsoncol['bar'].cast(Integer) == 5,
+ "CAST(test_table.test_column -> %(test_column_1)s AS INTEGER) "
+ "= %(param_1)s"
+ )
+
def test_where_path_as_text(self):
self._test_where(
self.jsoncol[("foo", 1)].astext == None,
@@ -2144,6 +2583,7 @@ class JSONRoundTripTest(fixtures.TablesTest):
{'name': 'r3', 'data': {"k1": "r3v1", "k2": "r3v2"}},
{'name': 'r4', 'data': {"k1": "r4v1", "k2": "r4v2"}},
{'name': 'r5', 'data': {"k1": "r5v1", "k2": "r5v2", "k3": 5}},
+ {'name': 'r6', 'data': {"k1": {"r6v1": {'subr': [1, 2, 3]}}}},
)
def _assert_data(self, compare, column='data'):
@@ -2164,6 +2604,15 @@ class JSONRoundTripTest(fixtures.TablesTest):
).fetchall()
eq_([d for d, in data], [None])
+ def _assert_column_is_JSON_NULL(self, column='data'):
+ col = self.tables.data_table.c[column]
+
+ data = testing.db.execute(
+ select([col]).
+ where(cast(col, String) == "null")
+ ).fetchall()
+ eq_([d for d, in data], [None])
+
def _test_insert(self, engine):
engine.execute(
self.tables.data_table.insert(),
@@ -2185,6 +2634,13 @@ class JSONRoundTripTest(fixtures.TablesTest):
)
self._assert_column_is_NULL(column='nulldata')
+ def _test_insert_nulljson_into_none_as_null(self, engine):
+ engine.execute(
+ self.tables.data_table.insert(),
+ {'name': 'r1', 'nulldata': JSON.NULL}
+ )
+ self._assert_column_is_JSON_NULL(column='nulldata')
+
def _non_native_engine(self, json_serializer=None, json_deserializer=None):
if json_serializer is not None or json_deserializer is not None:
options = {
@@ -2233,6 +2689,11 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = testing.db
self._test_insert_none_as_null(engine)
+ @testing.requires.psycopg2_native_json
+ def test_insert_native_nulljson_into_none_as_null(self):
+ engine = testing.db
+ self._test_insert_nulljson_into_none_as_null(engine)
+
def test_insert_python(self):
engine = self._non_native_engine()
self._test_insert(engine)
@@ -2245,6 +2706,10 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_insert_none_as_null(engine)
+ def test_insert_python_nulljson_into_none_as_null(self):
+ engine = self._non_native_engine()
+ self._test_insert_nulljson_into_none_as_null(engine)
+
def _test_custom_serialize_deserialize(self, native):
import json
@@ -2309,12 +2774,28 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = testing.db
self._fixture_data(engine)
data_table = self.tables.data_table
+
result = engine.execute(
- select([data_table.c.data]).where(
- data_table.c.data[('k1',)].astext == 'r3v1'
+ select([data_table.c.name]).where(
+ data_table.c.data[('k1', 'r6v1', 'subr')].astext == "[1, 2, 3]"
)
- ).first()
- eq_(result, ({'k1': 'r3v1', 'k2': 'r3v2'},))
+ )
+ eq_(result.scalar(), 'r6')
+
+ @testing.fails_on(
+ "postgresql < 9.4",
+ "Improvement in Postgresql behavior?")
+ def test_multi_index_query(self):
+ engine = testing.db
+ self._fixture_data(engine)
+ data_table = self.tables.data_table
+
+ result = engine.execute(
+ select([data_table.c.name]).where(
+ data_table.c.data['k1']['r6v1']['subr'].astext == "[1, 2, 3]"
+ )
+ )
+ eq_(result.scalar(), 'r6')
def test_query_returned_as_text(self):
engine = testing.db
@@ -2330,7 +2811,7 @@ class JSONRoundTripTest(fixtures.TablesTest):
self._fixture_data(engine)
data_table = self.tables.data_table
result = engine.execute(
- select([data_table.c.data['k3'].cast(Integer)]).where(
+ select([data_table.c.data['k3'].astext.cast(Integer)]).where(
data_table.c.name == 'r5')
).first()
assert isinstance(result[0], int)
@@ -2398,6 +2879,36 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = testing.db
self._test_unicode_round_trip(engine)
+ def test_eval_none_flag_orm(self):
+ Base = declarative_base()
+
+ class Data(Base):
+ __table__ = self.tables.data_table
+
+ s = Session(testing.db)
+
+ d1 = Data(name='d1', data=None, nulldata=None)
+ s.add(d1)
+ s.commit()
+
+ s.bulk_insert_mappings(
+ Data, [{"name": "d2", "data": None, "nulldata": None}]
+ )
+ eq_(
+ s.query(
+ cast(self.tables.data_table.c.data, String),
+ cast(self.tables.data_table.c.nulldata, String)
+ ).filter(self.tables.data_table.c.name == 'd1').first(),
+ ("null", None)
+ )
+ eq_(
+ s.query(
+ cast(self.tables.data_table.c.data, String),
+ cast(self.tables.data_table.c.nulldata, String)
+ ).filter(self.tables.data_table.c.name == 'd2').first(),
+ ("null", None)
+ )
+
class JSONBTest(JSONTest):
@@ -2444,7 +2955,6 @@ class JSONBTest(JSONTest):
class JSONBRoundTripTest(JSONRoundTripTest):
- __only_on__ = ('postgresql >= 9.4',)
__requires__ = ('postgresql_jsonb', )
test_type = JSONB
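The revised JSON tests above distinguish plain indexing from text extraction: .astext.cast() renders the ->> text operator inside the CAST, while a bare .cast() keeps the -> JSON operator. A minimal sketch of the two forms, using a hypothetical column and key name:

    from sqlalchemy import Integer
    from sqlalchemy.sql import column
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.dialects.postgresql import JSON

    data = column('data', JSON())

    # extract the element as text, then cast: CAST(data ->> ... AS INTEGER)
    astext_form = data['k3'].astext.cast(Integer)

    # cast the JSON element directly: CAST(data -> ... AS INTEGER)
    json_form = data['k3'].cast(Integer)

    print(astext_form.compile(dialect=postgresql.dialect()))
    print(json_form.compile(dialect=postgresql.dialect()))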
diff --git a/test/dialect/test_oracle.py b/test/dialect/test_oracle.py
index e080568cf..dd4a888ff 100644
--- a/test/dialect/test_oracle.py
+++ b/test/dialect/test_oracle.py
@@ -5,6 +5,7 @@ from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import types as sqltypes, exc, schema
from sqlalchemy.sql import table, column
+from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.util import u, b
@@ -1859,6 +1860,31 @@ class TableReflectionTest(fixtures.TestBase):
tbl = Table('test_compress', m2, autoload=True)
assert tbl.dialect_options['oracle']['compress'] == "OLTP"
+ @testing.provide_metadata
+ def test_reflect_lowercase_forced_tables(self):
+ metadata = self.metadata
+
+ Table(
+ quoted_name('t1', quote=True), metadata,
+ Column('id', Integer, primary_key=True),
+ )
+ Table(
+ quoted_name('t2', quote=True), metadata,
+ Column('id', Integer, primary_key=True),
+ Column('t1id', ForeignKey('t1.id'))
+ )
+ metadata.create_all()
+
+ m2 = MetaData(testing.db)
+ t2_ref = Table(quoted_name('t2', quote=True), m2, autoload=True)
+ t1_ref = m2.tables['t1']
+ assert t2_ref.c.t1id.references(t1_ref.c.id)
+
+ m3 = MetaData(testing.db)
+ m3.reflect(only=lambda name, m: name.lower() in ('t1', 't2'))
+ assert m3.tables['t2'].c.t1id.references(m3.tables['t1'].c.id)
+
+
class RoundTripIndexTest(fixtures.TestBase):
__only_on__ = 'oracle'
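The new test_reflect_lowercase_forced_tables test relies on quoted_name with quote=True; Oracle folds unquoted identifiers to upper case, so forcing quoting is what lets a genuinely lower-case table name be created and then found again during reflection. A minimal sketch under that assumption (the engine URL is hypothetical):

    from sqlalchemy import MetaData, Table, Column, Integer, create_engine
    from sqlalchemy.sql.elements import quoted_name

    engine = create_engine("oracle://scott:tiger@localhost/xe")  # hypothetical URL
    metadata = MetaData()

    # quote=True preserves the exact-case name "t1"; unquoted it would become T1
    Table(
        quoted_name('t1', quote=True), metadata,
        Column('id', Integer, primary_key=True))
    metadata.create_all(engine)

    # reflecting with the same quoted name locates the lower-case table
    m2 = MetaData()
    t1_ref = Table(quoted_name('t1', quote=True), m2, autoload_with=engine)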
diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py
index 17920c127..33903ff89 100644
--- a/test/dialect/test_sqlite.py
+++ b/test/dialect/test_sqlite.py
@@ -20,7 +20,7 @@ from sqlalchemy.engine.url import make_url
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
AssertsExecutionResults, engines
from sqlalchemy import testing
-from sqlalchemy.schema import CreateTable
+from sqlalchemy.schema import CreateTable, FetchedValue
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import mock
@@ -535,29 +535,12 @@ class DialectTest(fixtures.TestBase, AssertsExecutionResults):
assert e.pool.__class__ is pool.NullPool
-
-class AttachedMemoryDBTest(fixtures.TestBase):
+class AttachedDBTest(fixtures.TestBase):
__only_on__ = 'sqlite'
- dbname = None
-
- def setUp(self):
- self.conn = conn = testing.db.connect()
- if self.dbname is None:
- dbname = ':memory:'
- else:
- dbname = self.dbname
- conn.execute('ATTACH DATABASE "%s" AS test_schema' % dbname)
- self.metadata = MetaData()
-
- def tearDown(self):
- self.metadata.drop_all(self.conn)
- self.conn.execute('DETACH DATABASE test_schema')
- if self.dbname:
- os.remove(self.dbname)
-
def _fixture(self):
meta = self.metadata
+ self.conn = testing.db.connect()
ct = Table(
'created', meta,
Column('id', Integer),
@@ -567,6 +550,14 @@ class AttachedMemoryDBTest(fixtures.TestBase):
meta.create_all(self.conn)
return ct
+ def setup(self):
+ self.conn = testing.db.connect()
+ self.metadata = MetaData()
+
+ def teardown(self):
+ self.metadata.drop_all(self.conn)
+ self.conn.close()
+
def test_no_tables(self):
insp = inspect(self.conn)
eq_(insp.get_table_names("test_schema"), [])
@@ -581,6 +572,18 @@ class AttachedMemoryDBTest(fixtures.TestBase):
insp = inspect(self.conn)
eq_(insp.get_table_names("test_schema"), ["created"])
+ def test_schema_names(self):
+ self._fixture()
+ insp = inspect(self.conn)
+ eq_(insp.get_schema_names(), ["main", "test_schema"])
+
+ # implicitly creates a "temp" schema
+ self.conn.execute("select * from sqlite_temp_master")
+
+ # we're not including it
+ insp = inspect(self.conn)
+ eq_(insp.get_schema_names(), ["main", "test_schema"])
+
def test_reflect_system_table(self):
meta = MetaData(self.conn)
alt_master = Table(
@@ -633,10 +636,6 @@ class AttachedMemoryDBTest(fixtures.TestBase):
eq_(row['name'], 'foo')
-class AttachedFileDBTest(AttachedMemoryDBTest):
- dbname = 'attached_db.db'
-
-
class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests SQLite-dialect specific compilation."""
@@ -752,6 +751,17 @@ class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
"WHERE data > 'a' AND data < 'b''s'",
dialect=sqlite.dialect())
+ def test_no_autoinc_on_composite_pk(self):
+ m = MetaData()
+ t = Table(
+ 't', m,
+ Column('x', Integer, primary_key=True, autoincrement=True),
+ Column('y', Integer, primary_key=True))
+ assert_raises_message(
+ exc.CompileError,
+ "SQLite does not support autoincrement for composite",
+ CreateTable(t).compile, dialect=sqlite.dialect()
+ )
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
@@ -782,23 +792,46 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
@testing.exclude('sqlite', '<', (3, 3, 8), 'no database support')
def test_empty_insert_pk2(self):
+ # now raises CompileError due to [ticket:3216]
assert_raises(
- exc.DBAPIError, self._test_empty_insert,
+ exc.CompileError, self._test_empty_insert,
Table(
'b', MetaData(testing.db),
Column('x', Integer, primary_key=True),
Column('y', Integer, primary_key=True)))
@testing.exclude('sqlite', '<', (3, 3, 8), 'no database support')
- def test_empty_insert_pk3(self):
+ def test_empty_insert_pk2_fv(self):
assert_raises(
exc.DBAPIError, self._test_empty_insert,
Table(
+ 'b', MetaData(testing.db),
+ Column('x', Integer, primary_key=True,
+ server_default=FetchedValue()),
+ Column('y', Integer, primary_key=True,
+ server_default=FetchedValue())))
+
+ @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support')
+ def test_empty_insert_pk3(self):
+ # now raises CompileError due to [ticket:3216]
+ assert_raises(
+ exc.CompileError, self._test_empty_insert,
+ Table(
'c', MetaData(testing.db),
Column('x', Integer, primary_key=True),
Column('y', Integer, DefaultClause('123'), primary_key=True)))
@testing.exclude('sqlite', '<', (3, 3, 8), 'no database support')
+ def test_empty_insert_pk3_fv(self):
+ assert_raises(
+ exc.DBAPIError, self._test_empty_insert,
+ Table(
+ 'c', MetaData(testing.db),
+ Column('x', Integer, primary_key=True,
+ server_default=FetchedValue()),
+ Column('y', Integer, DefaultClause('123'), primary_key=True)))
+
+ @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support')
def test_empty_insert_pk4(self):
self._test_empty_insert(
Table(
diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py
index 451cb8b0e..8551e1fcb 100644
--- a/test/engine/test_pool.py
+++ b/test/engine/test_pool.py
@@ -8,8 +8,9 @@ from sqlalchemy.testing import eq_, assert_raises, is_not_, is_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
import random
-from sqlalchemy.testing.mock import Mock, call, patch
+from sqlalchemy.testing.mock import Mock, call, patch, ANY
import weakref
+import collections
join_timeout = 10
@@ -1480,6 +1481,98 @@ class QueuePoolTest(PoolTestBase):
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
+ def test_connect_handler_not_called_for_recycled(self):
+ """test [ticket:3497]"""
+
+ dbapi, p = self._queuepool_dbapi_fixture(
+ pool_size=2, max_overflow=2)
+
+ canary = Mock()
+
+ c1 = p.connect()
+ c2 = p.connect()
+
+ c1.close()
+ c2.close()
+
+ dbapi.shutdown(True)
+
+ bad = p.connect()
+ p._invalidate(bad)
+ bad.close()
+ assert p._invalidate_time
+
+ event.listen(p, "connect", canary.connect)
+ event.listen(p, "checkout", canary.checkout)
+
+ assert_raises(
+ Exception,
+ p.connect
+ )
+
+ p._pool.queue = collections.deque(
+ [
+ c for c in p._pool.queue
+ if c.connection is not None
+ ]
+ )
+
+ dbapi.shutdown(False)
+ c = p.connect()
+ c.close()
+
+ eq_(
+ canary.mock_calls,
+ [
+ call.connect(ANY, ANY),
+ call.checkout(ANY, ANY, ANY)
+ ]
+ )
+
+ def test_connect_checkout_handler_always_gets_info(self):
+ """test [ticket:3497]"""
+
+ dbapi, p = self._queuepool_dbapi_fixture(
+ pool_size=2, max_overflow=2)
+
+ c1 = p.connect()
+ c2 = p.connect()
+
+ c1.close()
+ c2.close()
+
+ dbapi.shutdown(True)
+
+ bad = p.connect()
+ p._invalidate(bad)
+ bad.close()
+ assert p._invalidate_time
+
+ @event.listens_for(p, "connect")
+ def connect(conn, conn_rec):
+ conn_rec.info['x'] = True
+
+ @event.listens_for(p, "checkout")
+ def checkout(conn, conn_rec, conn_f):
+ assert 'x' in conn_rec.info
+
+ assert_raises(
+ Exception,
+ p.connect
+ )
+
+ p._pool.queue = collections.deque(
+ [
+ c for c in p._pool.queue
+ if c.connection is not None
+ ]
+ )
+
+ dbapi.shutdown(False)
+ c = p.connect()
+ c.close()
+
+
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2)
diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py
index 83650609d..b7bf87d63 100644
--- a/test/engine/test_reflection.py
+++ b/test/engine/test_reflection.py
@@ -311,22 +311,22 @@ class ReflectionTest(fixtures.TestBase, ComparesTables):
Don't mark this test as unsupported for any backend !
- (technically it fails with MySQL InnoDB since "id" comes before "id2")
-
"""
meta = self.metadata
- Table('test', meta,
+ Table(
+ 'test', meta,
Column('id', sa.Integer, primary_key=True),
Column('data', sa.String(50)),
- mysql_engine='MyISAM'
+ mysql_engine='InnoDB'
)
- Table('test2', meta,
- Column('id', sa.Integer, sa.ForeignKey('test.id'),
- primary_key=True),
+ Table(
+ 'test2', meta,
+ Column(
+ 'id', sa.Integer, sa.ForeignKey('test.id'), primary_key=True),
Column('id2', sa.Integer, primary_key=True),
Column('data', sa.String(50)),
- mysql_engine='MyISAM'
+ mysql_engine='InnoDB'
)
meta.create_all()
m2 = MetaData(testing.db)
@@ -334,7 +334,8 @@ class ReflectionTest(fixtures.TestBase, ComparesTables):
assert t1a._autoincrement_column is t1a.c.id
t2a = Table('test2', m2, autoload=True)
- assert t2a._autoincrement_column is t2a.c.id2
+ assert t2a._autoincrement_column is None
+
@skip('sqlite')
@testing.provide_metadata
diff --git a/test/ext/declarative/test_basic.py b/test/ext/declarative/test_basic.py
index ab0de801c..ae1a85f8b 100644
--- a/test/ext/declarative/test_basic.py
+++ b/test/ext/declarative/test_basic.py
@@ -102,6 +102,29 @@ class DeclarativeTest(DeclarativeTestBase):
assert User.addresses.property.mapper.class_ is Address
+ def test_unicode_string_resolve_backref(self):
+ class User(Base, fixtures.ComparableEntity):
+ __tablename__ = 'users'
+
+ id = Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True)
+ name = Column('name', String(50))
+
+ class Address(Base, fixtures.ComparableEntity):
+ __tablename__ = 'addresses'
+
+ id = Column(Integer, primary_key=True,
+ test_needs_autoincrement=True)
+ email = Column(String(50), key='_email')
+ user_id = Column('user_id', Integer, ForeignKey('users.id'),
+ key='_user_id')
+ user = relationship(
+ User,
+ backref=backref("addresses",
+ order_by=util.u("Address.email")))
+
+ assert Address.user.property.mapper.class_ is User
+
def test_no_table(self):
def go():
class User(Base):
@@ -1570,8 +1593,7 @@ class DeclarativeTest(DeclarativeTestBase):
meta = MetaData(testing.db)
t1 = Table(
't1', meta,
- Column('id', String(50),
- primary_key=True, test_needs_autoincrement=True),
+ Column('id', String(50), primary_key=True),
Column('data', String(50)))
meta.create_all()
try:
diff --git a/test/ext/declarative/test_inheritance.py b/test/ext/declarative/test_inheritance.py
index 3e6980190..274a6aa28 100644
--- a/test/ext/declarative/test_inheritance.py
+++ b/test/ext/declarative/test_inheritance.py
@@ -1453,3 +1453,33 @@ class ConcreteExtensionConfigTest(
"FROM actual_documents) AS pjoin"
)
+ def test_column_attr_names(self):
+ """test #3480"""
+
+ class Document(Base, AbstractConcreteBase):
+ documentType = Column('documenttype', String)
+
+ class Offer(Document):
+ __tablename__ = 'offers'
+
+ id = Column(Integer, primary_key=True)
+ __mapper_args__ = {
+ 'polymorphic_identity': 'offer'
+ }
+
+ configure_mappers()
+ session = Session()
+ self.assert_compile(
+ session.query(Document),
+ "SELECT pjoin.documenttype AS pjoin_documenttype, "
+ "pjoin.id AS pjoin_id, pjoin.type AS pjoin_type FROM "
+ "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
+ "'offer' AS type FROM offers) AS pjoin"
+ )
+
+ self.assert_compile(
+ session.query(Document.documentType),
+ "SELECT pjoin.documenttype AS pjoin_documenttype FROM "
+ "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
+ "'offer' AS type FROM offers) AS pjoin"
+ )
diff --git a/test/ext/declarative/test_mixin.py b/test/ext/declarative/test_mixin.py
index b9e40421c..1f9fa1dfa 100644
--- a/test/ext/declarative/test_mixin.py
+++ b/test/ext/declarative/test_mixin.py
@@ -1441,7 +1441,7 @@ class DeclaredAttrTest(DeclarativeTestBase, testing.AssertsCompiledSQL):
"SELECT b.x AS b_x, b.x + :x_1 AS anon_1, b.id AS b_id FROM b"
)
-
+ @testing.requires.predictable_gc
def test_singleton_gc(self):
counter = mock.Mock()
diff --git a/test/ext/test_associationproxy.py b/test/ext/test_associationproxy.py
index 8fb335b06..98e40b11e 100644
--- a/test/ext/test_associationproxy.py
+++ b/test/ext/test_associationproxy.py
@@ -1593,3 +1593,23 @@ class DictOfTupleUpdateTest(fixtures.TestBase):
a1.elements.update,
(("B", 3), 'elem2'), (("C", 4), "elem3")
)
+
+
+class InfoTest(fixtures.TestBase):
+ def test_constructor(self):
+ assoc = association_proxy('a', 'b', info={'some_assoc': 'some_value'})
+ eq_(assoc.info, {"some_assoc": "some_value"})
+
+ def test_empty(self):
+ assoc = association_proxy('a', 'b')
+ eq_(assoc.info, {})
+
+ def test_via_cls(self):
+ class Foob(object):
+ assoc = association_proxy('a', 'b')
+
+ eq_(Foob.assoc.info, {})
+
+ Foob.assoc.info["foo"] = 'bar'
+
+ eq_(Foob.assoc.info, {'foo': 'bar'})
diff --git a/test/ext/test_baked.py b/test/ext/test_baked.py
index 78c43fc7e..8bfa58403 100644
--- a/test/ext/test_baked.py
+++ b/test/ext/test_baked.py
@@ -1,6 +1,7 @@
from sqlalchemy.orm import Session, subqueryload, \
mapper, relationship, lazyload, clear_mappers
-from sqlalchemy.testing import eq_, is_, is_not_, assert_raises
+from sqlalchemy.testing import eq_, is_, is_not_
+from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy import testing
from test.orm import _fixtures
from sqlalchemy.ext.baked import BakedQuery, baked_lazyload, BakedLazyLoader
@@ -151,25 +152,68 @@ class LikeQueryTest(BakedTest):
(8, )
)
+ def test_one_or_none_no_result(self):
+ User = self.classes.User
+
+ bq = self.bakery(lambda s: s.query(User))
+ bq += lambda q: q.filter(User.name == 'asdf')
+
+ eq_(
+ bq(Session()).one_or_none(),
+ None
+ )
+
+ def test_one_or_none_result(self):
+ User = self.classes.User
+
+ bq = self.bakery(lambda s: s.query(User))
+ bq += lambda q: q.filter(User.name == 'ed')
+
+ u1 = bq(Session()).one_or_none()
+ eq_(u1.name, 'ed')
+
+ def test_one_or_none_multiple_result(self):
+ User = self.classes.User
+
+ bq = self.bakery(lambda s: s.query(User))
+ bq += lambda q: q.filter(User.name.like('%ed%'))
+
+ assert_raises_message(
+ orm_exc.MultipleResultsFound,
+ "Multiple rows were found for one_or_none()",
+ bq(Session()).one_or_none
+ )
+
def test_one_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'asdf')
- assert_raises(
+ assert_raises_message(
orm_exc.NoResultFound,
+ "No row was found for one()",
bq(Session()).one
)
+ def test_one_result(self):
+ User = self.classes.User
+
+ bq = self.bakery(lambda s: s.query(User))
+ bq += lambda q: q.filter(User.name == 'ed')
+
+ u1 = bq(Session()).one()
+ eq_(u1.name, 'ed')
+
def test_one_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like('%ed%'))
- assert_raises(
+ assert_raises_message(
orm_exc.MultipleResultsFound,
+ "Multiple rows were found for one()",
bq(Session()).one
)
@@ -227,6 +271,32 @@ class LikeQueryTest(BakedTest):
eq_(u2.name, 'chuck')
self.assert_sql_count(testing.db, go, 0)
+ def test_get_includes_getclause(self):
+ # test issue #3597
+ User = self.classes.User
+
+ bq = self.bakery(lambda s: s.query(User))
+
+ for i in range(5):
+ sess = Session()
+ u1 = bq(sess).get(7)
+ eq_(u1.name, 'jack')
+ sess.close()
+
+ eq_(len(bq._bakery), 2)
+
+ # simulate race where mapper._get_clause
+ # may be generated more than once
+ from sqlalchemy import inspect
+ del inspect(User).__dict__['_get_clause']
+
+ for i in range(5):
+ sess = Session()
+ u1 = bq(sess).get(7)
+ eq_(u1.name, 'jack')
+ sess.close()
+ eq_(len(bq._bakery), 4)
+
class ResultTest(BakedTest):
__backend__ = True
@@ -552,14 +622,14 @@ class ResultTest(BakedTest):
class LazyLoaderTest(BakedTest):
run_setup_mappers = 'each'
- def _o2m_fixture(self, lazy="select"):
+ def _o2m_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users, properties={
'addresses': relationship(
Address, order_by=self.tables.addresses.c.id,
- lazy=lazy)
+ lazy=lazy, **kw)
})
mapper(Address, self.tables.addresses)
return User, Address
@@ -647,6 +717,24 @@ class LazyLoaderTest(BakedTest):
u1._sa_instance_state
)
+ def test_systemwide_loaders_loadable_via_lazyloader(self):
+ from sqlalchemy.orm import configure_mappers
+ from sqlalchemy.orm.strategies import LazyLoader
+
+ baked.bake_lazy_loaders()
+ try:
+ User, Address = self._o2m_fixture(lazy='joined')
+
+ configure_mappers()
+
+ is_(
+ User.addresses.property.
+ _get_strategy_by_cls(LazyLoader).__class__,
+ BakedLazyLoader
+ )
+ finally:
+ baked.unbake_lazy_loaders()
+
def test_invocation_systemwide_loaders(self):
baked.bake_lazy_loaders()
try:
@@ -676,6 +764,50 @@ class LazyLoaderTest(BakedTest):
# not invoked
eq_(el.mock_calls, [])
+ def test_baked_lazy_loading_relationship_flag_true(self):
+ self._test_baked_lazy_loading_relationship_flag(True)
+
+ def test_baked_lazy_loading_relationship_flag_false(self):
+ self._test_baked_lazy_loading_relationship_flag(False)
+
+ def _test_baked_lazy_loading_relationship_flag(self, flag):
+ baked.bake_lazy_loaders()
+ try:
+ User, Address = self._o2m_fixture(bake_queries=flag)
+
+ sess = Session()
+ u1 = sess.query(User).first()
+
+ from sqlalchemy.orm import Query
+
+ canary = mock.Mock()
+
+ # I would think Mock can do this but apparently
+ # it cannot (wrap / autospec don't work together)
+ real_compile_context = Query._compile_context
+
+ def _my_compile_context(*arg, **kw):
+ if arg[0].column_descriptions[0]['entity'] is Address:
+ canary()
+ return real_compile_context(*arg, **kw)
+
+ with mock.patch.object(
+ Query,
+ "_compile_context",
+ _my_compile_context
+ ):
+ u1.addresses
+
+ sess.expire(u1)
+ u1.addresses
+ finally:
+ baked.unbake_lazy_loaders()
+
+ if flag:
+ eq_(canary.call_count, 1)
+ else:
+ eq_(canary.call_count, 2)
+
def test_baked_lazy_loading_option_o2m(self):
User, Address = self._o2m_fixture()
self._test_baked_lazy_loading(set_option=True)
diff --git a/test/ext/test_mutable.py b/test/ext/test_mutable.py
index a6bcdc47f..ed97a0d92 100644
--- a/test/ext/test_mutable.py
+++ b/test/ext/test_mutable.py
@@ -136,6 +136,38 @@ class _MutableDictTestBase(_MutableDictTestFixture):
eq_(f1.data, {'a': 'z'})
+ def test_pop(self):
+ sess = Session()
+
+ f1 = Foo(data={'a': 'b', 'c': 'd'})
+ sess.add(f1)
+ sess.commit()
+
+ eq_(f1.data.pop('a'), 'b')
+ sess.commit()
+
+ eq_(f1.data, {'c': 'd'})
+
+ def test_popitem(self):
+ sess = Session()
+
+ orig = {'a': 'b', 'c': 'd'}
+
+        # the orig dict remains unchanged when we assign it below,
+        # but copy it anyway to keep this future-proof
+ data = dict(orig)
+ f1 = Foo(data=data)
+ sess.add(f1)
+ sess.commit()
+
+ k, v = f1.data.popitem()
+ assert k in ('a', 'c')
+ orig.pop(k)
+
+ sess.commit()
+
+ eq_(f1.data, orig)
+
def test_setdefault(self):
sess = Session()
diff --git a/test/orm/inheritance/test_poly_persistence.py b/test/orm/inheritance/test_poly_persistence.py
index c6a54c0b5..361377de8 100644
--- a/test/orm/inheritance/test_poly_persistence.py
+++ b/test/orm/inheritance/test_poly_persistence.py
@@ -1,6 +1,6 @@
"""tests basic polymorphic mapper loading/saving, minimal relationships"""
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
+from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm import exc as orm_exc
@@ -271,18 +271,30 @@ def _generate_round_trip_test(include_base, lazy_relationship,
# into the "person_join" conversion.
palias = people.alias("palias")
dilbert = session.query(Person).get(dilbert.person_id)
- assert dilbert is session.query(Person).filter(
- (palias.c.name=='dilbert') & \
- (palias.c.person_id==Person.person_id)).first()
- assert dilbert is session.query(Engineer).filter(
- (palias.c.name=='dilbert') & \
- (palias.c.person_id==Person.person_id)).first()
- assert dilbert is session.query(Person).filter(
- (Engineer.engineer_name=="engineer1") & \
- (engineers.c.person_id==people.c.person_id)
- ).first()
- assert dilbert is session.query(Engineer).\
- filter(Engineer.engineer_name=="engineer1")[0]
+ is_(
+ dilbert,
+ session.query(Person).filter(
+ (palias.c.name == 'dilbert') &
+ (palias.c.person_id == Person.person_id)).first()
+ )
+ is_(
+ dilbert,
+ session.query(Engineer).filter(
+ (palias.c.name == 'dilbert') &
+ (palias.c.person_id == Person.person_id)).first()
+ )
+ is_(
+ dilbert,
+ session.query(Person).filter(
+ (Engineer.engineer_name == "engineer1") &
+ (engineers.c.person_id == people.c.person_id)
+ ).first()
+ )
+ is_(
+ dilbert,
+ session.query(Engineer).
+ filter(Engineer.engineer_name == "engineer1")[0]
+ )
session.flush()
session.expunge_all()
diff --git a/test/orm/inheritance/test_relationship.py b/test/orm/inheritance/test_relationship.py
index b1d99415d..e75d974d4 100644
--- a/test/orm/inheritance/test_relationship.py
+++ b/test/orm/inheritance/test_relationship.py
@@ -1,6 +1,6 @@
from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
- Session, aliased, with_polymorphic
+ Session, aliased, with_polymorphic, joinedload_all
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.engine import default
@@ -1360,6 +1360,216 @@ class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest):
"JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
)
+
+class JoinedloadOverWPolyAliased(
+ fixtures.DeclarativeMappedTest,
+ testing.AssertsCompiledSQL):
+ """exercise issues in #3593 and #3611"""
+
+ run_setup_mappers = 'each'
+ run_setup_classes = 'each'
+ run_define_tables = 'each'
+ __dialect__ = 'default'
+
+ @classmethod
+ def setup_classes(cls):
+ Base = cls.DeclarativeBasic
+
+ class Owner(Base):
+ __tablename__ = 'owner'
+
+ id = Column(Integer, primary_key=True)
+ type = Column(String(20))
+
+ __mapper_args__ = {
+ 'polymorphic_on': type,
+ 'with_polymorphic': ('*', None),
+ }
+
+ class SubOwner(Owner):
+ __mapper_args__ = {'polymorphic_identity': 'so'}
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+
+ id = Column(Integer, primary_key=True)
+ type = Column(String(20))
+
+ __mapper_args__ = {
+ 'polymorphic_on': type,
+ 'with_polymorphic': ('*', None),
+ }
+
+ class Sub1(Parent):
+ __mapper_args__ = {'polymorphic_identity': 's1'}
+
+ class Link(Base):
+ __tablename__ = 'link'
+
+ parent_id = Column(
+ Integer, ForeignKey('parent.id'), primary_key=True)
+ child_id = Column(
+ Integer, ForeignKey('parent.id'), primary_key=True)
+
+ def _fixture_from_base(self):
+ Parent = self.classes.Parent
+ Link = self.classes.Link
+ Link.child = relationship(
+ Parent, primaryjoin=Link.child_id == Parent.id)
+
+ Parent.links = relationship(
+ Link,
+ primaryjoin=Parent.id == Link.parent_id,
+ )
+ return Parent
+
+ def _fixture_from_subclass(self):
+ Sub1 = self.classes.Sub1
+ Link = self.classes.Link
+ Parent = self.classes.Parent
+ Link.child = relationship(
+ Parent, primaryjoin=Link.child_id == Parent.id)
+
+ Sub1.links = relationship(
+ Link,
+ primaryjoin=Sub1.id == Link.parent_id,
+ )
+ return Sub1
+
+ def _fixture_to_subclass_to_base(self):
+ Owner = self.classes.Owner
+ Parent = self.classes.Parent
+ Sub1 = self.classes.Sub1
+ Link = self.classes.Link
+
+ # Link -> Sub1 -> Owner
+
+ Link.child = relationship(
+ Sub1, primaryjoin=Link.child_id == Sub1.id)
+
+ Parent.owner_id = Column(ForeignKey('owner.id'))
+
+ Parent.owner = relationship(Owner)
+ return Parent
+
+ def _fixture_to_base_to_base(self):
+ Owner = self.classes.Owner
+ Parent = self.classes.Parent
+ Link = self.classes.Link
+
+ # Link -> Parent -> Owner
+
+ Link.child = relationship(
+ Parent, primaryjoin=Link.child_id == Parent.id)
+
+ Parent.owner_id = Column(ForeignKey('owner.id'))
+
+ Parent.owner = relationship(Owner)
+ return Parent
+
+ def test_from_base(self):
+ self._test_poly_single_poly(self._fixture_from_base)
+
+ def test_from_sub(self):
+ self._test_poly_single_poly(self._fixture_from_subclass)
+
+ def test_to_sub_to_base(self):
+ self._test_single_poly_poly(self._fixture_to_subclass_to_base)
+
+ def test_to_base_to_base(self):
+ self._test_single_poly_poly(self._fixture_to_base_to_base)
+
+ def _test_poly_single_poly(self, fn):
+ cls = fn()
+ Link = self.classes.Link
+
+ session = Session()
+ q = session.query(cls).options(
+ joinedload_all(
+ cls.links,
+ Link.child,
+ cls.links
+ )
+ )
+ if cls is self.classes.Sub1:
+ extra = " WHERE parent.type IN (:type_1)"
+ else:
+ extra = ""
+
+ self.assert_compile(
+ q,
+ "SELECT parent.id AS parent_id, parent.type AS parent_type, "
+ "link_1.parent_id AS link_1_parent_id, "
+ "link_1.child_id AS link_1_child_id, "
+ "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
+ "link_2.parent_id AS link_2_parent_id, "
+ "link_2.child_id AS link_2_child_id "
+ "FROM parent "
+ "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
+ "LEFT OUTER JOIN parent "
+ "AS parent_1 ON link_1.child_id = parent_1.id "
+ "LEFT OUTER JOIN link AS link_2 "
+ "ON parent_1.id = link_2.parent_id" + extra
+ )
+
+ def _test_single_poly_poly(self, fn):
+ parent_cls = fn()
+ Link = self.classes.Link
+
+ session = Session()
+ q = session.query(Link).options(
+ joinedload_all(
+ Link.child,
+ parent_cls.owner
+ )
+ )
+
+ if Link.child.property.mapper.class_ is self.classes.Sub1:
+ extra = "AND parent_1.type IN (:type_1) "
+ else:
+ extra = ""
+
+ self.assert_compile(
+ q,
+ "SELECT link.parent_id AS link_parent_id, "
+ "link.child_id AS link_child_id, parent_1.id AS parent_1_id, "
+ "parent_1.type AS parent_1_type, "
+ "parent_1.owner_id AS parent_1_owner_id, "
+ "owner_1.id AS owner_1_id, owner_1.type AS owner_1_type "
+ "FROM link LEFT OUTER JOIN parent AS parent_1 "
+ "ON link.child_id = parent_1.id " + extra +
+ "LEFT OUTER JOIN owner AS owner_1 "
+ "ON owner_1.id = parent_1.owner_id"
+ )
+
+ def test_local_wpoly(self):
+ Sub1 = self._fixture_from_subclass()
+ Parent = self.classes.Parent
+ Link = self.classes.Link
+
+ poly = with_polymorphic(Parent, [Sub1])
+
+ session = Session()
+ q = session.query(poly).options(
+ joinedload(poly.Sub1.links).
+ joinedload(Link.child.of_type(Sub1)).
+ joinedload(poly.Sub1.links)
+ )
+ self.assert_compile(
+ q,
+ "SELECT parent.id AS parent_id, parent.type AS parent_type, "
+ "link_1.parent_id AS link_1_parent_id, "
+ "link_1.child_id AS link_1_child_id, "
+ "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
+ "link_2.parent_id AS link_2_parent_id, "
+ "link_2.child_id AS link_2_child_id FROM parent "
+ "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
+ "LEFT OUTER JOIN parent AS parent_1 "
+ "ON link_1.child_id = parent_1.id "
+ "LEFT OUTER JOIN link AS link_2 ON parent_1.id = link_2.parent_id"
+ )
+
+
class JoinAcrossJoinedInhMultiPath(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
"""test long join paths with a joined-inh in the middle, where we go multiple
diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py
index 9f5d21a43..0d102c065 100644
--- a/test/orm/inheritance/test_single.py
+++ b/test/orm/inheritance/test_single.py
@@ -9,6 +9,8 @@ from sqlalchemy.testing.schema import Table, Column
class SingleInheritanceTest(testing.AssertsCompiledSQL, fixtures.MappedTest):
+ __dialect__ = 'default'
+
@classmethod
def define_tables(cls, metadata):
Table('employees', metadata,
@@ -208,6 +210,19 @@ class SingleInheritanceTest(testing.AssertsCompiledSQL, fixtures.MappedTest):
eq_(sess.query(Manager).filter(Manager.name.like('%m%')).count(), 2)
eq_(sess.query(Employee).filter(Employee.name.like('%m%')).count(), 3)
+ def test_exists_standalone(self):
+ Engineer = self.classes.Engineer
+
+ sess = create_session()
+
+ self.assert_compile(
+ sess.query(
+ sess.query(Engineer).filter(Engineer.name == 'foo').exists()),
+ "SELECT EXISTS (SELECT 1 FROM employees WHERE "
+ "employees.name = :name_1 AND employees.type "
+ "IN (:type_1, :type_2)) AS anon_1"
+ )
+
def test_type_filtering(self):
Employee, Manager, reports, Engineer = (self.classes.Employee,
self.classes.Manager,
diff --git a/test/orm/test_bulk.py b/test/orm/test_bulk.py
index e2a1464a6..878560cf6 100644
--- a/test/orm/test_bulk.py
+++ b/test/orm/test_bulk.py
@@ -2,7 +2,7 @@ from sqlalchemy import testing
from sqlalchemy.testing import eq_
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import fixtures
-from sqlalchemy import Integer, String, ForeignKey
+from sqlalchemy import Integer, String, ForeignKey, FetchedValue
from sqlalchemy.orm import mapper, Session
from sqlalchemy.testing.assertsql import CompiledSQL
from test.orm import _fixtures
@@ -156,6 +156,59 @@ class BulkInsertUpdateTest(BulkTest, _fixtures.FixtureTest):
)
+class BulkUDPostfetchTest(BulkTest, fixtures.MappedTest):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'a', metadata,
+ Column(
+ 'id', Integer,
+ primary_key=True,
+ test_needs_autoincrement=True),
+ Column('x', Integer),
+ Column('y', Integer,
+ server_default=FetchedValue(),
+ server_onupdate=FetchedValue()))
+
+ @classmethod
+ def setup_classes(cls):
+ class A(cls.Comparable):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ A = cls.classes.A
+ a = cls.tables.a
+
+ mapper(A, a)
+
+ def test_insert_w_fetch(self):
+ A = self.classes.A
+
+ s = Session()
+ a1 = A(x=1)
+ s.bulk_save_objects([a1])
+ s.commit()
+
+ def test_update_w_fetch(self):
+ A = self.classes.A
+
+ s = Session()
+ a1 = A(x=1, y=2)
+ s.add(a1)
+ s.commit()
+
+ eq_(a1.id, 1) # force a load
+ a1.x = 5
+ s.expire(a1, ['y'])
+ assert 'y' not in a1.__dict__
+ s.bulk_save_objects([a1])
+ s.commit()
+
+ eq_(a1.x, 5)
+ eq_(a1.y, 2)
+
+
class BulkInheritanceTest(BulkTest, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
diff --git a/test/orm/test_composites.py b/test/orm/test_composites.py
index 8b777dcdf..48027ec2d 100644
--- a/test/orm/test_composites.py
+++ b/test/orm/test_composites.py
@@ -313,8 +313,7 @@ class PrimaryKeyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('graphs', metadata,
- Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True,
nullable=True),
Column('name', String(30)))
diff --git a/test/orm/test_cycles.py b/test/orm/test_cycles.py
index c95b8d152..b5c1b6467 100644
--- a/test/orm/test_cycles.py
+++ b/test/orm/test_cycles.py
@@ -10,7 +10,7 @@ from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, sessionmaker
-from sqlalchemy.testing import eq_
+from sqlalchemy.testing import eq_, is_
from sqlalchemy.testing.assertsql import RegexSQL, CompiledSQL, AllOf
from sqlalchemy.testing import fixtures
@@ -816,6 +816,39 @@ class OneToManyManyToOneTest(fixtures.MappedTest):
{'id': b4.id}])
)
+ def test_post_update_m2o_detect_none(self):
+ person, ball, Ball, Person = (
+ self.tables.person,
+ self.tables.ball,
+ self.classes.Ball,
+ self.classes.Person)
+
+ mapper(Ball, ball, properties={
+ 'person': relationship(
+ Person, post_update=True,
+ primaryjoin=person.c.id == ball.c.person_id)
+ })
+ mapper(Person, person)
+
+ sess = create_session(autocommit=False, expire_on_commit=True)
+ sess.add(Ball(person=Person()))
+ sess.commit()
+ b1 = sess.query(Ball).first()
+
+ # needs to be unloaded
+ assert 'person' not in b1.__dict__
+ b1.person = None
+
+ self.assert_sql_execution(
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "UPDATE ball SET person_id=:person_id WHERE ball.id = :ball_id",
+ lambda ctx: {'person_id': None, 'ball_id': b1.id})
+ )
+
+ is_(b1.person, None)
+
class SelfReferentialPostUpdateTest(fixtures.MappedTest):
"""Post_update on a single self-referential mapper.
@@ -1181,9 +1214,10 @@ class PostUpdateBatchingTest(fixtures.MappedTest):
testing.db,
sess.flush,
CompiledSQL(
- "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, "
- "c3_id=:c3_id WHERE parent.id = :parent_id",
- lambda ctx: {'c2_id': c23.id, 'parent_id': p1.id, 'c1_id': c12.id, 'c3_id': c31.id}
+ "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, c3_id=:c3_id "
+ "WHERE parent.id = :parent_id",
+ lambda ctx: {'c2_id': c23.id, 'parent_id': p1.id,
+ 'c1_id': c12.id, 'c3_id': c31.id}
)
)
@@ -1193,8 +1227,9 @@ class PostUpdateBatchingTest(fixtures.MappedTest):
testing.db,
sess.flush,
CompiledSQL(
- "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, "
- "c3_id=:c3_id WHERE parent.id = :parent_id",
- lambda ctx: {'c2_id': None, 'parent_id': p1.id, 'c1_id': None, 'c3_id': None}
+ "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, c3_id=:c3_id "
+ "WHERE parent.id = :parent_id",
+ lambda ctx: {'c2_id': None, 'parent_id': p1.id,
+ 'c1_id': None, 'c3_id': None}
)
)
diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py
index 6d9d9ec4b..1c3b57690 100644
--- a/test/orm/test_eager_relations.py
+++ b/test/orm/test_eager_relations.py
@@ -5,7 +5,7 @@ import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.orm import joinedload, deferred, undefer, \
joinedload_all, backref, Session,\
- defaultload, Load
+ defaultload, Load, load_only
from sqlalchemy import Integer, String, Date, ForeignKey, and_, select, \
func, text
from sqlalchemy.testing.schema import Table, Column
@@ -2442,6 +2442,7 @@ class SubqueryAliasingTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
"""test #2188"""
__dialect__ = 'default'
+ run_create_tables = None
@classmethod
def define_tables(cls, metadata):
@@ -4013,6 +4014,7 @@ class CyclicalInheritingEagerTestTwo(fixtures.DeclarativeMappedTest,
class CyclicalInheritingEagerTestThree(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
__dialect__ = 'default'
+ run_create_tables = None
@classmethod
def setup_classes(cls):
@@ -4067,3 +4069,112 @@ class CyclicalInheritingEagerTestThree(fixtures.DeclarativeMappedTest,
"director_1.id = persistent_1.id) "
"ON director.other_id = persistent_1.id"
)
+
+
+class EnsureColumnsAddedTest(
+ fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL):
+ __dialect__ = 'default'
+ run_create_tables = None
+
+ @classmethod
+ def setup_classes(cls):
+ Base = cls.DeclarativeBasic
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True,
+ test_needs_autoincrement=True)
+ arb = Column(Integer, unique=True)
+ data = Column(Integer)
+ o2mchild = relationship("O2MChild")
+ m2mchild = relationship("M2MChild", secondary=Table(
+ 'parent_to_m2m', Base.metadata,
+ Column('parent_id', ForeignKey('parent.arb')),
+ Column('child_id', ForeignKey('m2mchild.id'))
+ ))
+
+ class O2MChild(Base):
+ __tablename__ = 'o2mchild'
+ id = Column(Integer, primary_key=True,
+ test_needs_autoincrement=True)
+ parent_id = Column(ForeignKey('parent.arb'))
+
+ class M2MChild(Base):
+ __tablename__ = 'm2mchild'
+ id = Column(Integer, primary_key=True,
+ test_needs_autoincrement=True)
+
+ def test_joinedload_defered_pk_limit_o2m(self):
+ Parent = self.classes.Parent
+
+ s = Session()
+
+ self.assert_compile(
+ s.query(Parent).options(
+ load_only('data'),
+ joinedload(Parent.o2mchild)).limit(10),
+ "SELECT anon_1.parent_id AS anon_1_parent_id, "
+ "anon_1.parent_data AS anon_1_parent_data, "
+ "anon_1.parent_arb AS anon_1_parent_arb, "
+ "o2mchild_1.id AS o2mchild_1_id, "
+ "o2mchild_1.parent_id AS o2mchild_1_parent_id "
+ "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
+ "parent.arb AS parent_arb FROM parent LIMIT :param_1) AS anon_1 "
+ "LEFT OUTER JOIN o2mchild AS o2mchild_1 "
+ "ON anon_1.parent_arb = o2mchild_1.parent_id"
+ )
+
+ def test_joinedload_defered_pk_limit_m2m(self):
+ Parent = self.classes.Parent
+
+ s = Session()
+
+ self.assert_compile(
+ s.query(Parent).options(
+ load_only('data'),
+ joinedload(Parent.m2mchild)).limit(10),
+ "SELECT anon_1.parent_id AS anon_1_parent_id, "
+ "anon_1.parent_data AS anon_1_parent_data, "
+ "anon_1.parent_arb AS anon_1_parent_arb, "
+ "m2mchild_1.id AS m2mchild_1_id "
+ "FROM (SELECT parent.id AS parent_id, "
+ "parent.data AS parent_data, parent.arb AS parent_arb "
+ "FROM parent LIMIT :param_1) AS anon_1 "
+ "LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
+ "JOIN m2mchild AS m2mchild_1 "
+ "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
+ "ON anon_1.parent_arb = parent_to_m2m_1.parent_id"
+ )
+
+ def test_joinedload_defered_pk_o2m(self):
+ Parent = self.classes.Parent
+
+ s = Session()
+
+ self.assert_compile(
+ s.query(Parent).options(
+ load_only('data'),
+ joinedload(Parent.o2mchild)),
+ "SELECT parent.id AS parent_id, parent.data AS parent_data, "
+ "parent.arb AS parent_arb, o2mchild_1.id AS o2mchild_1_id, "
+ "o2mchild_1.parent_id AS o2mchild_1_parent_id "
+ "FROM parent LEFT OUTER JOIN o2mchild AS o2mchild_1 "
+ "ON parent.arb = o2mchild_1.parent_id"
+ )
+
+ def test_joinedload_defered_pk_m2m(self):
+ Parent = self.classes.Parent
+
+ s = Session()
+
+ self.assert_compile(
+ s.query(Parent).options(
+ load_only('data'),
+ joinedload(Parent.m2mchild)),
+ "SELECT parent.id AS parent_id, parent.data AS parent_data, "
+ "parent.arb AS parent_arb, m2mchild_1.id AS m2mchild_1_id "
+ "FROM parent LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
+ "JOIN m2mchild AS m2mchild_1 "
+ "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
+ "ON parent.arb = parent_to_m2m_1.parent_id"
+ )
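A sketch of the query shape these assertions describe (not part of the patch; Parent/Child below are hypothetical stand-ins): load_only() trims the columns clause, but the primary key and the 'arb' column targeted by the eager join remain in the SELECT.

# illustrative sketch only
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, joinedload, load_only, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    arb = Column(Integer, unique=True)   # non-PK column used as the join target
    data = Column(Integer)
    children = relationship("Child")

class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(ForeignKey('parent.arb'))

query = (
    Session().query(Parent)
    .options(load_only('data'), joinedload(Parent.children))
    .limit(10)
)
# the rendered SELECT keeps parent.id and parent.arb alongside parent.data,
# since the LEFT OUTER JOIN is made against parent.arb rather than the PK
print(query)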
diff --git a/test/orm/test_events.py b/test/orm/test_events.py
index ae7ba98c1..ab61077ae 100644
--- a/test/orm/test_events.py
+++ b/test/orm/test_events.py
@@ -111,6 +111,43 @@ class MapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
event.listen(mapper, meth, evt(meth), **kw)
return canary
+ def test_init_allow_kw_modify(self):
+ User, users = self.classes.User, self.tables.users
+ mapper(User, users)
+
+ @event.listens_for(User, 'init')
+ def add_name(obj, args, kwargs):
+ kwargs['name'] = 'ed'
+
+ u1 = User()
+ eq_(u1.name, 'ed')
+
+ def test_init_failure_hook(self):
+ users = self.tables.users
+
+ class Thing(object):
+ def __init__(self, **kw):
+ if kw.get('fail'):
+ raise Exception("failure")
+
+ mapper(Thing, users)
+
+ canary = Mock()
+ event.listen(Thing, 'init_failure', canary)
+
+ Thing()
+ eq_(canary.mock_calls, [])
+
+ assert_raises_message(
+ Exception,
+ "failure",
+ Thing, fail=True
+ )
+ eq_(
+ canary.mock_calls,
+ [call(ANY, (), {'fail': True})]
+ )
+
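A sketch of the two hooks the new tests cover (not part of the patch; the mapping is hypothetical): the 'init' event receives the constructor's kwargs dict and may modify it in place, while 'init_failure' fires if the constructor raises.

# illustrative sketch only
from sqlalchemy import Column, Integer, String, event
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

@event.listens_for(User, 'init')
def supply_default_name(instance, args, kwargs):
    # mutating kwargs here changes what __init__ actually receives
    kwargs.setdefault('name', 'ed')

@event.listens_for(User, 'init_failure')
def init_failed(instance, args, kwargs):
    # called when __init__ raises; the exception still propagates afterwards
    print("constructor failed:", args, kwargs)

u = User()
assert u.name == 'ed'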
def test_listen_doesnt_force_compile(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users, properties={
@@ -1580,6 +1617,506 @@ class SessionEventsTest(_RemoveListeners, _fixtures.FixtureTest):
)
+class SessionLifecycleEventsTest(_RemoveListeners, _fixtures.FixtureTest):
+ run_inserts = None
+
+ def _fixture(self, include_address=False):
+ users, User = self.tables.users, self.classes.User
+
+ if include_address:
+ addresses, Address = self.tables.addresses, self.classes.Address
+ mapper(User, users, properties={
+ "addresses": relationship(
+ Address, cascade="all, delete-orphan")
+ })
+ mapper(Address, addresses)
+ else:
+ mapper(User, users)
+
+ listener = Mock()
+
+ sess = Session()
+
+ def start_events():
+ event.listen(
+ sess, "transient_to_pending", listener.transient_to_pending)
+ event.listen(
+ sess, "pending_to_transient", listener.pending_to_transient)
+ event.listen(
+ sess, "persistent_to_transient",
+ listener.persistent_to_transient)
+ event.listen(
+ sess, "pending_to_persistent", listener.pending_to_persistent)
+ event.listen(
+ sess, "detached_to_persistent",
+ listener.detached_to_persistent)
+ event.listen(
+ sess, "loaded_as_persistent", listener.loaded_as_persistent)
+
+ event.listen(
+ sess, "persistent_to_detached",
+ listener.persistent_to_detached)
+ event.listen(
+ sess, "deleted_to_detached", listener.deleted_to_detached)
+
+ event.listen(
+ sess, "persistent_to_deleted", listener.persistent_to_deleted)
+ event.listen(
+ sess, "deleted_to_persistent", listener.deleted_to_persistent)
+ return listener
+
+ if include_address:
+ return sess, User, Address, start_events
+ else:
+ return sess, User, start_events
+
+ def test_transient_to_pending(self):
+ sess, User, start_events = self._fixture()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "transient_to_pending")
+ def trans_to_pending(session, instance):
+ assert instance in session
+ listener.flag_checked(instance)
+
+ u1 = User(name='u1')
+ sess.add(u1)
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.transient_to_pending(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_pending_to_transient_via_rollback(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+
+ listener = start_events()
+
+ @event.listens_for(sess, "pending_to_transient")
+ def test_deleted_flag(session, instance):
+ assert instance not in session
+ listener.flag_checked(instance)
+
+ sess.rollback()
+ assert u1 not in sess
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.pending_to_transient(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_pending_to_transient_via_expunge(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+
+ listener = start_events()
+
+ @event.listens_for(sess, "pending_to_transient")
+ def test_deleted_flag(session, instance):
+ assert instance not in session
+ listener.flag_checked(instance)
+
+ sess.expunge(u1)
+ assert u1 not in sess
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.pending_to_transient(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_pending_to_persistent(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+
+ listener = start_events()
+
+ @event.listens_for(sess, "pending_to_persistent")
+ def test_flag(session, instance):
+ assert instance in session
+ assert instance._sa_instance_state.persistent
+ assert instance._sa_instance_state.key in session.identity_map
+ listener.flag_checked(instance)
+
+ sess.flush()
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.pending_to_persistent(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_detached_to_persistent(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.flush()
+
+ sess.expunge(u1)
+
+ listener = start_events()
+
+ @event.listens_for(sess, "detached_to_persistent")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance in session
+ listener.flag_checked()
+
+ sess.add(u1)
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.detached_to_persistent(sess, u1),
+ call.flag_checked()
+ ]
+ )
+
+ def test_loaded_as_persistent(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.commit()
+ sess.close()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "loaded_as_persistent")
+ def test_identity_flag(session, instance):
+ assert instance in session
+ assert instance._sa_instance_state.persistent
+ assert instance._sa_instance_state.key in session.identity_map
+ assert not instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ u1 = sess.query(User).filter_by(name='u1').one()
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.loaded_as_persistent(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_detached_to_persistent_via_deleted(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.commit()
+ sess.close()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "detached_to_persistent")
+ def test_deleted_flag_persistent(session, instance):
+ assert instance not in session.deleted
+ assert instance in session
+ assert not instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert instance._sa_instance_state.persistent
+ listener.dtp_flag_checked(instance)
+
+ @event.listens_for(sess, "persistent_to_deleted")
+ def test_deleted_flag_detached(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert not instance._sa_instance_state.persistent
+ assert instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ listener.ptd_flag_checked(instance)
+
+ sess.delete(u1)
+ assert u1 in sess.deleted
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.detached_to_persistent(sess, u1),
+ call.dtp_flag_checked(u1)
+ ]
+ )
+
+ sess.flush()
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.detached_to_persistent(sess, u1),
+ call.dtp_flag_checked(u1),
+ call.persistent_to_deleted(sess, u1),
+ call.ptd_flag_checked(u1),
+ ]
+ )
+
+ def test_detached_to_persistent_via_cascaded_delete(self):
+ sess, User, Address, start_events = self._fixture(include_address=True)
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ a1 = Address(email_address='e1')
+ u1.addresses.append(a1)
+ sess.commit()
+ u1.addresses # ensure u1.addresses refers to a1 before detachment
+ sess.close()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "detached_to_persistent")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance in session
+ assert not instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ sess.delete(u1)
+ assert u1 in sess.deleted
+ assert a1 in sess.deleted
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.detached_to_persistent(sess, u1),
+ call.flag_checked(u1),
+ call.detached_to_persistent(sess, a1),
+ call.flag_checked(a1),
+ ]
+ )
+
+ sess.flush()
+
+ def test_persistent_to_deleted(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.commit()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "persistent_to_deleted")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert not instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ sess.delete(u1)
+ assert u1 in sess.deleted
+
+ eq_(
+ listener.mock_calls,
+ []
+ )
+
+ sess.flush()
+ assert u1 not in sess
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.persistent_to_deleted(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_persistent_to_detached_via_expunge(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.flush()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "persistent_to_detached")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert not instance._sa_instance_state.deleted
+ assert instance._sa_instance_state.detached
+ assert not instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ assert u1 in sess
+ sess.expunge(u1)
+ assert u1 not in sess
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.persistent_to_detached(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_persistent_to_detached_via_expunge_all(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.flush()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "persistent_to_detached")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert not instance._sa_instance_state.deleted
+ assert instance._sa_instance_state.detached
+ assert not instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ assert u1 in sess
+ sess.expunge_all()
+ assert u1 not in sess
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.persistent_to_detached(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_persistent_to_transient_via_rollback(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.flush()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "persistent_to_transient")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert not instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert not instance._sa_instance_state.persistent
+ assert instance._sa_instance_state.transient
+ listener.flag_checked(instance)
+
+ sess.rollback()
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.persistent_to_transient(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_deleted_to_persistent_via_rollback(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.commit()
+
+ sess.delete(u1)
+ sess.flush()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "deleted_to_persistent")
+ def test_deleted_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance in session
+ assert not instance._sa_instance_state.deleted
+ assert not instance._sa_instance_state.detached
+ assert instance._sa_instance_state.persistent
+ listener.flag_checked(instance)
+
+ assert u1 not in sess
+ assert u1._sa_instance_state.deleted
+ assert not u1._sa_instance_state.persistent
+ assert not u1._sa_instance_state.detached
+
+ sess.rollback()
+
+ assert u1 in sess
+ assert u1._sa_instance_state.persistent
+ assert not u1._sa_instance_state.deleted
+ assert not u1._sa_instance_state.detached
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.deleted_to_persistent(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+ def test_deleted_to_detached_via_commit(self):
+ sess, User, start_events = self._fixture()
+
+ u1 = User(name='u1')
+ sess.add(u1)
+ sess.commit()
+
+ sess.delete(u1)
+ sess.flush()
+
+ listener = start_events()
+
+ @event.listens_for(sess, "deleted_to_detached")
+ def test_detached_flag(session, instance):
+ assert instance not in session.deleted
+ assert instance not in session
+ assert not instance._sa_instance_state.deleted
+ assert instance._sa_instance_state.detached
+ listener.flag_checked(instance)
+
+ assert u1 not in sess
+ assert u1._sa_instance_state.deleted
+ assert not u1._sa_instance_state.persistent
+ assert not u1._sa_instance_state.detached
+
+ sess.commit()
+
+ assert u1 not in sess
+ assert not u1._sa_instance_state.deleted
+ assert u1._sa_instance_state.detached
+
+ eq_(
+ listener.mock_calls,
+ [
+ call.deleted_to_detached(sess, u1),
+ call.flag_checked(u1)
+ ]
+ )
+
+
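A sketch of how the state-transition events exercised by this suite are typically hooked (not part of the patch; the mapping and engine URL are hypothetical).

# illustrative sketch only
from sqlalchemy import Column, Integer, String, create_engine, event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = Session(engine)

@event.listens_for(session, "transient_to_pending")
def on_add(sess, instance):
    print("added to session:", instance)

@event.listens_for(session, "pending_to_persistent")
def on_insert(sess, instance):
    print("row inserted for:", instance)

@event.listens_for(session, "persistent_to_deleted")
def on_delete(sess, instance):
    print("row deleted for:", instance)

u = User(name='u1')
session.add(u)       # transient_to_pending
session.commit()     # pending_to_persistent fires during the flush
session.delete(u)
session.commit()     # persistent_to_deleted fires during the flush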
class MapperExtensionTest(_fixtures.FixtureTest):
"""Superseded by MapperEventsTest - test backwards
diff --git a/test/orm/test_hasparent.py b/test/orm/test_hasparent.py
index fd246b527..df4b05980 100644
--- a/test/orm/test_hasparent.py
+++ b/test/orm/test_hasparent.py
@@ -116,7 +116,7 @@ class ParentRemovalTest(fixtures.MappedTest):
User = self.classes.User
s, u1, a1 = self._fixture()
- s._expunge_state(attributes.instance_state(u1))
+ s._expunge_states([attributes.instance_state(u1)])
del u1
gc_collect()
@@ -178,7 +178,7 @@ class ParentRemovalTest(fixtures.MappedTest):
u2 = User(addresses=[a1])
s.add(u2)
s.flush()
- s._expunge_state(attributes.instance_state(u2))
+ s._expunge_states([attributes.instance_state(u2)])
del u2
gc_collect()
diff --git a/test/orm/test_lazy_relations.py b/test/orm/test_lazy_relations.py
index ea39753b4..f2e1db2da 100644
--- a/test/orm/test_lazy_relations.py
+++ b/test/orm/test_lazy_relations.py
@@ -1073,3 +1073,78 @@ class RefersToSelfLazyLoadInterferenceTest(fixtures.MappedTest):
session.query(B).options(
sa.orm.joinedload('parent').joinedload('zc')).all()
+
+class TypeCoerceTest(fixtures.MappedTest, testing.AssertsExecutionResults,):
+ """ORM-level test for [ticket:3531]"""
+
+ # mysql is having a recursion issue in the bind_expression
+ __only_on__ = ('sqlite', 'postgresql')
+
+ class StringAsInt(TypeDecorator):
+ impl = String(50)
+
+ def column_expression(self, col):
+ return sa.cast(col, Integer)
+
+ def bind_expression(self, col):
+ return sa.cast(col, String)
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'person', metadata,
+ Column("id", cls.StringAsInt, primary_key=True),
+ )
+ Table(
+ "pets", metadata,
+ Column("id", Integer, primary_key=True),
+ Column("person_id", Integer),
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class Person(cls.Basic):
+ pass
+
+ class Pet(cls.Basic):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ mapper(cls.classes.Person, cls.tables.person, properties=dict(
+ pets=relationship(
+ cls.classes.Pet, primaryjoin=(
+ orm.foreign(cls.tables.pets.c.person_id) ==
+ sa.cast(
+ sa.type_coerce(cls.tables.person.c.id, Integer),
+ Integer
+ )
+ )
+ )
+ ))
+
+ mapper(cls.classes.Pet, cls.tables.pets)
+
+ def test_lazyload_singlecast(self):
+ Person = self.classes.Person
+ Pet = self.classes.Pet
+
+ s = Session()
+ s.add_all([
+ Person(id=5), Pet(id=1, person_id=5)
+ ])
+ s.commit()
+
+ p1 = s.query(Person).first()
+
+ with self.sql_execution_asserter() as asserter:
+ p1.pets
+
+ asserter.assert_(
+ CompiledSQL(
+ "SELECT pets.id AS pets_id, pets.person_id "
+ "AS pets_person_id FROM pets "
+ "WHERE pets.person_id = CAST(:param_1 AS INTEGER)",
+ [{'param_1': 5}]
+ )
+ )
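A sketch of the type at the center of [ticket:3531] (not part of the patch; the table/type names mirror the fixture above but are otherwise hypothetical): a TypeDecorator whose column_expression()/bind_expression() wrap both sides of a comparison in CASTs, so a lazy load must apply the bind-side cast exactly once.

# illustrative sketch only
from sqlalchemy import Column, Integer, MetaData, String, Table, cast, select
from sqlalchemy.types import TypeDecorator

class StringAsInt(TypeDecorator):
    """Stored as VARCHAR, compared and returned as INTEGER."""
    impl = String(50)

    def column_expression(self, col):
        # applied to the column when it appears in a SELECT's columns clause
        return cast(col, Integer)

    def bind_expression(self, col):
        # applied around bound parameters compared against this column
        return cast(col, String)

metadata = MetaData()
person = Table('person', metadata,
               Column('id', StringAsInt, primary_key=True))

# renders roughly:
#   SELECT CAST(person.id AS INTEGER) AS id FROM person
#   WHERE person.id = CAST(:id_1 AS VARCHAR)
print(select([person]).where(person.c.id == 5))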
diff --git a/test/orm/test_load_on_fks.py b/test/orm/test_load_on_fks.py
index 813d8d17a..471c8665a 100644
--- a/test/orm/test_load_on_fks.py
+++ b/test/orm/test_load_on_fks.py
@@ -301,7 +301,8 @@ class LoadOnFKsTest(AssertsExecutionResults, fixtures.TestBase):
c2 = Child()
if attach:
- sess._attach(instance_state(c2))
+ state = instance_state(c2)
+ state.session_id = sess.hash_key
if enable_relationship_rel:
sess.enable_relationship_loading(c2)
diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py
index 264b386d4..6845ababb 100644
--- a/test/orm/test_mapper.py
+++ b/test/orm/test_mapper.py
@@ -8,7 +8,7 @@ from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.engine import default
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, class_mapper, configure_mappers, reconstructor, \
- validates, aliased, defer, deferred, synonym, attributes, \
+ aliased, deferred, synonym, attributes, \
column_property, composite, dynamic_loader, \
comparable_property, Session
from sqlalchemy.orm.persistence import _sort_states
@@ -19,6 +19,7 @@ from sqlalchemy.testing.assertsql import CompiledSQL
import logging
import logging.handlers
+
class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -26,33 +27,34 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""A backref name may not shadow an existing property name."""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
-
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(Address, addresses)
mapper(User, users,
- properties={
- 'addresses':relationship(Address, backref='email_address')
- })
+ properties={
+ 'addresses': relationship(Address, backref='email_address')
+ })
assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers)
def test_update_attr_keys(self):
- """test that update()/insert() use the correct key when given InstrumentedAttributes."""
+ """test that update()/insert() use the correct key when given
+ InstrumentedAttributes."""
User, users = self.classes.User, self.tables.users
-
mapper(User, users, properties={
- 'foobar':users.c.name
+ 'foobar': users.c.name
})
- users.insert().values({User.foobar:'name1'}).execute()
- eq_(sa.select([User.foobar]).where(User.foobar=='name1').execute().fetchall(), [('name1',)])
+ users.insert().values({User.foobar: 'name1'}).execute()
+ eq_(sa.select([User.foobar]).where(User.foobar == 'name1').
+ execute().fetchall(), [('name1',)])
- users.update().values({User.foobar:User.foobar + 'foo'}).execute()
- eq_(sa.select([User.foobar]).where(User.foobar=='name1foo').execute().fetchall(), [('name1foo',)])
+ users.update().values({User.foobar: User.foobar + 'foo'}).execute()
+ eq_(sa.select([User.foobar]).where(User.foobar == 'name1foo').
+ execute().fetchall(), [('name1foo',)])
def test_utils(self):
users = self.tables.users
@@ -63,12 +65,12 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class Foo(object):
x = "something"
+
@property
def y(self):
return "something else"
-
- m = mapper(Foo, users, properties={"addresses":relationship(Address)})
+ m = mapper(Foo, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
a1 = aliased(Foo)
@@ -100,14 +102,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class Foo(object):
x = "something"
+
@property
def y(self):
return "something else"
m = mapper(Foo, users)
a1 = aliased(Foo)
- f = Foo()
-
for arg, key, ret in [
(m, "x", Foo.x),
(Foo, "x", Foo.x),
@@ -122,7 +123,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def boom():
raise Exception("it broke")
mapper(User, users, properties={
- 'addresses':relationship(boom)
+ 'addresses': relationship(boom)
})
# test that QueryableAttribute.__str__() doesn't
@@ -137,12 +138,11 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""
Address, addresses, User = (self.classes.Address,
- self.tables.addresses,
- self.classes.User)
-
+ self.tables.addresses,
+ self.classes.User)
mapper(Address, addresses, properties={
- 'user':relationship(User)
+ 'user': relationship(User)
})
try:
@@ -156,8 +156,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"initialize - can't proceed with "
"initialization of other mappers. "
"Original exception was: Class "
- "'test.orm._fixtures.User' is not mapped$"
- , configure_mappers)
+ "'test.orm._fixtures.User' is not mapped$",
+ configure_mappers)
def test_column_prefix(self):
users, User = self.tables.users, self.classes.User
@@ -169,7 +169,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
s = create_session()
u = s.query(User).get(7)
eq_(u._name, 'jack')
- eq_(u._id,7)
+ eq_(u._id, 7)
u2 = s.query(User).filter_by(user_name='jack').one()
assert u is u2
@@ -190,16 +190,16 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
still triggers a check against all mappers."""
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
sa.orm.configure_mappers()
assert sa.orm.mapperlib.Mapper._new_mappers is False
m = mapper(Address, addresses, properties={
- 'user': relationship(User, backref="addresses")})
+ 'user': relationship(User, backref="addresses")})
assert m.configured is False
assert sa.orm.mapperlib.Mapper._new_mappers is True
@@ -232,13 +232,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_column_not_present(self):
users, addresses, User = (self.tables.users,
- self.tables.addresses,
- self.classes.User)
+ self.tables.addresses,
+ self.classes.User)
assert_raises_message(sa.exc.ArgumentError,
"not represented in the mapper's table",
- mapper, User, users, properties={'foo'
- : addresses.c.user_id})
+ mapper, User, users,
+ properties={'foo': addresses.c.user_id})
def test_constructor_exc(self):
"""TypeError is raised for illegal constructor args,
@@ -246,10 +246,11 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
users, addresses = self.tables.users, self.tables.addresses
-
class Foo(object):
+
def __init__(self):
pass
+
class Bar(object):
pass
@@ -266,13 +267,15 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""
class Foo(object):
+
def __init__(self, id):
self.id = id
m = MetaData()
foo_t = Table('foo', m,
- Column('id', String, primary_key=True)
- )
+ Column('id', String, primary_key=True)
+ )
m = mapper(Foo, foo_t)
+
class DontCompareMeToString(int):
if util.py2k:
def __lt__(self, other):
@@ -292,24 +295,23 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
[states[4], states[3], states[0], states[1], states[2]]
)
-
def test_props(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
- m = mapper(User, users, properties = {
- 'addresses' : relationship(mapper(Address, addresses))
+ m = mapper(User, users, properties={
+ 'addresses': relationship(mapper(Address, addresses))
})
assert User.addresses.property is m.get_property('addresses')
def test_unicode_relationship_backref_names(self):
# test [ticket:2901]
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties={
@@ -322,56 +324,62 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_configure_on_prop_1(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
- mapper(User, users, properties = {
- 'addresses' : relationship(mapper(Address, addresses))
+ mapper(User, users, properties={
+ 'addresses': relationship(mapper(Address, addresses))
})
- User.addresses.any(Address.email_address=='foo@bar.com')
+ User.addresses.any(Address.email_address == 'foo@bar.com')
def test_configure_on_prop_2(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
- mapper(User, users, properties = {
- 'addresses' : relationship(mapper(Address, addresses))
+ mapper(User, users, properties={
+ 'addresses': relationship(mapper(Address, addresses))
})
- eq_(str(User.id == 3), str(users.c.id==3))
+ eq_(str(User.id == 3), str(users.c.id == 3))
def test_configure_on_prop_3(self):
users, addresses, User = (self.tables.users,
- self.tables.addresses,
- self.classes.User)
+ self.tables.addresses,
+ self.classes.User)
+
+ class Foo(User):
+ pass
- class Foo(User):pass
mapper(User, users)
mapper(Foo, addresses, inherits=User, properties={
- 'address_id': addresses.c.id
- })
+ 'address_id': addresses.c.id
+ })
assert getattr(Foo().__class__, 'name').impl is not None
def test_deferred_subclass_attribute_instrument(self):
users, addresses, User = (self.tables.users,
- self.tables.addresses,
- self.classes.User)
+ self.tables.addresses,
+ self.classes.User)
+
+ class Foo(User):
+ pass
- class Foo(User):pass
mapper(User, users)
configure_mappers()
mapper(Foo, addresses, inherits=User, properties={
- 'address_id': addresses.c.id
- })
+ 'address_id': addresses.c.id
+ })
assert getattr(Foo().__class__, 'name').impl is not None
def test_check_descriptor_as_method(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
+
class MyClass(User):
+
def foo(self):
pass
m._is_userland_descriptor(MyClass.foo)
@@ -379,7 +387,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_configure_on_get_props_1(self):
User, users = self.classes.User, self.tables.users
- m =mapper(User, users)
+ m = mapper(User, users)
assert not m.configured
assert list(m.iterate_properties)
assert m.configured
@@ -387,29 +395,30 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_configure_on_get_props_2(self):
User, users = self.classes.User, self.tables.users
- m= mapper(User, users)
+ m = mapper(User, users)
assert not m.configured
assert m.get_property('name')
assert m.configured
def test_configure_on_get_props_3(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
- m= mapper(User, users)
+ m = mapper(User, users)
assert not m.configured
configure_mappers()
m2 = mapper(Address, addresses, properties={
- 'user':relationship(User, backref='addresses')
- })
+ 'user': relationship(User, backref='addresses')
+ })
assert m.get_property('addresses')
def test_info(self):
users = self.tables.users
Address = self.classes.Address
+
class MyComposite(object):
pass
for constructor, args in [
@@ -434,17 +443,17 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# create specific tables here as we don't want
# users.c.id.info to be pre-initialized
users = Table('u', m, Column('id', Integer, primary_key=True),
- Column('name', String))
+ Column('name', String))
addresses = Table('a', m, Column('id', Integer, primary_key=True),
- Column('name', String),
- Column('user_id', Integer, ForeignKey('u.id')))
+ Column('name', String),
+ Column('user_id', Integer, ForeignKey('u.id')))
Address = self.classes.Address
User = self.classes.User
mapper(User, users, properties={
- "name_lower": column_property(func.lower(users.c.name)),
- "addresses": relationship(Address)
- })
+ "name_lower": column_property(func.lower(users.c.name)),
+ "addresses": relationship(Address)
+ })
mapper(Address, addresses)
# attr.info goes down to the original Column object
@@ -460,18 +469,19 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# same for relationships
is_(User.addresses.info, User.addresses.property.info)
-
def test_add_property(self):
users, addresses, Address = (self.tables.users,
- self.tables.addresses,
- self.classes.Address)
+ self.tables.addresses,
+ self.classes.Address)
assert_col = []
class User(fixtures.ComparableEntity):
+
def _get_name(self):
assert_col.append(('get', self._name))
return self._name
+
def _set_name(self, name):
assert_col.append(('set', name))
self._name = name
@@ -503,7 +513,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
m.add_property('addresses', relationship(Address))
m.add_property('uc_name', sa.orm.comparable_property(UCComparator))
m.add_property('uc_name2', sa.orm.comparable_property(
- UCComparator, User.uc_name2))
+ UCComparator, User.uc_name2))
sess = create_session(autocommit=False)
assert sess.query(User).get(7)
@@ -534,7 +544,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
User()
m2 = mapper(Address, addresses, properties={
- 'user':relationship(User, backref="addresses")
+ 'user': relationship(User, backref="addresses")
})
# configure mappers takes place when User is generated
User()
@@ -545,7 +555,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
users, User = self.tables.users, self.classes.User
m = mapper(User, users)
- m.add_property('_name',users.c.name)
+ m.add_property('_name', users.c.name)
m.add_property('name', synonym('_name'))
sess = create_session()
@@ -572,8 +582,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
addresses, Address = self.tables.addresses, self.classes.Address
m = mapper(User, users, properties={
- "addresses": relationship(Address)
- })
+ "addresses": relationship(Address)
+ })
mapper(Address, addresses)
assert_raises_message(
@@ -588,14 +598,15 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_add_column_prop_deannotate(self):
User, users = self.classes.User, self.tables.users
Address, addresses = self.classes.Address, self.tables.addresses
+
class SubUser(User):
pass
m = mapper(User, users)
m2 = mapper(SubUser, addresses, inherits=User, properties={
- 'address_id': addresses.c.id
- })
+ 'address_id': addresses.c.id
+ })
m3 = mapper(Address, addresses, properties={
- 'foo':relationship(m2)
+ 'foo': relationship(m2)
})
# add property using annotated User.name,
# needs to be deannotated
@@ -612,7 +623,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"addresses_1.email_address AS "
"addresses_1_email_address, "
"users_1.name || :name_1 AS anon_1 "
- "FROM addresses JOIN (users AS users_1 JOIN addresses AS addresses_1 ON users_1.id = "
+ "FROM addresses JOIN (users AS users_1 JOIN addresses "
+ "AS addresses_1 ON users_1.id = "
"addresses_1.user_id) ON "
"users_1.id = addresses.user_id"
)
@@ -638,20 +650,23 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
assert User.y.property.columns[0] is not expr2
assert User.y.property.columns[0].element.\
- _raw_columns[0] is users.c.name
+ _raw_columns[0] is users.c.name
assert User.y.property.columns[0].element.\
- _raw_columns[1] is users.c.id
+ _raw_columns[1] is users.c.id
def test_synonym_replaces_backref(self):
addresses, users, User = (self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.users,
+ self.classes.User)
assert_calls = []
+
class Address(object):
+
def _get_user(self):
assert_calls.append("get")
return self._user
+
def _set_user(self, user):
assert_calls.append("set")
self._user = user
@@ -659,20 +674,20 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# synonym is created against nonexistent prop
mapper(Address, addresses, properties={
- 'user':synonym('_user')
+ 'user': synonym('_user')
})
sa.orm.configure_mappers()
# later, backref sets up the prop
mapper(User, users, properties={
- 'addresses':relationship(Address, backref='_user')
+ 'addresses': relationship(Address, backref='_user')
})
sess = create_session()
u1 = sess.query(User).get(7)
u2 = sess.query(User).get(8)
# comparison ops need to work
- a1 = sess.query(Address).filter(Address.user==u1).one()
+ a1 = sess.query(Address).filter(Address.user == u1).one()
eq_(a1.id, 1)
a1.user = u2
assert a1.user is u2
@@ -680,16 +695,19 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_self_ref_synonym(self):
t = Table('nodes', MetaData(),
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
- Column('parent_id', Integer, ForeignKey('nodes.id')))
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('parent_id', Integer, ForeignKey('nodes.id')))
class Node(object):
pass
mapper(Node, t, properties={
- '_children':relationship(Node, backref=backref('_parent', remote_side=t.c.id)),
- 'children':synonym('_children'),
- 'parent':synonym('_parent')
+ '_children': relationship(
+ Node, backref=backref('_parent', remote_side=t.c.id)),
+ 'children': synonym('_children'),
+ 'parent': synonym('_parent')
})
n1 = Node()
@@ -702,13 +720,14 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_non_primary_identity_class(self):
User = self.classes.User
users, addresses = self.tables.users, self.tables.addresses
+
class AddressUser(User):
pass
m1 = mapper(User, users, polymorphic_identity='user')
m2 = mapper(AddressUser, addresses, inherits=User,
- polymorphic_identity='address', properties={
- 'address_id': addresses.c.id
- })
+ polymorphic_identity='address', properties={
+ 'address_id': addresses.c.id
+ })
m3 = mapper(AddressUser, addresses, non_primary=True)
assert m3._identity_class is m2._identity_class
eq_(
@@ -719,6 +738,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_reassign_polymorphic_identity_warns(self):
User = self.classes.User
users = self.tables.users
+
class MyUser(User):
pass
m1 = mapper(User, users, polymorphic_on=users.c.name,
@@ -730,17 +750,16 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
MyUser, users, inherits=User, polymorphic_identity='user'
)
-
def test_illegal_non_primary(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users)
mapper(Address, addresses)
mapper(User, users, non_primary=True, properties={
- 'addresses':relationship(Address)
+ 'addresses': relationship(Address)
})
assert_raises_message(
sa.exc.ArgumentError,
@@ -762,62 +781,90 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class Base(object):
pass
+
class Sub(Base):
pass
mapper(Base, users)
assert_raises_message(sa.exc.InvalidRequestError,
- "Configure a primary mapper first",
- mapper, Sub, addresses, non_primary=True
- )
+ "Configure a primary mapper first",
+ mapper, Sub, addresses, non_primary=True
+ )
def test_prop_filters(self):
t = Table('person', MetaData(),
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=True),
Column('type', String(128)),
Column('name', String(128)),
Column('employee_number', Integer),
Column('boss_id', Integer, ForeignKey('person.id')),
Column('vendor_id', Integer))
- class Person(object): pass
- class Vendor(Person): pass
- class Employee(Person): pass
- class Manager(Employee): pass
- class Hoho(object): pass
- class Lala(object): pass
- class Fub(object):pass
- class Frob(object):pass
+ class Person(object):
+ pass
+
+ class Vendor(Person):
+ pass
+
+ class Employee(Person):
+ pass
+
+ class Manager(Employee):
+ pass
+
+ class Hoho(object):
+ pass
+
+ class Lala(object):
+ pass
+
+ class Fub(object):
+ pass
+
+ class Frob(object):
+ pass
+
class HasDef(object):
+
def name(self):
pass
- class Empty(object):pass
- empty = mapper(Empty, t, properties={'empty_id' : t.c.id},
- include_properties=[])
+ class Empty(object):
+ pass
+
+ mapper(
+ Empty, t, properties={'empty_id': t.c.id},
+ include_properties=[])
p_m = mapper(Person, t, polymorphic_on=t.c.type,
include_properties=('id', 'type', 'name'))
e_m = mapper(Employee, inherits=p_m,
- polymorphic_identity='employee', properties={'boss'
- : relationship(Manager, backref=backref('peon'),
- remote_side=t.c.id)},
+ polymorphic_identity='employee',
+ properties={
+ 'boss': relationship(
+ Manager, backref=backref('peon'),
+ remote_side=t.c.id)},
exclude_properties=('vendor_id', ))
- m_m = mapper(Manager, inherits=e_m, polymorphic_identity='manager',
- include_properties=('id', 'type'))
+ mapper(
+ Manager, inherits=e_m, polymorphic_identity='manager',
+ include_properties=('id', 'type'))
- v_m = mapper(Vendor, inherits=p_m, polymorphic_identity='vendor',
- exclude_properties=('boss_id', 'employee_number'))
- h_m = mapper(Hoho, t, include_properties=('id', 'type', 'name'))
- l_m = mapper(Lala, t, exclude_properties=('vendor_id', 'boss_id'),
- column_prefix="p_")
+ mapper(
+ Vendor, inherits=p_m, polymorphic_identity='vendor',
+ exclude_properties=('boss_id', 'employee_number'))
+ mapper(Hoho, t, include_properties=('id', 'type', 'name'))
+ mapper(
+ Lala, t, exclude_properties=('vendor_id', 'boss_id'),
+ column_prefix="p_")
- hd_m = mapper(HasDef, t, column_prefix="h_")
+ mapper(HasDef, t, column_prefix="h_")
- fb_m = mapper(Fub, t, include_properties=(t.c.id, t.c.type))
- frb_m = mapper(Frob, t, column_prefix='f_',
- exclude_properties=(t.c.boss_id,
- 'employee_number', t.c.vendor_id))
+ mapper(Fub, t, include_properties=(t.c.id, t.c.type))
+ mapper(
+ Frob, t, column_prefix='f_',
+ exclude_properties=(
+ t.c.boss_id,
+ 'employee_number', t.c.vendor_id))
configure_mappers()
@@ -832,13 +879,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
eq_(have, want)
assert_props(HasDef, ['h_boss_id', 'h_employee_number', 'h_id',
- 'name', 'h_name', 'h_vendor_id', 'h_type'])
+ 'name', 'h_name', 'h_vendor_id', 'h_type'])
assert_props(Person, ['id', 'name', 'type'])
assert_instrumented(Person, ['id', 'name', 'type'])
assert_props(Employee, ['boss', 'boss_id', 'employee_number',
'id', 'name', 'type'])
- assert_instrumented(Employee,['boss', 'boss_id', 'employee_number',
- 'id', 'name', 'type'])
+ assert_instrumented(Employee, ['boss', 'boss_id', 'employee_number',
+ 'id', 'name', 'type'])
assert_props(Manager, ['boss', 'boss_id', 'employee_number', 'peon',
'id', 'name', 'type'])
@@ -851,7 +898,6 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
assert_props(Fub, ['id', 'type'])
assert_props(Frob, ['f_id', 'f_type', 'f_name', ])
-
# putting the discriminator column in exclude_properties,
# very weird. As of 0.7.4 this re-maps it.
class Foo(Person):
@@ -869,10 +915,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_prop_filters_defaults(self):
metadata = self.metadata
t = Table('t', metadata,
- Column('id', Integer(), primary_key=True, test_needs_autoincrement=True),
- Column('x', Integer(), nullable=False, server_default='0')
- )
+ Column(
+ 'id', Integer(), primary_key=True,
+ test_needs_autoincrement=True),
+ Column('x', Integer(), nullable=False, server_default='0')
+ )
t.create()
+
class A(object):
pass
mapper(A, t, include_properties=['id'])
@@ -882,6 +931,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_we_dont_call_bool(self):
class NoBoolAllowed(object):
+
def __bool__(self):
raise Exception("nope")
mapper(NoBoolAllowed, self.tables.users)
@@ -894,6 +944,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_we_dont_call_eq(self):
class NoEqAllowed(object):
+
def __eq__(self, other):
raise Exception("nope")
@@ -901,7 +952,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
Address = self.classes.Address
mapper(NoEqAllowed, users, properties={
- 'addresses':relationship(Address, backref='user')
+ 'addresses': relationship(Address, backref='user')
})
mapper(Address, addresses)
@@ -919,9 +970,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Test implicit merging of two cols raises."""
addresses, users, User = (self.tables.addresses,
- self.tables.users,
- self.classes.User)
-
+ self.tables.users,
+ self.classes.User)
usersaddresses = sa.join(users, addresses,
users.c.id == addresses.c.user_id)
@@ -935,14 +985,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Mapping to a join"""
User, addresses, users = (self.classes.User,
- self.tables.addresses,
- self.tables.users)
-
+ self.tables.addresses,
+ self.tables.users)
usersaddresses = sa.join(users, addresses, users.c.id
== addresses.c.user_id)
mapper(User, usersaddresses, primary_key=[users.c.id],
- properties={'add_id':addresses.c.id}
+ properties={'add_id': addresses.c.id}
)
l = create_session().query(User).order_by(users.c.id).all()
eq_(l, self.static.user_result[:3])
@@ -951,9 +1000,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Mapping to a join"""
User, addresses, users = (self.classes.User,
- self.tables.addresses,
- self.tables.users)
-
+ self.tables.addresses,
+ self.tables.users)
usersaddresses = sa.join(users, addresses, users.c.id
== addresses.c.user_id)
@@ -965,13 +1013,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_mapping_to_join_no_pk(self):
email_bounces, addresses, Address = (self.tables.email_bounces,
- self.tables.addresses,
- self.classes.Address)
+ self.tables.addresses,
+ self.classes.Address)
m = mapper(Address,
- addresses.join(email_bounces),
- properties={'id':[addresses.c.id, email_bounces.c.id]}
- )
+ addresses.join(email_bounces),
+ properties={'id': [addresses.c.id, email_bounces.c.id]}
+ )
configure_mappers()
assert addresses in m._pks_by_table
assert email_bounces not in m._pks_by_table
@@ -988,10 +1036,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Mapping to an outer join with a nullable composite primary key."""
users, addresses, User = (self.tables.users,
- self.tables.addresses,
- self.classes.User)
-
-
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users.outerjoin(addresses),
primary_key=[users.c.id, addresses.c.id],
@@ -1013,13 +1059,11 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""test the allow_partial_pks=False flag."""
users, addresses, User = (self.tables.users,
- self.tables.addresses,
- self.classes.User)
-
-
+ self.tables.addresses,
+ self.classes.User)
mapper(User, users.outerjoin(addresses),
- allow_partial_pks=False,
+ allow_partial_pks=False,
primary_key=[users.c.id, addresses.c.id],
properties=dict(
address_id=addresses.c.id))
@@ -1037,11 +1081,11 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_scalar_pk_arg(self):
users, Keyword, items, Item, User, keywords = (self.tables.users,
- self.classes.Keyword,
- self.tables.items,
- self.classes.Item,
- self.classes.User,
- self.tables.keywords)
+ self.classes.Keyword,
+ self.tables.items,
+ self.classes.Item,
+ self.classes.User,
+ self.tables.keywords)
m1 = mapper(Item, items, primary_key=[items.c.id])
m2 = mapper(Keyword, keywords, primary_key=keywords.c.id)
@@ -1051,18 +1095,17 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
assert m2.primary_key[0] is keywords.c.id
assert m3.primary_key[0] is users.c.id
-
def test_custom_join(self):
"""select_from totally replace the FROM parameters."""
- users, items, order_items, orders, Item, User, Order = (self.tables.users,
- self.tables.items,
- self.tables.order_items,
- self.tables.orders,
- self.classes.Item,
- self.classes.User,
- self.classes.Order)
-
+ users, items, order_items, orders, Item, User, Order = (
+ self.tables.users,
+ self.tables.items,
+ self.tables.order_items,
+ self.tables.orders,
+ self.classes.Item,
+ self.classes.User,
+ self.classes.Order)
mapper(Item, items)
@@ -1086,18 +1129,24 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
mapper(User, users, order_by=users.c.name.desc())
- assert "order by users.name desc" in str(create_session().query(User).statement).lower()
- assert "order by" not in str(create_session().query(User).order_by(None).statement).lower()
- assert "order by users.name asc" in str(create_session().query(User).order_by(User.name.asc()).statement).lower()
+ assert "order by users.name desc" in \
+ str(create_session().query(User).statement).lower()
+ assert "order by" not in \
+ str(create_session().query(User).order_by(None).statement).lower()
+ assert "order by users.name asc" in \
+ str(create_session().query(User).order_by(
+ User.name.asc()).statement).lower()
eq_(
create_session().query(User).all(),
- [User(id=7, name='jack'), User(id=9, name='fred'), User(id=8, name='ed'), User(id=10, name='chuck')]
+ [User(id=7, name='jack'), User(id=9, name='fred'),
+ User(id=8, name='ed'), User(id=10, name='chuck')]
)
eq_(
create_session().query(User).order_by(User.name).all(),
- [User(id=10, name='chuck'), User(id=8, name='ed'), User(id=9, name='fred'), User(id=7, name='jack')]
+ [User(id=10, name='chuck'), User(id=8, name='ed'),
+ User(id=9, name='fred'), User(id=7, name='jack')]
)
# Raises an "expression evaluation not supported" error at prepare time
@@ -1106,9 +1155,8 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Mapping to a SELECT statement that has functions in it."""
addresses, users, User = (self.tables.addresses,
- self.tables.users,
- self.classes.User)
-
+ self.tables.users,
+ self.classes.User)
s = sa.select([users,
(users.c.id * 2).label('concat'),
@@ -1129,29 +1177,29 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
User, users = self.classes.User, self.tables.users
-
mapper(User, users)
session = create_session()
q = session.query(User)
eq_(q.count(), 4)
- eq_(q.filter(User.id.in_([8,9])).count(), 2)
- eq_(q.filter(users.c.id.in_([8,9])).count(), 2)
+ eq_(q.filter(User.id.in_([8, 9])).count(), 2)
+ eq_(q.filter(users.c.id.in_([8, 9])).count(), 2)
eq_(session.query(User.id).count(), 4)
eq_(session.query(User.id).filter(User.id.in_((8, 9))).count(), 2)
def test_many_to_many_count(self):
- keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
- self.tables.items,
- self.tables.item_keywords,
- self.classes.Keyword,
- self.classes.Item)
+ keywords, items, item_keywords, Keyword, Item = (
+ self.tables.keywords,
+ self.tables.items,
+ self.tables.item_keywords,
+ self.classes.Keyword,
+ self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
- keywords = relationship(Keyword, item_keywords, lazy='select')))
+ keywords=relationship(Keyword, item_keywords, lazy='select')))
session = create_session()
q = (session.query(Item).
@@ -1164,9 +1212,9 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""Overriding a column raises an error."""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
def go():
mapper(User, users,
@@ -1179,10 +1227,9 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""exclude_properties cancels the error."""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
-
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users,
exclude_properties=['name'],
@@ -1195,9 +1242,9 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""The column being named elsewhere also cancels the error,"""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users,
properties=dict(
@@ -1206,28 +1253,30 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_synonym(self):
users, addresses, Address = (self.tables.users,
- self.tables.addresses,
- self.classes.Address)
-
+ self.tables.addresses,
+ self.classes.Address)
assert_col = []
+
class extendedproperty(property):
attribute = 123
class User(object):
+
def _get_name(self):
assert_col.append(('get', self.name))
return self.name
+
def _set_name(self, name):
assert_col.append(('set', name))
self.name = name
uname = extendedproperty(_get_name, _set_name)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='select'),
- uname = synonym('name'),
- adlist = synonym('addresses'),
- adname = synonym('addresses')
+ addresses=relationship(mapper(Address, addresses), lazy='select'),
+ uname=synonym('name'),
+ adlist=synonym('addresses'),
+ adname=synonym('addresses')
))
# ensure the synonym can get at the proxied comparators without
@@ -1251,7 +1300,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
row = sess.query(User.id, User.uname).first()
assert row.uname == row[1]
- u = sess.query(User).filter(User.uname=='jack').one()
+ u = sess.query(User).filter(User.uname == 'jack').one()
fixture = self.static.user_address_result[0].addresses
eq_(u.adlist, fixture)
@@ -1274,25 +1323,24 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
eq_(User.uname.attribute, 123)
def test_synonym_of_synonym(self):
- users, User = (self.tables.users,
- self.classes.User)
+ users, User = (self.tables.users,
+ self.classes.User)
mapper(User, users, properties={
- 'x':synonym('id'),
- 'y':synonym('x')
+ 'x': synonym('id'),
+ 'y': synonym('x')
})
s = Session()
- u = s.query(User).filter(User.y==8).one()
+ u = s.query(User).filter(User.y == 8).one()
eq_(u.y, 8)
-
def test_synonym_column_location(self):
users, User = self.tables.users, self.classes.User
def go():
mapper(User, users, properties={
- 'not_name':synonym('_name', map_column=True)})
+ 'not_name': synonym('_name', map_column=True)})
assert_raises_message(
sa.exc.ArgumentError,
@@ -1301,28 +1349,30 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
go)
def test_column_synonyms(self):
- """Synonyms which automatically instrument properties, set up aliased column, etc."""
+ """Synonyms which automatically instrument properties,
+ set up aliased column, etc."""
addresses, users, Address = (self.tables.addresses,
- self.tables.users,
- self.classes.Address)
-
-
+ self.tables.users,
+ self.classes.Address)
assert_col = []
+
class User(object):
+
def _get_name(self):
assert_col.append(('get', self._name))
return self._name
+
def _set_name(self, name):
assert_col.append(('set', name))
self._name = name
name = property(_get_name, _set_name)
mapper(Address, addresses)
- mapper(User, users, properties = {
- 'addresses':relationship(Address, lazy='select'),
- 'name':synonym('_name', map_column=True)
+ mapper(User, users, properties={
+ 'addresses': relationship(Address, lazy='select'),
+ 'name': synonym('_name', map_column=True)
})
# test compile
@@ -1369,6 +1419,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
return "method1"
from sqlalchemy.orm.properties import ColumnProperty
+
class UCComparator(ColumnProperty.Comparator):
__hash__ = None
@@ -1388,6 +1439,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def map_(with_explicit_property):
class User(object):
+
@extendedproperty
def uc_name(self):
if self.name is None:
@@ -1398,7 +1450,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
else:
args = (UCComparator,)
mapper(User, users, properties=dict(
- uc_name = sa.orm.comparable_property(*args)))
+ uc_name=sa.orm.comparable_property(*args)))
return User
for User in (map_(True), map_(False)):
@@ -1415,12 +1467,13 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
assert_raises_message(
AttributeError,
"Neither 'extendedproperty' object nor 'UCComparator' "
- "object associated with User.uc_name has an attribute 'nonexistent'",
+ "object associated with User.uc_name has an attribute "
+ "'nonexistent'",
getattr, User.uc_name, 'nonexistent')
# test compile
assert not isinstance(User.uc_name == 'jack', bool)
- u = q.filter(User.uc_name=='JACK').one()
+ u = q.filter(User.uc_name == 'JACK').one()
assert u.uc_name == "JACK"
assert u not in sess.dirty
@@ -1447,10 +1500,11 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class MyComparator(sa.orm.properties.ColumnProperty.Comparator):
__hash__ = None
+
def __eq__(self, other):
# lower case comparison
return func.lower(self.__clause_element__()
- ) == func.lower(other)
+ ) == func.lower(other)
def intersects(self, other):
# non-standard comparator
@@ -1458,7 +1512,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
mapper(User, users, properties={
'name': sa.orm.column_property(users.c.name,
- comparator_factory=MyComparator)
+ comparator_factory=MyComparator)
})
assert_raises_message(
@@ -1470,39 +1524,41 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
eq_(
str((User.name == 'ed').compile(
- dialect=sa.engine.default.DefaultDialect())),
+ dialect=sa.engine.default.DefaultDialect())),
"lower(users.name) = lower(:lower_1)")
eq_(
str((User.name.intersects('ed')).compile(
- dialect=sa.engine.default.DefaultDialect())),
+ dialect=sa.engine.default.DefaultDialect())),
"users.name &= :name_1")
-
def test_reentrant_compile(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
class MyFakeProperty(sa.orm.properties.ColumnProperty):
+
def post_instrument_class(self, mapper):
super(MyFakeProperty, self).post_instrument_class(mapper)
configure_mappers()
m1 = mapper(User, users, properties={
- 'name':MyFakeProperty(users.c.name)
+ 'name': MyFakeProperty(users.c.name)
})
m2 = mapper(Address, addresses)
configure_mappers()
sa.orm.clear_mappers()
+
class MyFakeProperty(sa.orm.properties.ColumnProperty):
+
def post_instrument_class(self, mapper):
super(MyFakeProperty, self).post_instrument_class(mapper)
configure_mappers()
m1 = mapper(User, users, properties={
- 'name':MyFakeProperty(users.c.name)
+ 'name': MyFakeProperty(users.c.name)
})
m2 = mapper(Address, addresses)
configure_mappers()
@@ -1513,6 +1569,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
recon = []
class User(object):
+
@reconstructor
def reconstruct(self):
recon.append('go')
@@ -1528,19 +1585,23 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
users = self.tables.users
recon = []
+
class A(object):
+
@reconstructor
def reconstruct(self):
assert isinstance(self, A)
recon.append('A')
class B(A):
+
@reconstructor
def reconstruct(self):
assert isinstance(self, B)
recon.append('B')
class C(A):
+
@reconstructor
def reconstruct(self):
assert isinstance(self, C)
@@ -1566,7 +1627,9 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
users = self.tables.users
recon = []
+
class Base(object):
+
@reconstructor
def reconstruct(self):
recon.append('go')
@@ -1584,15 +1647,15 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_unmapped_error(self):
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(Address, addresses)
sa.orm.clear_mappers()
mapper(User, users, properties={
- 'addresses':relationship(Address)
+ 'addresses': relationship(Address)
})
assert_raises_message(
@@ -1621,9 +1684,10 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
Address = self.classes.Address
mapper(User, users, properties={
- "addresses": relationship(Address,
- primaryjoin=lambda: users.c.id == addresses.wrong.user_id)
- })
+ "addresses": relationship(
+ Address,
+ primaryjoin=lambda: users.c.id == addresses.wrong.user_id)
+ })
mapper(Address, addresses)
assert_raises_message(
AttributeError,
@@ -1638,10 +1702,10 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
Address = self.classes.Address
mapper(User, users, properties={
- "addresses": relationship(Address,
- primaryjoin=lambda: users.c.id ==
- addresses.__dict__['wrong'].user_id)
- })
+ "addresses": relationship(Address,
+ primaryjoin=lambda: users.c.id ==
+ addresses.__dict__['wrong'].user_id)
+ })
mapper(Address, addresses)
assert_raises_message(
KeyError,
@@ -1654,6 +1718,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class Base(object):
pass
+
class Sub(Base):
pass
@@ -1671,7 +1736,7 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# using it with an ORM operation, raises
assert_raises(sa.orm.exc.UnmappedClassError,
- create_session().add, Sub())
+ create_session().add, Sub())
def test_unmapped_subclass_error_premap(self):
users = self.tables.users
@@ -1697,13 +1762,14 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# using it with an ORM operation, raises
assert_raises(sa.orm.exc.UnmappedClassError,
- create_session().add, Sub())
+ create_session().add, Sub())
def test_oldstyle_mixin(self):
users = self.tables.users
class OldStyle:
pass
+
class NewStyle(object):
pass
@@ -1717,22 +1783,26 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
mapper(B, users)
+
class DocumentTest(fixtures.TestBase):
def test_doc_propagate(self):
metadata = MetaData()
t1 = Table('t1', metadata,
- Column('col1', Integer, primary_key=True, doc="primary key column"),
- Column('col2', String, doc="data col"),
- Column('col3', String, doc="data col 2"),
- Column('col4', String, doc="data col 3"),
- Column('col5', String),
- )
+ Column('col1', Integer, primary_key=True,
+ doc="primary key column"),
+ Column('col2', String, doc="data col"),
+ Column('col3', String, doc="data col 2"),
+ Column('col4', String, doc="data col 3"),
+ Column('col5', String),
+ )
t2 = Table('t2', metadata,
- Column('col1', Integer, primary_key=True, doc="primary key column"),
- Column('col2', String, doc="data col"),
- Column('col3', Integer, ForeignKey('t1.col1'), doc="foreign key to t1.col1")
- )
+ Column('col1', Integer, primary_key=True,
+ doc="primary key column"),
+ Column('col2', String, doc="data col"),
+ Column('col3', Integer, ForeignKey('t1.col1'),
+ doc="foreign key to t1.col1")
+ )
class Foo(object):
pass
@@ -1741,12 +1811,12 @@ class DocumentTest(fixtures.TestBase):
pass
mapper(Foo, t1, properties={
- 'bars':relationship(Bar,
- doc="bar relationship",
- backref=backref('foo',doc='foo relationship')
- ),
- 'foober':column_property(t1.c.col3, doc='alternate data col'),
- 'hoho':synonym("col4", doc="syn of col4")
+ 'bars': relationship(Bar,
+ doc="bar relationship",
+ backref=backref('foo', doc='foo relationship')
+ ),
+ 'foober': column_property(t1.c.col3, doc='alternate data col'),
+ 'hoho': synonym("col4", doc="syn of col4")
})
mapper(Bar, t2)
configure_mappers()
@@ -1759,7 +1829,9 @@ class DocumentTest(fixtures.TestBase):
eq_(Bar.col1.__doc__, "primary key column")
eq_(Bar.foo.__doc__, "foo relationship")
+
class ORMLoggingTest(_fixtures.FixtureTest):
+
def setup(self):
self.buf = logging.handlers.BufferingHandler(100)
for log in [
@@ -1787,18 +1859,19 @@ class ORMLoggingTest(_fixtures.FixtureTest):
for msg in self._current_messages():
assert msg.startswith('(User|%%(%d anon)s) ' % id(tb))
+
class OptionsTest(_fixtures.FixtureTest):
def test_synonym_options(self):
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='select',
- order_by=addresses.c.id),
- adlist = synonym('addresses')))
+ addresses=relationship(mapper(Address, addresses), lazy='select',
+ order_by=addresses.c.id),
+ adlist=synonym('addresses')))
def go():
sess = create_session()
@@ -1814,13 +1887,13 @@ class OptionsTest(_fixtures.FixtureTest):
"""A lazy relationship can be upgraded to an eager relationship."""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses),
- order_by=addresses.c.id)))
+ addresses=relationship(mapper(Address, addresses),
+ order_by=addresses.c.id)))
sess = create_session()
l = (sess.query(User).
@@ -1833,9 +1906,9 @@ class OptionsTest(_fixtures.FixtureTest):
def test_eager_options_with_limit(self):
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
addresses=relationship(mapper(Address, addresses), lazy='select')))
@@ -1858,12 +1931,12 @@ class OptionsTest(_fixtures.FixtureTest):
def test_lazy_options_with_limit(self):
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='joined')))
+ addresses=relationship(mapper(Address, addresses), lazy='joined')))
sess = create_session()
u = (sess.query(User).
@@ -1880,16 +1953,17 @@ class OptionsTest(_fixtures.FixtureTest):
if eager columns are not available"""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses),
- lazy='joined', order_by=addresses.c.id)))
+ addresses=relationship(mapper(Address, addresses),
+ lazy='joined', order_by=addresses.c.id)))
sess = create_session()
# first test straight eager load, 1 statement
+
def go():
l = sess.query(User).order_by(User.id).all()
eq_(l, self.static.user_address_result)
@@ -1902,24 +1976,27 @@ class OptionsTest(_fixtures.FixtureTest):
# (previous users in session fell out of scope and were removed from
# session's identity map)
r = users.select().order_by(users.c.id).execute()
+
def go():
l = list(sess.query(User).instances(r))
eq_(l, self.static.user_address_result)
self.sql_count_(4, go)
def test_eager_degrade_deep(self):
- users, Keyword, items, order_items, orders, Item, User, Address, keywords, item_keywords, Order, addresses = (self.tables.users,
- self.classes.Keyword,
- self.tables.items,
- self.tables.order_items,
- self.tables.orders,
- self.classes.Item,
- self.classes.User,
- self.classes.Address,
- self.tables.keywords,
- self.tables.item_keywords,
- self.classes.Order,
- self.tables.addresses)
+ users, Keyword, items, order_items, orders, \
+ Item, User, Address, keywords, item_keywords, Order, addresses = (
+ self.tables.users,
+ self.classes.Keyword,
+ self.tables.items,
+ self.tables.order_items,
+ self.tables.orders,
+ self.classes.Item,
+ self.classes.User,
+ self.classes.Address,
+ self.tables.keywords,
+ self.tables.item_keywords,
+ self.classes.Order,
+ self.tables.addresses)
# test with a deeper set of eager loads. when we first load the three
# users, they will have no addresses or orders. the number of lazy
@@ -1931,18 +2008,18 @@ class OptionsTest(_fixtures.FixtureTest):
mapper(Item, items, properties=dict(
keywords=relationship(Keyword, secondary=item_keywords,
- lazy='joined',
- order_by=item_keywords.c.keyword_id)))
+ lazy='joined',
+ order_by=item_keywords.c.keyword_id)))
mapper(Order, orders, properties=dict(
items=relationship(Item, secondary=order_items, lazy='joined',
- order_by=order_items.c.item_id)))
+ order_by=order_items.c.item_id)))
mapper(User, users, properties=dict(
addresses=relationship(Address, lazy='joined',
- order_by=addresses.c.id),
+ order_by=addresses.c.id),
orders=relationship(Order, lazy='joined',
- order_by=orders.c.id)))
+ order_by=orders.c.id)))
sess = create_session()
@@ -1957,6 +2034,7 @@ class OptionsTest(_fixtures.FixtureTest):
# then select just from users. run it into instances.
# then assert the data, which will launch 6 more lazy loads
r = users.select().execute()
+
def go():
l = list(sess.query(User).instances(r))
eq_(l, self.static.user_all_result)
@@ -1966,12 +2044,12 @@ class OptionsTest(_fixtures.FixtureTest):
"""An eager relationship can be upgraded to a lazy relationship."""
Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='joined')
+ addresses=relationship(mapper(Address, addresses), lazy='joined')
))
sess = create_session()
@@ -1984,19 +2062,20 @@ class OptionsTest(_fixtures.FixtureTest):
self.sql_count_(4, go)
def test_option_propagate(self):
- users, items, order_items, Order, Item, User, orders = (self.tables.users,
- self.tables.items,
- self.tables.order_items,
- self.classes.Order,
- self.classes.Item,
- self.classes.User,
- self.tables.orders)
+ users, items, order_items, Order, Item, User, orders = (
+ self.tables.users,
+ self.tables.items,
+ self.tables.order_items,
+ self.classes.Order,
+ self.classes.Item,
+ self.classes.User,
+ self.tables.orders)
mapper(User, users, properties=dict(
- orders = relationship(Order)
+ orders=relationship(Order)
))
mapper(Order, orders, properties=dict(
- items = relationship(Item, secondary=order_items)
+ items=relationship(Item, secondary=order_items)
))
mapper(Item, items)
@@ -2005,35 +2084,39 @@ class OptionsTest(_fixtures.FixtureTest):
oalias = aliased(Order)
opt1 = sa.orm.joinedload(User.orders, Order.items)
opt2 = sa.orm.contains_eager(User.orders, Order.items, alias=oalias)
- u1 = sess.query(User).join(oalias, User.orders).options(opt1, opt2).first()
+ u1 = sess.query(User).join(oalias, User.orders).\
+ options(opt1, opt2).first()
ustate = attributes.instance_state(u1)
assert opt1 in ustate.load_options
assert opt2 not in ustate.load_options
class DeepOptionsTest(_fixtures.FixtureTest):
+
@classmethod
def setup_mappers(cls):
- users, Keyword, items, order_items, Order, Item, User, keywords, item_keywords, orders = (cls.tables.users,
- cls.classes.Keyword,
- cls.tables.items,
- cls.tables.order_items,
- cls.classes.Order,
- cls.classes.Item,
- cls.classes.User,
- cls.tables.keywords,
- cls.tables.item_keywords,
- cls.tables.orders)
+ users, Keyword, items, order_items, Order, Item, User, \
+ keywords, item_keywords, orders = (
+ cls.tables.users,
+ cls.classes.Keyword,
+ cls.tables.items,
+ cls.tables.order_items,
+ cls.classes.Order,
+ cls.classes.Item,
+ cls.classes.User,
+ cls.tables.keywords,
+ cls.tables.item_keywords,
+ cls.tables.orders)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(Keyword, item_keywords,
- order_by=item_keywords.c.item_id)))
+ order_by=item_keywords.c.item_id)))
mapper(Order, orders, properties=dict(
items=relationship(Item, order_items,
- order_by=items.c.id)))
+ order_by=items.c.id)))
mapper(User, users, order_by=users.c.id, properties=dict(
orders=relationship(Order, order_by=orders.c.id)))
@@ -2045,8 +2128,9 @@ class DeepOptionsTest(_fixtures.FixtureTest):
# joinedload nothing.
u = sess.query(User).all()
+
def go():
- x = u[0].orders[1].items[0].keywords[1]
+ u[0].orders[1].items[0].keywords[1]
self.assert_sql_count(testing.db, go, 3)
def test_deep_options_2(self):
@@ -2054,24 +2138,24 @@ class DeepOptionsTest(_fixtures.FixtureTest):
User = self.classes.User
-
sess = create_session()
l = (sess.query(User).
- options(sa.orm.joinedload_all('orders.items.keywords'))).all()
+ options(sa.orm.joinedload_all('orders.items.keywords'))).all()
+
def go():
- x = l[0].orders[1].items[0].keywords[1]
+ l[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
sess = create_session()
l = (sess.query(User).
- options(sa.orm.subqueryload_all('orders.items.keywords'))).all()
+ options(sa.orm.subqueryload_all('orders.items.keywords'))).all()
+
def go():
- x = l[0].orders[1].items[0].keywords[1]
+ l[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
-
def test_deep_options_3(self):
User = self.classes.User
@@ -2083,14 +2167,15 @@ class DeepOptionsTest(_fixtures.FixtureTest):
options(sa.orm.joinedload('orders.items')).
options(sa.orm.joinedload('orders.items.keywords')))
u = q2.all()
+
def go():
- x = u[0].orders[1].items[0].keywords[1]
+ u[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
def test_deep_options_4(self):
Item, User, Order = (self.classes.Item,
- self.classes.User,
- self.classes.Order)
+ self.classes.User,
+ self.classes.Order)
sess = create_session()
@@ -2103,25 +2188,31 @@ class DeepOptionsTest(_fixtures.FixtureTest):
# joinedload "keywords" on items. it will lazy load "orders", then
# lazy load the "items" on the order, but on "items" it will eager
# load the "keywords"
- q3 = sess.query(User).options(sa.orm.joinedload('orders.items.keywords'))
+ q3 = sess.query(User).options(
+ sa.orm.joinedload('orders.items.keywords'))
u = q3.all()
+
def go():
- x = u[0].orders[1].items[0].keywords[1]
+ u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
sess = create_session()
q3 = sess.query(User).options(
- sa.orm.joinedload(User.orders, Order.items, Item.keywords))
+ sa.orm.joinedload(User.orders, Order.items, Item.keywords))
u = q3.all()
+
def go():
- x = u[0].orders[1].items[0].keywords[1]
+ u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
+
class ComparatorFactoryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
+
def test_kwarg_accepted(self):
users, Address = self.tables.users, self.classes.Address
class DummyComposite(object):
+
def __init__(self, x, y):
pass
@@ -2151,41 +2242,56 @@ class ComparatorFactoryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
class MyFactory(ColumnProperty.Comparator):
__hash__ = None
+
def __eq__(self, other):
- return func.foobar(self.__clause_element__()) == func.foobar(other)
- mapper(User, users, properties={'name':column_property(users.c.name, comparator_factory=MyFactory)})
- self.assert_compile(User.name == 'ed', "foobar(users.name) = foobar(:foobar_1)", dialect=default.DefaultDialect())
- self.assert_compile(aliased(User).name == 'ed', "foobar(users_1.name) = foobar(:foobar_1)", dialect=default.DefaultDialect())
+ return func.foobar(self.__clause_element__()) == \
+ func.foobar(other)
+ mapper(
+ User, users,
+ properties={
+ 'name': column_property(
+ users.c.name, comparator_factory=MyFactory)})
+ self.assert_compile(
+ User.name == 'ed',
+ "foobar(users.name) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect()
+ )
+ self.assert_compile(
+ aliased(User).name == 'ed',
+ "foobar(users_1.name) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
def test_synonym(self):
users, User = self.tables.users, self.classes.User
from sqlalchemy.orm.properties import ColumnProperty
+
class MyFactory(ColumnProperty.Comparator):
__hash__ = None
+
def __eq__(self, other):
return func.foobar(self.__clause_element__()) ==\
- func.foobar(other)
+ func.foobar(other)
mapper(User, users, properties={
- 'name':synonym('_name', map_column=True,
- comparator_factory=MyFactory)
- })
+ 'name': synonym('_name', map_column=True,
+ comparator_factory=MyFactory)
+ })
self.assert_compile(
- User.name == 'ed',
- "foobar(users.name) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
+ User.name == 'ed',
+ "foobar(users.name) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
self.assert_compile(
- aliased(User).name == 'ed',
- "foobar(users_1.name) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
+ aliased(User).name == 'ed',
+ "foobar(users_1.name) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
def test_relationship(self):
users, Address, addresses, User = (self.tables.users,
- self.classes.Address,
- self.tables.addresses,
- self.classes.User)
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
from sqlalchemy.orm.properties import RelationshipProperty
@@ -2194,46 +2300,50 @@ class ComparatorFactoryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
# primaryjoin/secondaryjoin
class MyFactory(RelationshipProperty.Comparator):
__hash__ = None
+
def __eq__(self, other):
return func.foobar(self._source_selectable().c.user_id) == \
func.foobar(other.id)
class MyFactory2(RelationshipProperty.Comparator):
__hash__ = None
+
def __eq__(self, other):
return func.foobar(self._source_selectable().c.id) == \
func.foobar(other.user_id)
mapper(User, users)
mapper(Address, addresses, properties={
- 'user': relationship(User, comparator_factory=MyFactory,
+ 'user': relationship(
+ User, comparator_factory=MyFactory,
backref=backref("addresses", comparator_factory=MyFactory2)
)
- }
+ }
)
# these are kind of nonsensical tests.
self.assert_compile(Address.user == User(id=5),
- "foobar(addresses.user_id) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
+ "foobar(addresses.user_id) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
self.assert_compile(User.addresses == Address(id=5, user_id=7),
- "foobar(users.id) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
+ "foobar(users.id) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
self.assert_compile(
- aliased(Address).user == User(id=5),
- "foobar(addresses_1.user_id) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
+ aliased(Address).user == User(id=5),
+ "foobar(addresses_1.user_id) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
self.assert_compile(
- aliased(User).addresses == Address(id=5, user_id=7),
- "foobar(users_1.id) = foobar(:foobar_1)",
- dialect=default.DefaultDialect())
-
+ aliased(User).addresses == Address(id=5, user_id=7),
+ "foobar(users_1.id) = foobar(:foobar_1)",
+ dialect=default.DefaultDialect())
class SecondaryOptionsTest(fixtures.MappedTest):
- """test that the contains_eager() option doesn't bleed into a secondary load."""
+
+ """test that the contains_eager() option doesn't bleed
+ into a secondary load."""
run_inserts = 'once'
@@ -2242,80 +2352,84 @@ class SecondaryOptionsTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("base", metadata,
- Column('id', Integer, primary_key=True),
- Column('type', String(50), nullable=False)
- )
+ Column('id', Integer, primary_key=True),
+ Column('type', String(50), nullable=False)
+ )
Table("child1", metadata,
- Column('id', Integer, ForeignKey('base.id'), primary_key=True),
- Column('child2id', Integer, ForeignKey('child2.id'), nullable=False)
- )
+ Column('id', Integer, ForeignKey('base.id'), primary_key=True),
+ Column(
+ 'child2id', Integer, ForeignKey('child2.id'), nullable=False)
+ )
Table("child2", metadata,
- Column('id', Integer, ForeignKey('base.id'), primary_key=True),
- )
+ Column('id', Integer, ForeignKey('base.id'), primary_key=True),
+ )
Table('related', metadata,
- Column('id', Integer, ForeignKey('base.id'), primary_key=True),
- )
+ Column('id', Integer, ForeignKey('base.id'), primary_key=True),
+ )
@classmethod
def setup_mappers(cls):
child1, child2, base, related = (cls.tables.child1,
- cls.tables.child2,
- cls.tables.base,
- cls.tables.related)
+ cls.tables.child2,
+ cls.tables.base,
+ cls.tables.related)
class Base(cls.Comparable):
pass
+
class Child1(Base):
pass
+
class Child2(Base):
pass
+
class Related(cls.Comparable):
pass
mapper(Base, base, polymorphic_on=base.c.type, properties={
- 'related':relationship(Related, uselist=False)
+ 'related': relationship(Related, uselist=False)
})
mapper(Child1, child1, inherits=Base,
- polymorphic_identity='child1',
- properties={
- 'child2':relationship(Child2,
- primaryjoin=child1.c.child2id==base.c.id,
- foreign_keys=child1.c.child2id)
- })
+ polymorphic_identity='child1',
+ properties={
+ 'child2': relationship(Child2,
+ primaryjoin=child1.c.child2id == base.c.id,
+ foreign_keys=child1.c.child2id)
+ })
mapper(Child2, child2, inherits=Base, polymorphic_identity='child2')
mapper(Related, related)
@classmethod
def insert_data(cls):
child1, child2, base, related = (cls.tables.child1,
- cls.tables.child2,
- cls.tables.base,
- cls.tables.related)
+ cls.tables.child2,
+ cls.tables.base,
+ cls.tables.related)
base.insert().execute([
- {'id':1, 'type':'child1'},
- {'id':2, 'type':'child1'},
- {'id':3, 'type':'child1'},
- {'id':4, 'type':'child2'},
- {'id':5, 'type':'child2'},
- {'id':6, 'type':'child2'},
+ {'id': 1, 'type': 'child1'},
+ {'id': 2, 'type': 'child1'},
+ {'id': 3, 'type': 'child1'},
+ {'id': 4, 'type': 'child2'},
+ {'id': 5, 'type': 'child2'},
+ {'id': 6, 'type': 'child2'},
])
child2.insert().execute([
- {'id':4},
- {'id':5},
- {'id':6},
+ {'id': 4},
+ {'id': 5},
+ {'id': 6},
])
child1.insert().execute([
- {'id':1, 'child2id':4},
- {'id':2, 'child2id':5},
- {'id':3, 'child2id':6},
+ {'id': 1, 'child2id': 4},
+ {'id': 2, 'child2id': 5},
+ {'id': 3, 'child2id': 6},
])
related.insert().execute([
- {'id':1},
- {'id':2},
- {'id':3},
- {'id':4},
- {'id':5},
- {'id':6},
+ {'id': 1},
+ {'id': 2},
+ {'id': 3},
+ {'id': 4},
+ {'id': 5},
+ {'id': 6},
])
def test_contains_eager(self):
@@ -2324,9 +2438,9 @@ class SecondaryOptionsTest(fixtures.MappedTest):
sess = create_session()
child1s = sess.query(Child1).\
- join(Child1.related).\
- options(sa.orm.contains_eager(Child1.related)).\
- order_by(Child1.id)
+ join(Child1.related).\
+ options(sa.orm.contains_eager(Child1.related)).\
+ order_by(Child1.id)
def go():
eq_(
@@ -2345,10 +2459,11 @@ class SecondaryOptionsTest(fixtures.MappedTest):
testing.db,
lambda: c1.child2,
CompiledSQL(
- "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type "
+ "SELECT child2.id AS child2_id, base.id AS base_id, "
+ "base.type AS base_type "
"FROM base JOIN child2 ON base.id = child2.id "
"WHERE base.id = :param_1",
- {'param_1':4}
+ {'param_1': 4}
)
)
@@ -2357,12 +2472,15 @@ class SecondaryOptionsTest(fixtures.MappedTest):
sess = create_session()
- child1s = sess.query(Child1).join(Child1.related).options(sa.orm.joinedload(Child1.related)).order_by(Child1.id)
+ child1s = sess.query(Child1).join(Child1.related).options(
+ sa.orm.joinedload(Child1.related)).order_by(Child1.id)
def go():
eq_(
child1s.all(),
- [Child1(id=1, related=Related(id=1)), Child1(id=2, related=Related(id=2)), Child1(id=3, related=Related(id=3))]
+ [Child1(id=1, related=Related(id=1)),
+ Child1(id=2, related=Related(id=2)),
+ Child1(id=3, related=Related(id=3))]
)
self.assert_sql_count(testing.db, go, 1)
@@ -2372,30 +2490,32 @@ class SecondaryOptionsTest(fixtures.MappedTest):
testing.db,
lambda: c1.child2,
CompiledSQL(
- "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type "
- "FROM base JOIN child2 ON base.id = child2.id WHERE base.id = :param_1",
-
-# joinedload- this shouldn't happen
-# "SELECT base.id AS base_id, child2.id AS child2_id, base.type AS base_type, "
-# "related_1.id AS related_1_id FROM base JOIN child2 ON base.id = child2.id "
-# "LEFT OUTER JOIN related AS related_1 ON base.id = related_1.id WHERE base.id = :param_1",
- {'param_1':4}
+ "SELECT child2.id AS child2_id, base.id AS base_id, "
+ "base.type AS base_type "
+ "FROM base JOIN child2 ON base.id = child2.id "
+ "WHERE base.id = :param_1",
+
+ {'param_1': 4}
)
)
def test_joinedload_on_same(self):
Child1, Child2, Related = (self.classes.Child1,
- self.classes.Child2,
- self.classes.Related)
+ self.classes.Child2,
+ self.classes.Related)
sess = create_session()
- child1s = sess.query(Child1).join(Child1.related).options(sa.orm.joinedload(Child1.child2, Child2.related)).order_by(Child1.id)
+ child1s = sess.query(Child1).join(Child1.related).options(
+ sa.orm.joinedload(Child1.child2, Child2.related)
+ ).order_by(Child1.id)
def go():
eq_(
child1s.all(),
- [Child1(id=1, related=Related(id=1)), Child1(id=2, related=Related(id=2)), Child1(id=3, related=Related(id=3))]
+ [Child1(id=1, related=Related(id=1)),
+ Child1(id=2, related=Related(id=2)),
+ Child1(id=3, related=Related(id=3))]
)
self.assert_sql_count(testing.db, go, 4)
@@ -2406,32 +2526,43 @@ class SecondaryOptionsTest(fixtures.MappedTest):
testing.db,
lambda: c1.child2,
CompiledSQL(
- "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type, "
- "related_1.id AS related_1_id FROM base JOIN child2 ON base.id = child2.id "
- "LEFT OUTER JOIN related AS related_1 ON base.id = related_1.id WHERE base.id = :param_1",
- {'param_1':4}
+ "SELECT child2.id AS child2_id, base.id AS base_id, "
+ "base.type AS base_type, "
+ "related_1.id AS related_1_id FROM base JOIN child2 "
+ "ON base.id = child2.id "
+ "LEFT OUTER JOIN related AS related_1 "
+ "ON base.id = related_1.id WHERE base.id = :param_1",
+ {'param_1': 4}
)
)
class DeferredPopulationTest(fixtures.MappedTest):
+
@classmethod
def define_tables(cls, metadata):
Table("thing", metadata,
- Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
- Column("name", String(20)))
+ Column(
+ "id", Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column("name", String(20)))
Table("human", metadata,
- Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
- Column("thing_id", Integer, ForeignKey("thing.id")),
- Column("name", String(20)))
+ Column(
+ "id", Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column("thing_id", Integer, ForeignKey("thing.id")),
+ Column("name", String(20)))
@classmethod
def setup_mappers(cls):
thing, human = cls.tables.thing, cls.tables.human
- class Human(cls.Basic): pass
- class Thing(cls.Basic): pass
+ class Human(cls.Basic):
+ pass
+
+ class Thing(cls.Basic):
+ pass
mapper(Human, human, properties={"thing": relationship(Thing)})
mapper(Thing, thing, properties={"name": deferred(thing.c.name)})
@@ -2462,7 +2593,7 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing = self.classes.Thing
session = create_session()
- result = session.query(Thing).first()
+ result = session.query(Thing).first() # noqa
session.expunge_all()
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
@@ -2471,7 +2602,7 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing = self.classes.Thing
session = create_session()
- result = session.query(Thing).first()
+ result = session.query(Thing).first() # noqa
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
@@ -2479,7 +2610,8 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing, Human = self.classes.Thing, self.classes.Human
session = create_session()
- human = session.query(Human).options(sa.orm.joinedload("thing")).first()
+ human = session.query(Human).options( # noqa
+ sa.orm.joinedload("thing")).first()
session.expunge_all()
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
@@ -2488,7 +2620,8 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing, Human = self.classes.Thing, self.classes.Human
session = create_session()
- human = session.query(Human).options(sa.orm.joinedload("thing")).first()
+ human = session.query(Human).options( # noqa
+ sa.orm.joinedload("thing")).first()
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
@@ -2496,7 +2629,8 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing, Human = self.classes.Thing, self.classes.Human
session = create_session()
- result = session.query(Human).add_entity(Thing).join("thing").first()
+ result = session.query(Human).add_entity( # noqa
+ Thing).join("thing").first()
session.expunge_all()
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
@@ -2505,88 +2639,119 @@ class DeferredPopulationTest(fixtures.MappedTest):
Thing, Human = self.classes.Thing, self.classes.Human
session = create_session()
- result = session.query(Human).add_entity(Thing).join("thing").first()
+ result = session.query(Human).add_entity( # noqa
+ Thing).join("thing").first()
thing = session.query(Thing).options(sa.orm.undefer("name")).first()
self._test(thing)
-
-
class NoLoadTest(_fixtures.FixtureTest):
run_inserts = 'once'
run_deletes = None
- def test_basic(self):
- """A basic one-to-many lazy load"""
+ def test_o2m_noload(self):
- Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ Address, addresses, users, User = (
+ self.classes.Address,
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
m = mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='noload')
+ addresses=relationship(mapper(Address, addresses), lazy='noload')
))
q = create_session().query(m)
l = [None]
+
def go():
x = q.filter(User.id == 7).all()
x[0].addresses
l[0] = x
self.assert_sql_count(testing.db, go, 1)
- self.assert_result(l[0], User,
- {'id' : 7, 'addresses' : (Address, [])},
- )
+ self.assert_result(
+ l[0], User,
+ {'id': 7, 'addresses': (Address, [])},
+ )
- def test_options(self):
- Address, addresses, users, User = (self.classes.Address,
- self.tables.addresses,
- self.tables.users,
- self.classes.User)
+ def test_upgrade_o2m_noload_lazyload_option(self):
+ Address, addresses, users, User = (
+ self.classes.Address,
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
m = mapper(User, users, properties=dict(
- addresses = relationship(mapper(Address, addresses), lazy='noload')
+ addresses=relationship(mapper(Address, addresses), lazy='noload')
))
q = create_session().query(m).options(sa.orm.lazyload('addresses'))
l = [None]
+
def go():
x = q.filter(User.id == 7).all()
x[0].addresses
l[0] = x
self.sql_count_(2, go)
- self.assert_result(l[0], User,
- {'id' : 7, 'addresses' : (Address, [{'id' : 1}])},
- )
-
+ self.assert_result(
+ l[0], User,
+ {'id': 7, 'addresses': (Address, [{'id': 1}])},
+ )
+ def test_m2o_noload_option(self):
+ Address, addresses, users, User = (
+ self.classes.Address,
+ self.tables.addresses,
+ self.tables.users,
+ self.classes.User)
+ mapper(Address, addresses, properties={
+ 'user': relationship(User)
+ })
+ mapper(User, users)
+ s = Session()
+ a1 = s.query(Address).filter_by(id=1).options(
+ sa.orm.noload('user')).first()
+ def go():
+ eq_(a1.user, None)
+ self.sql_count_(0, go)
class RequirementsTest(fixtures.MappedTest):
+
"""Tests the contract for user classes."""
@classmethod
def define_tables(cls, metadata):
Table('ht1', metadata,
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column('value', String(10)))
Table('ht2', metadata,
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column('ht1_id', Integer, ForeignKey('ht1.id')),
Column('value', String(10)))
Table('ht3', metadata,
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column('value', String(10)))
Table('ht4', metadata,
- Column('ht1_id', Integer, ForeignKey('ht1.id'), primary_key=True),
- Column('ht3_id', Integer, ForeignKey('ht3.id'), primary_key=True))
+ Column('ht1_id', Integer, ForeignKey('ht1.id'),
+ primary_key=True),
+ Column('ht3_id', Integer, ForeignKey('ht3.id'),
+ primary_key=True))
Table('ht5', metadata,
- Column('ht1_id', Integer, ForeignKey('ht1.id'), primary_key=True))
+ Column('ht1_id', Integer, ForeignKey('ht1.id'),
+ primary_key=True))
Table('ht6', metadata,
- Column('ht1a_id', Integer, ForeignKey('ht1.id'), primary_key=True),
- Column('ht1b_id', Integer, ForeignKey('ht1.id'), primary_key=True),
+ Column('ht1a_id', Integer, ForeignKey('ht1.id'),
+ primary_key=True),
+ Column('ht1b_id', Integer, ForeignKey('ht1.id'),
+ primary_key=True),
Column('value', String(10)))
if util.py2k:
@@ -2604,16 +2769,21 @@ class RequirementsTest(fixtures.MappedTest):
pass
# TODO: is weakref support detectable without an instance?
- #self.assertRaises(sa.exc.ArgumentError, mapper, NoWeakrefSupport, t2)
+ # self.assertRaises(
+ # sa.exc.ArgumentError, mapper, NoWeakrefSupport, t2)
class _ValueBase(object):
+
def __init__(self, value='abc', id=None):
self.id = id
self.value = value
+
def __bool__(self):
return False
+
def __hash__(self):
return hash(self.value)
+
def __eq__(self, other):
if isinstance(other, type(self)):
return self.value == other.value
@@ -2630,19 +2800,21 @@ class RequirementsTest(fixtures.MappedTest):
"""
ht6, ht5, ht4, ht3, ht2, ht1 = (self.tables.ht6,
- self.tables.ht5,
- self.tables.ht4,
- self.tables.ht3,
- self.tables.ht2,
- self.tables.ht1)
-
+ self.tables.ht5,
+ self.tables.ht4,
+ self.tables.ht3,
+ self.tables.ht2,
+ self.tables.ht1)
class H1(self._ValueBase):
pass
+
class H2(self._ValueBase):
pass
+
class H3(self._ValueBase):
pass
+
class H6(self._ValueBase):
pass
@@ -2651,10 +2823,10 @@ class RequirementsTest(fixtures.MappedTest):
'h3s': relationship(H3, secondary=ht4, backref='h1s'),
'h1s': relationship(H1, secondary=ht5, backref='parent_h1'),
't6a': relationship(H6, backref='h1a',
- primaryjoin=ht1.c.id==ht6.c.ht1a_id),
+ primaryjoin=ht1.c.id == ht6.c.ht1a_id),
't6b': relationship(H6, backref='h1b',
- primaryjoin=ht1.c.id==ht6.c.ht1b_id),
- })
+ primaryjoin=ht1.c.id == ht6.c.ht1b_id),
+ })
mapper(H2, ht2)
mapper(H3, ht3)
mapper(H6, ht6)
@@ -2709,18 +2881,19 @@ class RequirementsTest(fixtures.MappedTest):
sa.orm.joinedload_all('h3s.h1s')).all()
eq_(len(h1s), 5)
-
def test_composite_results(self):
ht2, ht1 = (self.tables.ht2,
- self.tables.ht1)
-
+ self.tables.ht1)
class H1(self._ValueBase):
+
def __init__(self, value, id, h2s):
self.value = value
self.id = id
self.h2s = h2s
+
class H2(self._ValueBase):
+
def __init__(self, value, id):
self.value = value
self.id = id
@@ -2745,8 +2918,8 @@ class RequirementsTest(fixtures.MappedTest):
s.commit()
eq_(
[(h1.value, h1.id, h2.value, h2.id)
- for h1, h2 in
- s.query(H1, H2).join(H1.h2s).order_by(H1.id, H2.id)],
+ for h1, h2 in
+ s.query(H1, H2).join(H1.h2s).order_by(H1.id, H2.id)],
[
('abc', 1, 'abc', 1),
('abc', 1, 'def', 2),
@@ -2761,6 +2934,7 @@ class RequirementsTest(fixtures.MappedTest):
ht1 = self.tables.ht1
class H1(object):
+
def __len__(self):
return len(self.get_value())
@@ -2769,6 +2943,7 @@ class RequirementsTest(fixtures.MappedTest):
return self.value
class H2(object):
+
def __bool__(self):
return bool(self.get_value())
@@ -2781,19 +2956,21 @@ class RequirementsTest(fixtures.MappedTest):
h1 = H1()
h1.value = "Asdf"
- h1.value = "asdf asdf" # ding
+ h1.value = "asdf asdf" # ding
h2 = H2()
h2.value = "Asdf"
- h2.value = "asdf asdf" # ding
+ h2.value = "asdf asdf" # ding
+
class IsUserlandTest(fixtures.MappedTest):
+
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata,
- Column('id', Integer, primary_key=True),
- Column('someprop', Integer)
- )
+ Column('id', Integer, primary_key=True),
+ Column('someprop', Integer)
+ )
def _test(self, value, instancelevel=None):
class Foo(object):
@@ -2842,17 +3019,20 @@ class IsUserlandTest(fixtures.MappedTest):
return "hi"
self._test(property(somefunc), "hi")
+
class MagicNamesTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('cartographers', metadata,
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column('name', String(50)),
Column('alias', String(50)),
Column('quip', String(100)))
Table('maps', metadata,
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column('cart_id', Integer,
ForeignKey('cartographers.id')),
Column('state', String(2)),
@@ -2868,9 +3048,9 @@ class MagicNamesTest(fixtures.MappedTest):
def test_mappish(self):
maps, Cartographer, cartographers, Map = (self.tables.maps,
- self.classes.Cartographer,
- self.tables.cartographers,
- self.classes.Map)
+ self.classes.Cartographer,
+ self.tables.cartographers,
+ self.classes.Map)
mapper(Cartographer, cartographers, properties=dict(
query=cartographers.c.quip))
@@ -2879,7 +3059,7 @@ class MagicNamesTest(fixtures.MappedTest):
c = Cartographer(name='Lenny', alias='The Dude',
query='Where be dragons?')
- m = Map(state='AK', mapper=c)
+ Map(state='AK', mapper=c)
sess = create_session()
sess.add(c)
@@ -2889,16 +3069,18 @@ class MagicNamesTest(fixtures.MappedTest):
for C, M in ((Cartographer, Map),
(sa.orm.aliased(Cartographer), sa.orm.aliased(Map))):
c1 = (sess.query(C).
- filter(C.alias=='The Dude').
- filter(C.query=='Where be dragons?')).one()
- m1 = sess.query(M).filter(M.mapper==c1).one()
+ filter(C.alias == 'The Dude').
+ filter(C.query == 'Where be dragons?')).one()
+ sess.query(M).filter(M.mapper == c1).one()
def test_direct_stateish(self):
for reserved in (sa.orm.instrumentation.ClassManager.STATE_ATTR,
sa.orm.instrumentation.ClassManager.MANAGER_ATTR):
t = Table('t', sa.MetaData(),
- Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
Column(reserved, Integer))
+
class T(object):
pass
assert_raises_message(
@@ -2920,6 +3102,4 @@ class MagicNamesTest(fixtures.MappedTest):
('requested attribute name conflicts with '
'instrumentation attribute of the same name'),
mapper, M, maps, properties={
- reserved: maps.c.state})
-
-
+ reserved: maps.c.state})
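
The NoLoadTest hunks earlier in this file split the old test_basic/test_options cases into explicit o2m/m2o variants and add a per-query noload option. As a rough, self-contained sketch of the pattern those tests exercise (hypothetical User/Address declarative models and an in-memory SQLite engine, not the fixture classes used by the suite):

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, noload, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('users.id'))
        user = relationship(User)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add(Address(id=1, user=User(id=7, name='jack')))
    session.commit()
    session.expunge_all()

    # noload() leaves the attribute empty instead of lazy loading it,
    # so .user comes back None even though a matching row exists
    a1 = session.query(Address).options(noload('user')).filter_by(id=1).one()
    print(a1.user)  # None
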
diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py
index a52274896..dab9f4305 100644
--- a/test/orm/test_merge.py
+++ b/test/orm/test_merge.py
@@ -1102,6 +1102,101 @@ class MergeTest(_fixtures.FixtureTest):
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt2]))
+ def test_resolve_conflicts_pending_doesnt_interfere_no_ident(self):
+ User, Address, Order = (
+ self.classes.User, self.classes.Address, self.classes.Order)
+ users, addresses, orders = (
+ self.tables.users, self.tables.addresses, self.tables.orders)
+
+ mapper(User, users, properties={
+ 'orders': relationship(Order)
+ })
+ mapper(Order, orders, properties={
+ 'address': relationship(Address)
+ })
+ mapper(Address, addresses)
+
+ u1 = User(id=7, name='x')
+ u1.orders = [
+ Order(description='o1', address=Address(email_address='a')),
+ Order(description='o2', address=Address(email_address='b')),
+ Order(description='o3', address=Address(email_address='c'))
+ ]
+
+ sess = Session()
+ sess.merge(u1)
+ sess.flush()
+
+ eq_(
+ sess.query(Address.email_address).order_by(
+ Address.email_address).all(),
+ [('a', ), ('b', ), ('c', )]
+ )
+
+ def test_resolve_conflicts_pending(self):
+ User, Address, Order = (
+ self.classes.User, self.classes.Address, self.classes.Order)
+ users, addresses, orders = (
+ self.tables.users, self.tables.addresses, self.tables.orders)
+
+ mapper(User, users, properties={
+ 'orders': relationship(Order)
+ })
+ mapper(Order, orders, properties={
+ 'address': relationship(Address)
+ })
+ mapper(Address, addresses)
+
+ u1 = User(id=7, name='x')
+ u1.orders = [
+ Order(description='o1', address=Address(id=1, email_address='a')),
+ Order(description='o2', address=Address(id=1, email_address='b')),
+ Order(description='o3', address=Address(id=1, email_address='c'))
+ ]
+
+ sess = Session()
+ sess.merge(u1)
+ sess.flush()
+
+ eq_(
+ sess.query(Address).one(),
+ Address(id=1, email_address='c')
+ )
+
+ def test_resolve_conflicts_persistent(self):
+ User, Address, Order = (
+ self.classes.User, self.classes.Address, self.classes.Order)
+ users, addresses, orders = (
+ self.tables.users, self.tables.addresses, self.tables.orders)
+
+ mapper(User, users, properties={
+ 'orders': relationship(Order)
+ })
+ mapper(Order, orders, properties={
+ 'address': relationship(Address)
+ })
+ mapper(Address, addresses)
+
+ sess = Session()
+ sess.add(Address(id=1, email_address='z'))
+ sess.commit()
+
+ u1 = User(id=7, name='x')
+ u1.orders = [
+ Order(description='o1', address=Address(id=1, email_address='a')),
+ Order(description='o2', address=Address(id=1, email_address='b')),
+ Order(description='o3', address=Address(id=1, email_address='c'))
+ ]
+
+ sess = Session()
+ sess.merge(u1)
+ sess.flush()
+
+ eq_(
+ sess.query(Address).one(),
+ Address(id=1, email_address='c')
+ )
+
class M2ONoUseGetLoadingTest(fixtures.MappedTest):
"""Merge a one-to-many. The many-to-one on the other side is set up
diff --git a/test/orm/test_options.py b/test/orm/test_options.py
index 1c1a797a6..e1e26c62c 100644
--- a/test/orm/test_options.py
+++ b/test/orm/test_options.py
@@ -2,7 +2,7 @@ from sqlalchemy import inspect
from sqlalchemy.orm import attributes, mapper, relationship, backref, \
configure_mappers, create_session, synonym, Session, class_mapper, \
aliased, column_property, joinedload_all, joinedload, Query,\
- util as orm_util, Load
+ util as orm_util, Load, defer
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing.assertions import eq_, assert_raises, assert_raises_message
@@ -46,8 +46,18 @@ class PathTest(object):
set([self._make_path(p) for p in paths])
)
+
class LoadTest(PathTest, QueryTest):
+ def test_str(self):
+ User = self.classes.User
+ l = Load(User)
+ l.strategy = (('deferred', False), ('instrument', True))
+ eq_(
+ str(l),
+ "Load(strategy=(('deferred', False), ('instrument', True)))"
+ )
+
def test_gen_path_attr_entity(self):
User = self.classes.User
Address = self.classes.Address
diff --git a/test/orm/test_query.py b/test/orm/test_query.py
index 55af023b1..d2f9e4a66 100644
--- a/test/orm/test_query.py
+++ b/test/orm/test_query.py
@@ -579,8 +579,7 @@ class GetTest(QueryTest):
table = Table(
'unicode_data', metadata,
Column(
- 'id', Unicode(40), primary_key=True,
- test_needs_autoincrement=True),
+ 'id', Unicode(40), primary_key=True),
Column('data', Unicode(40)))
metadata.create_all()
ustring = util.b('petit voix m\xe2\x80\x99a').decode('utf-8')
@@ -776,6 +775,42 @@ class InvalidGenerationsTest(QueryTest, AssertsCompiledSQL):
meth, q, *arg, **kw
)
+ def test_illegal_coercions(self):
+ User = self.classes.User
+
+ assert_raises_message(
+ sa_exc.ArgumentError,
+ "Object .*User.* is not legal as a SQL literal value",
+ distinct, User
+ )
+
+ ua = aliased(User)
+ assert_raises_message(
+ sa_exc.ArgumentError,
+ "Object .*User.* is not legal as a SQL literal value",
+ distinct, ua
+ )
+
+ s = Session()
+ assert_raises_message(
+ sa_exc.ArgumentError,
+ "Object .*User.* is not legal as a SQL literal value",
+ lambda: s.query(User).filter(User.name == User)
+ )
+
+ u1 = User()
+ assert_raises_message(
+ sa_exc.ArgumentError,
+ "Object .*User.* is not legal as a SQL literal value",
+ distinct, u1
+ )
+
+ assert_raises_message(
+ sa_exc.ArgumentError,
+ "Object .*User.* is not legal as a SQL literal value",
+ lambda: s.query(User).filter(User.name == u1)
+ )
+
class OperatorTest(QueryTest, AssertsCompiledSQL):
"""test sql.Comparator implementation for MapperProperties"""
@@ -1960,13 +1995,6 @@ class FilterTest(QueryTest, AssertsCompiledSQL):
sess.query(User). \
filter(User.addresses.any(email_address='fred@fred.com')).all()
- # test that any() doesn't overcorrelate
- assert [User(id=7), User(id=8)] == \
- sess.query(User).join("addresses"). \
- filter(
- ~User.addresses.any(
- Address.email_address == 'fred@fred.com')).all()
-
# test that the contents are not adapted by the aliased join
assert [User(id=7), User(id=8)] == \
sess.query(User).join("addresses", aliased=True). \
@@ -1978,6 +2006,18 @@ class FilterTest(QueryTest, AssertsCompiledSQL):
sess.query(User).outerjoin("addresses", aliased=True). \
filter(~User.addresses.any()).all()
+ def test_any_doesnt_overcorrelate(self):
+ User, Address = self.classes.User, self.classes.Address
+
+ sess = create_session()
+
+ # test that any() doesn't overcorrelate
+ assert [User(id=7), User(id=8)] == \
+ sess.query(User).join("addresses"). \
+ filter(
+ ~User.addresses.any(
+ Address.email_address == 'fred@fred.com')).all()
+
def test_has(self):
Dingaling, User, Address = (
self.classes.Dingaling, self.classes.User, self.classes.Address)
@@ -2190,6 +2230,42 @@ class FilterTest(QueryTest, AssertsCompiledSQL):
)
+class HasMapperEntitiesTest(QueryTest):
+ def test_entity(self):
+ User = self.classes.User
+ s = Session()
+
+ q = s.query(User)
+
+ assert q._has_mapper_entities
+
+ def test_cols(self):
+ User = self.classes.User
+ s = Session()
+
+ q = s.query(User.id)
+
+ assert not q._has_mapper_entities
+
+ def test_cols_set_entities(self):
+ User = self.classes.User
+ s = Session()
+
+ q = s.query(User.id)
+
+ q._set_entities(User)
+ assert q._has_mapper_entities
+
+ def test_entity_set_entities(self):
+ User = self.classes.User
+ s = Session()
+
+ q = s.query(User)
+
+ q._set_entities(User.id)
+ assert not q._has_mapper_entities
+
+
class SetOpsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -3140,6 +3216,39 @@ class ParentTest(QueryTest, AssertsCompiledSQL):
# sess.query(Order).with_parent(None, property='addresses').all()
# == [Order(description="order 5")]
+ def test_select_from(self):
+ User, Address = self.classes.User, self.classes.Address
+
+ sess = create_session()
+ u1 = sess.query(User).get(7)
+ q = sess.query(Address).select_from(Address).with_parent(u1)
+ self.assert_compile(
+ q,
+ "SELECT addresses.id AS addresses_id, "
+ "addresses.user_id AS addresses_user_id, "
+ "addresses.email_address AS addresses_email_address "
+ "FROM addresses WHERE :param_1 = addresses.user_id",
+ {'param_1': 7}
+ )
+
+ @testing.fails("issue #3607")
+ def test_select_from_alias(self):
+ User, Address = self.classes.User, self.classes.Address
+
+ sess = create_session()
+ u1 = sess.query(User).get(7)
+ a1 = aliased(Address)
+ q = sess.query(a1).with_parent(u1)
+ self.assert_compile(
+ q,
+ "SELECT addresses_1.id AS addresses_1_id, "
+ "addresses_1.user_id AS addresses_1_user_id, "
+ "addresses_1.email_address AS addresses_1_email_address "
+ "FROM addresses AS addresses_1 "
+ "WHERE :param_1 = addresses_1.user_id",
+ {'param_1': 7}
+ )
+
def test_noparent(self):
Item, User = self.classes.Item, self.classes.User
@@ -3547,13 +3656,17 @@ class ImmediateTest(_fixtures.FixtureTest):
sess = create_session()
- assert_raises(
+ assert_raises_message(
sa.orm.exc.NoResultFound,
+ "No row was found for one\(\)",
sess.query(User).filter(User.id == 99).one)
eq_(sess.query(User).filter(User.id == 7).one().id, 7)
- assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).one)
+ assert_raises_message(
+ sa.orm.exc.MultipleResultsFound,
+ "Multiple rows were found for one\(\)",
+ sess.query(User).one)
assert_raises(
sa.orm.exc.NoResultFound,
@@ -3598,6 +3711,60 @@ class ImmediateTest(_fixtures.FixtureTest):
sess.query(User).join(User.addresses).filter(User.id.in_([8, 9])).
order_by(User.id).one)
+ def test_one_or_none(self):
+ User, Address = self.classes.User, self.classes.Address
+
+ sess = create_session()
+
+ eq_(sess.query(User).filter(User.id == 99).one_or_none(), None)
+
+ eq_(sess.query(User).filter(User.id == 7).one_or_none().id, 7)
+
+ assert_raises_message(
+ sa.orm.exc.MultipleResultsFound,
+ "Multiple rows were found for one_or_none\(\)",
+ sess.query(User).one_or_none)
+
+ eq_(sess.query(User.id, User.name).filter(User.id == 99).one_or_none(), None)
+
+ eq_(sess.query(User.id, User.name).filter(User.id == 7).one_or_none(),
+ (7, 'jack'))
+
+ assert_raises(
+ sa.orm.exc.MultipleResultsFound,
+ sess.query(User.id, User.name).one_or_none)
+
+ eq_(
+ (sess.query(User, Address).join(User.addresses).
+ filter(Address.id == 99)).one_or_none(), None)
+
+ eq_((sess.query(User, Address).
+ join(User.addresses).
+ filter(Address.id == 4)).one_or_none(),
+ (User(id=8), Address(id=4)))
+
+ assert_raises(
+ sa.orm.exc.MultipleResultsFound,
+ sess.query(User, Address).join(User.addresses).one_or_none)
+
+ # this result returns multiple rows, the first
+ # two rows being the same. but uniquing is
+ # not applied for a column based result.
+ assert_raises(
+ sa.orm.exc.MultipleResultsFound,
+ sess.query(User.id).join(User.addresses).
+ filter(User.id.in_([8, 9])).order_by(User.id).one_or_none)
+
+ # test that a join which ultimately returns
+ # multiple identities across many rows still
+ # raises, even though the first two rows are of
+ # the same identity and unique filtering
+ # is applied ([ticket:1688])
+ assert_raises(
+ sa.orm.exc.MultipleResultsFound,
+ sess.query(User).join(User.addresses).filter(User.id.in_([8, 9])).
+ order_by(User.id).one_or_none)
+
@testing.future
def test_getslice(self):
assert False
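
The test_one_or_none block added above exercises Query.one_or_none(): like one(), except that an empty result returns None instead of raising NoResultFound, while multiple rows still raise MultipleResultsFound. A minimal sketch of that contract (hypothetical User model, in-memory SQLite):

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session
    from sqlalchemy.orm.exc import MultipleResultsFound

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add_all([User(id=7, name='jack'), User(id=8, name='ed')])
    session.commit()

    print(session.query(User).filter(User.id == 99).one_or_none())      # None
    print(session.query(User).filter(User.id == 7).one_or_none().name)  # jack

    try:
        session.query(User).one_or_none()  # two rows match
    except MultipleResultsFound as err:
        print(err)  # Multiple rows were found for one_or_none()
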
diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py
index 9e4b38a90..061187330 100644
--- a/test/orm/test_relationships.py
+++ b/test/orm/test_relationships.py
@@ -931,14 +931,12 @@ class SynonymsAsFKsTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("tableA", metadata,
- Column("id", Integer, primary_key=True,
- test_needs_autoincrement=True),
+ Column("id", Integer, primary_key=True),
Column("foo", Integer,),
test_needs_fk=True)
Table("tableB", metadata,
- Column("id", Integer, primary_key=True,
- test_needs_autoincrement=True),
+ Column("id", Integer, primary_key=True),
Column("_a_id", Integer, key='a_id', primary_key=True),
test_needs_fk=True)
@@ -1093,7 +1091,7 @@ class FKsAsPksTest(fixtures.MappedTest):
'tablec', tableA.metadata,
Column('id', Integer, primary_key=True),
Column('a_id', Integer, ForeignKey('tableA.id'),
- primary_key=True, autoincrement=False, nullable=True))
+ primary_key=True, nullable=True))
tableC.create()
class C(fixtures.BasicEntity):
@@ -2703,8 +2701,7 @@ class ExplicitLocalRemoteTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('t1', metadata,
- Column('id', String(50), primary_key=True,
- test_needs_autoincrement=True),
+ Column('id', String(50), primary_key=True),
Column('data', String(50)))
Table('t2', metadata,
Column('id', Integer, primary_key=True,
diff --git a/test/orm/test_session.py b/test/orm/test_session.py
index 58551d763..caeb08530 100644
--- a/test/orm/test_session.py
+++ b/test/orm/test_session.py
@@ -17,6 +17,7 @@ from sqlalchemy.util import pypy
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy import event, ForeignKey
+from sqlalchemy.util.compat import inspect_getargspec
class ExecutionTest(_fixtures.FixtureTest):
@@ -493,8 +494,10 @@ class SessionStateTest(_fixtures.FixtureTest):
'is already attached to session',
s2.delete, user)
u2 = s2.query(User).get(user.id)
- assert_raises_message(sa.exc.InvalidRequestError,
- 'another instance with key', s.delete, u2)
+ s2.expunge(u2)
+ assert_raises_message(
+ sa.exc.InvalidRequestError,
+ 'another instance .* is already present', s.delete, u2)
s.expire(user)
s.expunge(user)
assert user not in s
@@ -543,8 +546,14 @@ class SessionStateTest(_fixtures.FixtureTest):
s.expunge(u2)
s.identity_map.add(sa.orm.attributes.instance_state(u1))
- assert_raises(AssertionError, s.identity_map.add,
- sa.orm.attributes.instance_state(u2))
+ assert_raises_message(
+ sa.exc.InvalidRequestError,
+ "Can't attach instance <User.*?>; another instance "
+ "with key .*? is already "
+ "present in this session.",
+ s.identity_map.add,
+ sa.orm.attributes.instance_state(u2)
+ )
def test_pickled_update(self):
users, User = self.tables.users, pickleable.User
@@ -581,7 +590,13 @@ class SessionStateTest(_fixtures.FixtureTest):
assert u2 is not None and u2 is not u1
assert u2 in sess
- assert_raises(AssertionError, lambda: sess.add(u1))
+ assert_raises_message(
+ sa.exc.InvalidRequestError,
+ "Can't attach instance <User.*?>; another instance "
+ "with key .*? is already "
+ "present in this session.",
+ sess.add, u1
+ )
sess.expunge(u2)
assert u2 not in sess
@@ -1124,11 +1139,56 @@ class WeakIdentityMapTest(_fixtures.FixtureTest):
class StrongIdentityMapTest(_fixtures.FixtureTest):
run_inserts = None
+ def _strong_ident_fixture(self):
+ sess = create_session(weak_identity_map=False)
+ return sess, sess.prune
+
+ def _event_fixture(self):
+ session = create_session()
+
+ @event.listens_for(session, "pending_to_persistent")
+ @event.listens_for(session, "deleted_to_persistent")
+ @event.listens_for(session, "detached_to_persistent")
+ @event.listens_for(session, "loaded_as_persistent")
+ def strong_ref_object(sess, instance):
+ if 'refs' not in sess.info:
+ sess.info['refs'] = refs = set()
+ else:
+ refs = sess.info['refs']
+
+ refs.add(instance)
+
+ @event.listens_for(session, "persistent_to_detached")
+ @event.listens_for(session, "persistent_to_deleted")
+ @event.listens_for(session, "persistent_to_transient")
+ def deref_object(sess, instance):
+ sess.info['refs'].discard(instance)
+
+ def prune():
+ if 'refs' not in session.info:
+ return 0
+
+ sess_size = len(session.identity_map)
+ session.info['refs'].clear()
+ gc_collect()
+ session.info['refs'] = set(
+ s.obj() for s in session.identity_map.all_states())
+ return sess_size - len(session.identity_map)
+
+ return session, prune
+
@testing.uses_deprecated()
- def test_strong_ref(self):
+ def test_strong_ref_imap(self):
+ self._test_strong_ref(self._strong_ident_fixture)
+
+ def test_strong_ref_events(self):
+ self._test_strong_ref(self._event_fixture)
+
+ def _test_strong_ref(self, fixture):
+ s, prune = fixture()
+
users, User = self.tables.users, self.classes.User
- s = create_session(weak_identity_map=False)
mapper(User, users)
# save user
@@ -1148,12 +1208,19 @@ class StrongIdentityMapTest(_fixtures.FixtureTest):
eq_(users.select().execute().fetchall(), [(user.id, 'u2')])
@testing.uses_deprecated()
+ def test_prune_imap(self):
+ self._test_prune(self._strong_ident_fixture)
+
+ def test_prune_events(self):
+ self._test_prune(self._event_fixture)
+
@testing.fails_if(lambda: pypy, "pypy has a real GC")
@testing.fails_on('+zxjdbc', 'http://www.sqlalchemy.org/trac/ticket/1473')
- def test_prune(self):
+ def _test_prune(self, fixture):
+ s, prune = fixture()
+
users, User = self.tables.users, self.classes.User
- s = create_session(weak_identity_map=False)
mapper(User, users)
for o in [User(name='u%s' % x) for x in range(10)]:
@@ -1161,43 +1228,44 @@ class StrongIdentityMapTest(_fixtures.FixtureTest):
# o is still live after this loop...
self.assert_(len(s.identity_map) == 0)
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
s.flush()
gc_collect()
- self.assert_(s.prune() == 9)
+ eq_(prune(), 9)
+ # o is still in local scope here, so still present
self.assert_(len(s.identity_map) == 1)
id = o.id
del o
- self.assert_(s.prune() == 1)
+ eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id)
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
u.name = 'squiznart'
del u
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
- self.assert_(s.prune() == 1)
+ eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
s.add(User(name='x'))
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
s.flush()
self.assert_(len(s.identity_map) == 1)
- self.assert_(s.prune() == 1)
+ eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id)
s.delete(u)
del u
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
- self.assert_(s.prune() == 0)
+ eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
@@ -1416,7 +1484,7 @@ class SessionInterface(fixtures.TestBase):
for meth in Session.public_methods:
if meth in blacklist:
continue
- spec = inspect.getargspec(getattr(Session, meth))
+ spec = inspect_getargspec(getattr(Session, meth))
if len(spec[0]) > 1 or spec[1]:
ok.add(meth)
return ok
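
The event-based fixture added above (pending_to_persistent, deleted_to_persistent, detached_to_persistent, loaded_as_persistent, plus their persistent_to_* counterparts) can serve as a replacement recipe for the deprecated weak_identity_map=False option. A minimal standalone sketch, assuming a Session whose bind/engine is configured elsewhere:

from sqlalchemy import event
from sqlalchemy.orm import Session

session = Session()   # assumes an engine/bind and mappings set up elsewhere

@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
    # keep a strong reference for as long as the object remains persistent
    sess.info.setdefault("refs", set()).add(instance)

@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
    sess.info.get("refs", set()).discard(instance)

The _event_fixture above additionally shows how such a recipe can emulate Session.prune(): clear the reference set, run a garbage collection, then rebuild the set from identity_map.all_states().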
diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py
index 91846a67e..7efb5942b 100644
--- a/test/orm/test_transaction.py
+++ b/test/orm/test_transaction.py
@@ -657,6 +657,34 @@ class SessionTransactionTest(FixtureTest):
assert session.transaction is not None, \
'autocommit=False should start a new transaction'
+ @testing.skip_if("oracle", "oracle doesn't support release of savepoint")
+ @testing.requires.savepoints
+ def test_report_primary_error_when_rollback_fails(self):
+ User, users = self.classes.User, self.tables.users
+
+ mapper(User, users)
+
+ session = Session(testing.db)
+
+ with expect_warnings(".*due to an additional ROLLBACK.*INSERT INTO"):
+ session.begin_nested()
+ savepoint = session.\
+ connection()._Connection__transaction._savepoint
+
+ # force the savepoint to disappear
+ session.connection().dialect.do_release_savepoint(
+ session.connection(), savepoint
+ )
+
+ # now do a broken flush
+ session.add_all([User(id=1), User(id=1)])
+
+ assert_raises_message(
+ sa_exc.DBAPIError,
+ "ROLLBACK TO SAVEPOINT ",
+ session.flush
+ )
+
class _LocalFixture(FixtureTest):
run_setup_mappers = 'once'
@@ -895,7 +923,13 @@ class AutoExpireTest(_LocalFixture):
assert u1_state.obj() is None
s.rollback()
- assert u1_state in s.identity_map.all_states()
+    # new in 1.1: the object is not put back in the identity map if it
+    # was garbage collected before the snapshot is restored; update_impl
+    # has been changed to simply skip such an object
+ assert u1_state not in s.identity_map.all_states()
+
+    # in any version, the old state is replaced once the object is
+    # re-queried, since the identity map swaps in the newly loaded state
u1 = s.query(User).filter_by(name='ed').one()
assert u1_state not in s.identity_map.all_states()
assert s.scalar(users.count()) == 1
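
For context, the savepoint scenario exercised by test_report_primary_error_when_rollback_fails boils down to the following usage pattern; this is only a sketch, assuming a mapped User class with an integer primary key and a backend that supports savepoints:

session = Session(engine)                    # hypothetical engine / mappings
session.begin_nested()                       # opens a SAVEPOINT block
session.add_all([User(id=1), User(id=1)])    # duplicate primary key
try:
    session.flush()                          # the failed INSERT triggers ROLLBACK TO SAVEPOINT
except Exception:
    session.rollback()                       # roll the outer transaction back as well

The new test releases the savepoint behind the ORM's back so that the ROLLBACK TO SAVEPOINT itself fails, then asserts that the original INSERT error is still the one raised, with the secondary rollback failure emitted as a warning.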
diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py
index 5a47903f0..2f67943f1 100644
--- a/test/orm/test_unitofwork.py
+++ b/test/orm/test_unitofwork.py
@@ -260,7 +260,7 @@ class PKTest(fixtures.MappedTest):
def define_tables(cls, metadata):
Table('multipk1', metadata,
Column('multi_id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=not testing.against('sqlite')),
Column('multi_rev', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('value', String(100)))
diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py
index 9e9f400be..c8ce13c91 100644
--- a/test/orm/test_unitofworkv2.py
+++ b/test/orm/test_unitofworkv2.py
@@ -5,7 +5,8 @@ from sqlalchemy.testing.schema import Table, Column
from test.orm import _fixtures
from sqlalchemy import exc, util
from sqlalchemy.testing import fixtures, config
-from sqlalchemy import Integer, String, ForeignKey, func, literal
+from sqlalchemy import Integer, String, ForeignKey, func, \
+ literal, FetchedValue, text
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, unitofwork, attributes,\
Session, exc as orm_exc
@@ -1848,6 +1849,450 @@ class NoAttrEventInFlushTest(fixtures.MappedTest):
eq_(t1.returning_val, 5)
+class EagerDefaultsTest(fixtures.MappedTest):
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'test', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('foo', Integer, server_default="3")
+ )
+
+ Table(
+ 'test2', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('foo', Integer),
+ Column('bar', Integer, server_onupdate=FetchedValue())
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class Thing(cls.Basic):
+ pass
+
+ class Thing2(cls.Basic):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ Thing = cls.classes.Thing
+
+ mapper(Thing, cls.tables.test, eager_defaults=True)
+
+ Thing2 = cls.classes.Thing2
+
+ mapper(Thing2, cls.tables.test2, eager_defaults=True)
+
+ def test_insert_defaults_present(self):
+ Thing = self.classes.Thing
+ s = Session()
+
+ t1, t2 = (
+ Thing(id=1, foo=5),
+ Thing(id=2, foo=10)
+ )
+
+ s.add_all([t1, t2])
+
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "INSERT INTO test (id, foo) VALUES (:id, :foo)",
+ [{'foo': 5, 'id': 1}, {'foo': 10, 'id': 2}]
+ ),
+ )
+
+ def go():
+ eq_(t1.foo, 5)
+ eq_(t2.foo, 10)
+
+ self.assert_sql_count(testing.db, go, 0)
+
+ def test_insert_defaults_present_as_expr(self):
+ Thing = self.classes.Thing
+ s = Session()
+
+ t1, t2 = (
+ Thing(id=1, foo=text("2 + 5")),
+ Thing(id=2, foo=text("5 + 5"))
+ )
+
+ s.add_all([t1, t2])
+
+ if testing.db.dialect.implicit_returning:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "INSERT INTO test (id, foo) VALUES (%(id)s, 2 + 5) "
+ "RETURNING test.foo",
+ [{'id': 1}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "INSERT INTO test (id, foo) VALUES (%(id)s, 5 + 5) "
+ "RETURNING test.foo",
+ [{'id': 2}],
+ dialect='postgresql'
+ )
+ )
+
+ else:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "INSERT INTO test (id, foo) VALUES (:id, 2 + 5)",
+ [{'id': 1}]
+ ),
+ CompiledSQL(
+ "INSERT INTO test (id, foo) VALUES (:id, 5 + 5)",
+ [{'id': 2}]
+ ),
+ CompiledSQL(
+ "SELECT test.foo AS test_foo FROM test "
+ "WHERE test.id = :param_1",
+ [{'param_1': 1}]
+ ),
+ CompiledSQL(
+ "SELECT test.foo AS test_foo FROM test "
+ "WHERE test.id = :param_1",
+ [{'param_1': 2}]
+ ),
+ )
+
+ def go():
+ eq_(t1.foo, 7)
+ eq_(t2.foo, 10)
+
+ self.assert_sql_count(testing.db, go, 0)
+
+ def test_insert_defaults_nonpresent(self):
+ Thing = self.classes.Thing
+ s = Session()
+
+ t1, t2 = (
+ Thing(id=1),
+ Thing(id=2)
+ )
+
+ s.add_all([t1, t2])
+
+ if testing.db.dialect.implicit_returning:
+ self.assert_sql_execution(
+ testing.db,
+ s.commit,
+ CompiledSQL(
+ "INSERT INTO test (id) VALUES (%(id)s) RETURNING test.foo",
+ [{'id': 1}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "INSERT INTO test (id) VALUES (%(id)s) RETURNING test.foo",
+ [{'id': 2}],
+ dialect='postgresql'
+ ),
+ )
+ else:
+ self.assert_sql_execution(
+ testing.db,
+ s.commit,
+ CompiledSQL(
+ "INSERT INTO test (id) VALUES (:id)",
+ [{'id': 1}, {'id': 2}]
+ ),
+ CompiledSQL(
+ "SELECT test.foo AS test_foo FROM test "
+ "WHERE test.id = :param_1",
+ [{'param_1': 1}]
+ ),
+ CompiledSQL(
+ "SELECT test.foo AS test_foo FROM test "
+ "WHERE test.id = :param_1",
+ [{'param_1': 2}]
+ )
+ )
+
+ def test_update_defaults_nonpresent(self):
+ Thing2 = self.classes.Thing2
+ s = Session()
+
+ t1, t2, t3, t4 = (
+ Thing2(id=1, foo=1, bar=2),
+ Thing2(id=2, foo=2, bar=3),
+ Thing2(id=3, foo=3, bar=4),
+ Thing2(id=4, foo=4, bar=5)
+ )
+
+ s.add_all([t1, t2, t3, t4])
+ s.flush()
+
+ t1.foo = 5
+ t2.foo = 6
+ t2.bar = 10
+ t3.foo = 7
+ t4.foo = 8
+ t4.bar = 12
+
+ if testing.db.dialect.implicit_returning:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s "
+ "WHERE test2.id = %(test2_id)s "
+ "RETURNING test2.bar",
+ [{'foo': 5, 'test2_id': 1}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
+ "WHERE test2.id = %(test2_id)s",
+ [{'foo': 6, 'bar': 10, 'test2_id': 2}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s "
+ "WHERE test2.id = %(test2_id)s "
+ "RETURNING test2.bar",
+ [{'foo': 7, 'test2_id': 3}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
+ "WHERE test2.id = %(test2_id)s",
+ [{'foo': 8, 'bar': 12, 'test2_id': 4}],
+ dialect='postgresql'
+ ),
+ )
+ else:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
+ [{'foo': 5, 'test2_id': 1}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=:bar "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 6, 'bar': 10, 'test2_id': 2}],
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
+ [{'foo': 7, 'test2_id': 3}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=:bar "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 8, 'bar': 12, 'test2_id': 4}],
+ ),
+ CompiledSQL(
+ "SELECT test2.bar AS test2_bar FROM test2 "
+ "WHERE test2.id = :param_1",
+ [{'param_1': 1}]
+ ),
+ CompiledSQL(
+ "SELECT test2.bar AS test2_bar FROM test2 "
+ "WHERE test2.id = :param_1",
+ [{'param_1': 3}]
+ )
+ )
+
+ def go():
+ eq_(t1.bar, 2)
+ eq_(t2.bar, 10)
+ eq_(t3.bar, 4)
+ eq_(t4.bar, 12)
+
+ self.assert_sql_count(testing.db, go, 0)
+
+ def test_update_defaults_present_as_expr(self):
+ Thing2 = self.classes.Thing2
+ s = Session()
+
+ t1, t2, t3, t4 = (
+ Thing2(id=1, foo=1, bar=2),
+ Thing2(id=2, foo=2, bar=3),
+ Thing2(id=3, foo=3, bar=4),
+ Thing2(id=4, foo=4, bar=5)
+ )
+
+ s.add_all([t1, t2, t3, t4])
+ s.flush()
+
+ t1.foo = 5
+ t1.bar = text("1 + 1")
+ t2.foo = 6
+ t2.bar = 10
+ t3.foo = 7
+ t4.foo = 8
+ t4.bar = text("5 + 7")
+
+ if testing.db.dialect.implicit_returning:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s, bar=1 + 1 "
+ "WHERE test2.id = %(test2_id)s "
+ "RETURNING test2.bar",
+ [{'foo': 5, 'test2_id': 1}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
+ "WHERE test2.id = %(test2_id)s",
+ [{'foo': 6, 'bar': 10, 'test2_id': 2}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s "
+ "WHERE test2.id = %(test2_id)s "
+ "RETURNING test2.bar",
+ [{'foo': 7, 'test2_id': 3}],
+ dialect='postgresql'
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=%(foo)s, bar=5 + 7 "
+ "WHERE test2.id = %(test2_id)s RETURNING test2.bar",
+ [{'foo': 8, 'test2_id': 4}],
+ dialect='postgresql'
+ ),
+ )
+ else:
+ self.assert_sql_execution(
+ testing.db,
+ s.flush,
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=1 + 1 "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 5, 'test2_id': 1}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=:bar "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 6, 'bar': 10, 'test2_id': 2}],
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
+ [{'foo': 7, 'test2_id': 3}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=5 + 7 "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 8, 'test2_id': 4}],
+ ),
+ CompiledSQL(
+ "SELECT test2.bar AS test2_bar FROM test2 "
+ "WHERE test2.id = :param_1",
+ [{'param_1': 1}]
+ ),
+ CompiledSQL(
+ "SELECT test2.bar AS test2_bar FROM test2 "
+ "WHERE test2.id = :param_1",
+ [{'param_1': 3}]
+ ),
+ CompiledSQL(
+ "SELECT test2.bar AS test2_bar FROM test2 "
+ "WHERE test2.id = :param_1",
+ [{'param_1': 4}]
+ )
+ )
+
+ def go():
+ eq_(t1.bar, 2)
+ eq_(t2.bar, 10)
+ eq_(t3.bar, 4)
+ eq_(t4.bar, 12)
+
+ self.assert_sql_count(testing.db, go, 0)
+
+ def test_insert_defaults_bulk_insert(self):
+ Thing = self.classes.Thing
+ s = Session()
+
+ mappings = [
+ {"id": 1},
+ {"id": 2}
+ ]
+
+ self.assert_sql_execution(
+ testing.db,
+ lambda: s.bulk_insert_mappings(Thing, mappings),
+ CompiledSQL(
+ "INSERT INTO test (id) VALUES (:id)",
+ [{'id': 1}, {'id': 2}]
+ )
+ )
+
+ def test_update_defaults_bulk_update(self):
+ Thing2 = self.classes.Thing2
+ s = Session()
+
+ t1, t2, t3, t4 = (
+ Thing2(id=1, foo=1, bar=2),
+ Thing2(id=2, foo=2, bar=3),
+ Thing2(id=3, foo=3, bar=4),
+ Thing2(id=4, foo=4, bar=5)
+ )
+
+ s.add_all([t1, t2, t3, t4])
+ s.flush()
+
+ mappings = [
+ {"id": 1, "foo": 5},
+ {"id": 2, "foo": 6, "bar": 10},
+ {"id": 3, "foo": 7},
+ {"id": 4, "foo": 8}
+ ]
+
+ self.assert_sql_execution(
+ testing.db,
+ lambda: s.bulk_update_mappings(Thing2, mappings),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
+ [{'foo': 5, 'test2_id': 1}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo, bar=:bar "
+ "WHERE test2.id = :test2_id",
+ [{'foo': 6, 'bar': 10, 'test2_id': 2}]
+ ),
+ CompiledSQL(
+ "UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
+ [{'foo': 7, 'test2_id': 3}, {'foo': 8, 'test2_id': 4}]
+ )
+ )
+
+ def test_update_defaults_present(self):
+ Thing2 = self.classes.Thing2
+ s = Session()
+
+ t1, t2 = (
+ Thing2(id=1, foo=1, bar=2),
+ Thing2(id=2, foo=2, bar=3)
+ )
+
+ s.add_all([t1, t2])
+ s.flush()
+
+ t1.bar = 5
+ t2.bar = 10
+
+ self.assert_sql_execution(
+ testing.db,
+ s.commit,
+ CompiledSQL(
+ "UPDATE test2 SET bar=%(bar)s WHERE test2.id = %(test2_id)s",
+ [{'bar': 5, 'test2_id': 1}, {'bar': 10, 'test2_id': 2}],
+ dialect='postgresql'
+ )
+ )
+
class TypeWoBoolTest(fixtures.MappedTest, testing.AssertsExecutionResults):
"""test support for custom datatypes that return a non-__bool__ value
when compared via __eq__(), eg. ticket 3469"""
@@ -1954,3 +2399,215 @@ class TypeWoBoolTest(fixtures.MappedTest, testing.AssertsExecutionResults):
eq_(
s.query(Thing.value).scalar().text, "foo"
)
+
+
+class NullEvaluatingTest(fixtures.MappedTest, testing.AssertsExecutionResults):
+ @classmethod
+ def define_tables(cls, metadata):
+ from sqlalchemy import TypeDecorator
+
+ class EvalsNull(TypeDecorator):
+ impl = String(50)
+
+ should_evaluate_none = True
+
+ def process_bind_param(self, value, dialect):
+ if value is None:
+ value = 'nothing'
+ return value
+
+ Table(
+ 'test', metadata,
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('evals_null_no_default', EvalsNull()),
+ Column('evals_null_default', EvalsNull(), default='default_val'),
+ Column('no_eval_null_no_default', String(50)),
+ Column('no_eval_null_default', String(50), default='default_val'),
+ Column(
+ 'builtin_evals_null_no_default', String(50).evaluates_none()),
+ Column(
+ 'builtin_evals_null_default',
+ String(50).evaluates_none(), default='default_val'),
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class Thing(cls.Basic):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ Thing = cls.classes.Thing
+
+ mapper(Thing, cls.tables.test)
+
+ def _assert_col(self, name, value):
+ Thing = self.classes.Thing
+ s = Session()
+
+ col = getattr(Thing, name)
+ obj = s.query(col).filter(col == value).one()
+ eq_(obj[0], value)
+
+ def _test_insert(self, attr, expected):
+ Thing = self.classes.Thing
+
+ s = Session()
+ t1 = Thing(**{attr: None})
+ s.add(t1)
+ s.commit()
+
+ self._assert_col(attr, expected)
+
+ def _test_bulk_insert(self, attr, expected):
+ Thing = self.classes.Thing
+
+ s = Session()
+ s.bulk_insert_mappings(
+ Thing, [{attr: None}]
+ )
+ s.commit()
+
+ self._assert_col(attr, expected)
+
+ def _test_insert_novalue(self, attr, expected):
+ Thing = self.classes.Thing
+
+ s = Session()
+ t1 = Thing()
+ s.add(t1)
+ s.commit()
+
+ self._assert_col(attr, expected)
+
+ def _test_bulk_insert_novalue(self, attr, expected):
+ Thing = self.classes.Thing
+
+ s = Session()
+ s.bulk_insert_mappings(
+ Thing, [{}]
+ )
+ s.commit()
+
+ self._assert_col(attr, expected)
+
+ def test_evalnull_nodefault_insert(self):
+ self._test_insert(
+ "evals_null_no_default", 'nothing'
+ )
+
+ def test_evalnull_nodefault_bulk_insert(self):
+ self._test_bulk_insert(
+ "evals_null_no_default", 'nothing'
+ )
+
+ def test_evalnull_nodefault_insert_novalue(self):
+ self._test_insert_novalue(
+ "evals_null_no_default", None
+ )
+
+ def test_evalnull_nodefault_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "evals_null_no_default", None
+ )
+
+ def test_evalnull_default_insert(self):
+ self._test_insert(
+ "evals_null_default", 'nothing'
+ )
+
+ def test_evalnull_default_bulk_insert(self):
+ self._test_bulk_insert(
+ "evals_null_default", 'nothing'
+ )
+
+ def test_evalnull_default_insert_novalue(self):
+ self._test_insert_novalue(
+ "evals_null_default", 'default_val'
+ )
+
+ def test_evalnull_default_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "evals_null_default", 'default_val'
+ )
+
+ def test_no_evalnull_nodefault_insert(self):
+ self._test_insert(
+ "no_eval_null_no_default", None
+ )
+
+ def test_no_evalnull_nodefault_bulk_insert(self):
+ self._test_bulk_insert(
+ "no_eval_null_no_default", None
+ )
+
+ def test_no_evalnull_nodefault_insert_novalue(self):
+ self._test_insert_novalue(
+ "no_eval_null_no_default", None
+ )
+
+ def test_no_evalnull_nodefault_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "no_eval_null_no_default", None
+ )
+
+ def test_no_evalnull_default_insert(self):
+ self._test_insert(
+ "no_eval_null_default", 'default_val'
+ )
+
+ def test_no_evalnull_default_bulk_insert(self):
+ self._test_bulk_insert(
+ "no_eval_null_default", 'default_val'
+ )
+
+ def test_no_evalnull_default_insert_novalue(self):
+ self._test_insert_novalue(
+ "no_eval_null_default", 'default_val'
+ )
+
+ def test_no_evalnull_default_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "no_eval_null_default", 'default_val'
+ )
+
+ def test_builtin_evalnull_nodefault_insert(self):
+ self._test_insert(
+ "builtin_evals_null_no_default", None
+ )
+
+ def test_builtin_evalnull_nodefault_bulk_insert(self):
+ self._test_bulk_insert(
+ "builtin_evals_null_no_default", None
+ )
+
+ def test_builtin_evalnull_nodefault_insert_novalue(self):
+ self._test_insert_novalue(
+ "builtin_evals_null_no_default", None
+ )
+
+ def test_builtin_evalnull_nodefault_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "builtin_evals_null_no_default", None
+ )
+
+ def test_builtin_evalnull_default_insert(self):
+ self._test_insert(
+ "builtin_evals_null_default", None
+ )
+
+ def test_builtin_evalnull_default_bulk_insert(self):
+ self._test_bulk_insert(
+ "builtin_evals_null_default", None
+ )
+
+ def test_builtin_evalnull_default_insert_novalue(self):
+ self._test_insert_novalue(
+ "builtin_evals_null_default", 'default_val'
+ )
+
+ def test_builtin_evalnull_default_bulk_insert_novalue(self):
+ self._test_bulk_insert_novalue(
+ "builtin_evals_null_default", 'default_val'
+ )
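
The EagerDefaultsTest and NullEvaluatingTest additions above cover two new behaviors: eager_defaults fetching server-generated values at flush time (via RETURNING where available, otherwise an immediate SELECT), and types that opt into receiving None via should_evaluate_none / String().evaluates_none(). A minimal runnable sketch of the eager_defaults side, using SQLite purely for illustration:

from sqlalchemy import Column, Integer, MetaData, Table, create_engine
from sqlalchemy.orm import Session, mapper

metadata = MetaData()
test = Table(
    'test', metadata,
    Column('id', Integer, primary_key=True),
    Column('foo', Integer, server_default="3"),
)

class Thing(object):
    pass

# eager_defaults=True asks the ORM to fetch server defaults during flush
mapper(Thing, test, eager_defaults=True)

engine = create_engine("sqlite://")
metadata.create_all(engine)

s = Session(engine)
t = Thing()
s.add(t)
s.flush()
print(t.foo)   # 3 -- already loaded at flush time, no lazy SELECT on access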
diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py
index 973053947..593714a06 100644
--- a/test/orm/test_update_delete.py
+++ b/test/orm/test_update_delete.py
@@ -1,10 +1,11 @@
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
+from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, is_
from sqlalchemy.testing import fixtures
from sqlalchemy import Integer, String, ForeignKey, or_, exc, \
select, func, Boolean, case, text, column
from sqlalchemy.orm import mapper, relationship, backref, Session, \
joinedload, synonym, query
from sqlalchemy import testing
+from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Table, Column
@@ -609,6 +610,42 @@ class UpdateDeleteTest(fixtures.MappedTest):
synchronize_session='fetch')
assert john not in sess
+ def test_update_unordered_dict(self):
+ User = self.classes.User
+ session = Session()
+
+        # Do an update using an unordered dict and check that the
+        # parameters used are emitted in table order
+ with mock.patch.object(session, "execute") as exec_:
+ session.query(User).filter(User.id == 15).update(
+ {'name': 'foob', 'id': 123})
+ # Confirm that parameters are a dict instead of tuple or list
+ params_type = type(exec_.mock_calls[0][1][0].parameters)
+ is_(params_type, dict)
+
+ def test_update_preserve_parameter_order(self):
+ User = self.classes.User
+ session = Session()
+
+ # Do update using a tuple and check that order is preserved
+ with mock.patch.object(session, "execute") as exec_:
+ session.query(User).filter(User.id == 15).update(
+ (('id', 123), ('name', 'foob')),
+ update_args={"preserve_parameter_order": True})
+ cols = [c.key
+ for c in exec_.mock_calls[0][1][0]._parameter_ordering]
+ eq_(['id', 'name'], cols)
+
+ # Now invert the order and use a list instead, and check that order is
+ # also preserved
+ with mock.patch.object(session, "execute") as exec_:
+ session.query(User).filter(User.id == 15).update(
+ [('name', 'foob'), ('id', 123)],
+ update_args={"preserve_parameter_order": True})
+ cols = [c.key
+ for c in exec_.mock_calls[0][1][0]._parameter_ordering]
+ eq_(['name', 'id'], cols)
+
class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest):
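
The new Query.update() tests above rely on the 1.1 preserve_parameter_order option; as a usage sketch (assuming a mapped User class and an open Session), passing a sequence of 2-tuples keeps the SET clause in the given order instead of table-definition order:

session.query(User).filter(User.id == 15).update(
    [("name", "foob"), ("id", 123)],
    update_args={"preserve_parameter_order": True},
)
# roughly: UPDATE users SET name=:name, id=:id WHERE users.id = :id_1

With a plain (unordered) dict, parameters continue to be emitted in table order, which is what test_update_unordered_dict verifies.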
diff --git a/test/orm/test_versioning.py b/test/orm/test_versioning.py
index d46799c5a..07b090c60 100644
--- a/test/orm/test_versioning.py
+++ b/test/orm/test_versioning.py
@@ -112,6 +112,61 @@ class VersioningTest(fixtures.MappedTest):
else:
s1.commit()
+ def test_multiple_updates(self):
+ Foo = self.classes.Foo
+
+ s1 = self._fixture()
+ f1 = Foo(value='f1')
+ f2 = Foo(value='f2')
+ s1.add_all((f1, f2))
+ s1.commit()
+
+ f1.value = 'f1rev2'
+ f2.value = 'f2rev2'
+ s1.commit()
+
+ eq_(
+ s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
+ [(f1.id, 'f1rev2', 2), (f2.id, 'f2rev2', 2)]
+ )
+
+ def test_bulk_insert(self):
+ Foo = self.classes.Foo
+
+ s1 = self._fixture()
+ s1.bulk_insert_mappings(
+ Foo,
+ [{"id": 1, "value": "f1"}, {"id": 2, "value": "f2"}]
+ )
+ eq_(
+ s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
+ [(1, 'f1', 1), (2, 'f2', 1)]
+ )
+
+ def test_bulk_update(self):
+ Foo = self.classes.Foo
+
+ s1 = self._fixture()
+ f1 = Foo(value='f1')
+ f2 = Foo(value='f2')
+ s1.add_all((f1, f2))
+ s1.commit()
+
+ s1.bulk_update_mappings(
+ Foo,
+ [
+ {"id": f1.id, "value": "f1rev2", "version_id": 1},
+ {"id": f2.id, "value": "f2rev2", "version_id": 1},
+
+ ]
+ )
+ s1.commit()
+
+ eq_(
+ s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
+ [(f1.id, 'f1rev2', 2), (f2.id, 'f2rev2', 2)]
+ )
+
@testing.emits_warning_on(
'+zxjdbc', r'.*does not support (update|delete)d rowcount')
def test_bump_version(self):
@@ -876,19 +931,26 @@ class ServerVersioningTest(fixtures.MappedTest):
class Bar(cls.Basic):
pass
- def _fixture(self, expire_on_commit=True):
+ def _fixture(self, expire_on_commit=True, eager_defaults=False):
Foo, version_table = self.classes.Foo, self.tables.version_table
mapper(
Foo, version_table, version_id_col=version_table.c.version_id,
version_id_generator=False,
+ eager_defaults=eager_defaults
)
s1 = Session(expire_on_commit=expire_on_commit)
return s1
def test_insert_col(self):
- sess = self._fixture()
+ self._test_insert_col()
+
+ def test_insert_col_eager_defaults(self):
+ self._test_insert_col(eager_defaults=True)
+
+ def _test_insert_col(self, **kw):
+ sess = self._fixture(**kw)
f1 = self.classes.Foo(value='f1')
sess.add(f1)
@@ -917,7 +979,13 @@ class ServerVersioningTest(fixtures.MappedTest):
self.assert_sql_execution(testing.db, sess.flush, *statements)
def test_update_col(self):
- sess = self._fixture()
+ self._test_update_col()
+
+ def test_update_col_eager_defaults(self):
+ self._test_update_col(eager_defaults=True)
+
+ def _test_update_col(self, **kw):
+ sess = self._fixture(**kw)
f1 = self.classes.Foo(value='f1')
sess.add(f1)
@@ -952,6 +1020,76 @@ class ServerVersioningTest(fixtures.MappedTest):
)
self.assert_sql_execution(testing.db, sess.flush, *statements)
+ def test_multi_update(self):
+ sess = self._fixture()
+
+ f1 = self.classes.Foo(value='f1')
+ f2 = self.classes.Foo(value='f2')
+ f3 = self.classes.Foo(value='f3')
+ sess.add_all([f1, f2, f3])
+ sess.flush()
+
+ f1.value = 'f1a'
+ f2.value = 'f2a'
+ f3.value = 'f3a'
+
+ statements = [
+            # note that assertsql checks these statements against the
+            # "default" dialect - on a backend with RETURNING, the actual
+            # statement also includes a RETURNING clause
+ CompiledSQL(
+ "UPDATE version_table SET version_id=2, value=:value "
+ "WHERE version_table.id = :version_table_id AND "
+ "version_table.version_id = :version_table_version_id",
+ lambda ctx: [
+ {
+ "version_table_id": 1,
+ "version_table_version_id": 1, "value": "f1a"}]
+ ),
+ CompiledSQL(
+ "UPDATE version_table SET version_id=2, value=:value "
+ "WHERE version_table.id = :version_table_id AND "
+ "version_table.version_id = :version_table_version_id",
+ lambda ctx: [
+ {
+ "version_table_id": 2,
+ "version_table_version_id": 1, "value": "f2a"}]
+ ),
+ CompiledSQL(
+ "UPDATE version_table SET version_id=2, value=:value "
+ "WHERE version_table.id = :version_table_id AND "
+ "version_table.version_id = :version_table_version_id",
+ lambda ctx: [
+ {
+ "version_table_id": 3,
+ "version_table_version_id": 1, "value": "f3a"}]
+ )
+ ]
+ if not testing.db.dialect.implicit_returning:
+            # for DBs without implicit RETURNING, we must immediately
+            # SELECT for the new version id
+ statements.extend([
+ CompiledSQL(
+ "SELECT version_table.version_id "
+ "AS version_table_version_id "
+ "FROM version_table WHERE version_table.id = :param_1",
+ lambda ctx: [{"param_1": 1}]
+ ),
+ CompiledSQL(
+ "SELECT version_table.version_id "
+ "AS version_table_version_id "
+ "FROM version_table WHERE version_table.id = :param_1",
+ lambda ctx: [{"param_1": 2}]
+ ),
+ CompiledSQL(
+ "SELECT version_table.version_id "
+ "AS version_table_version_id "
+ "FROM version_table WHERE version_table.id = :param_1",
+ lambda ctx: [{"param_1": 3}]
+ )
+ ])
+ self.assert_sql_execution(testing.db, sess.flush, *statements)
+
def test_delete_col(self):
sess = self._fixture()
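
The new bulk-operation versioning tests above show that bulk_insert_mappings() and bulk_update_mappings() now participate in version counting when the version column is part of the mappings; a sketch, assuming Foo is mapped with version_id_col as in the fixture:

s.bulk_insert_mappings(
    Foo,
    [{"id": 1, "value": "f1"}, {"id": 2, "value": "f2"}]
)
# rows start out at version_id 1

s.bulk_update_mappings(
    Foo,
    [
        {"id": 1, "value": "f1rev2", "version_id": 1},
        {"id": 2, "value": "f2rev2", "version_id": 1},
    ]
)
# each UPDATE matches on the given version_id and bumps the counter;
# the test asserts both rows end up at version 2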
diff --git a/test/profiles.txt b/test/profiles.txt
index 691d1a54d..f6b682be1 100644
--- a/test/profiles.txt
+++ b/test/profiles.txt
@@ -38,7 +38,7 @@ test.aaa_profiling.test_compiler.CompileTest.test_insert 3.4_sqlite_pysqlite_noc
test.aaa_profiling.test_compiler.CompileTest.test_select 2.6_sqlite_pysqlite_nocextensions 157
test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqldb_cextensions 153
test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqldb_nocextensions 153
-test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_cextensions 153
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_cextensions 157
test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_nocextensions 153
test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_cextensions 153
test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_nocextensions 153
@@ -60,7 +60,7 @@ test.aaa_profiling.test_compiler.CompileTest.test_select 3.4_sqlite_pysqlite_noc
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.6_sqlite_pysqlite_nocextensions 190
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqldb_cextensions 188
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_mysql_mysqldb_nocextensions 188
-test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_cextensions 188
+test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_cextensions 190
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_postgresql_psycopg2_nocextensions 188
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_cextensions 188
test.aaa_profiling.test_compiler.CompileTest.test_select_labels 2.7_sqlite_pysqlite_nocextensions 188
@@ -104,7 +104,7 @@ test.aaa_profiling.test_compiler.CompileTest.test_update 3.4_sqlite_pysqlite_noc
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.6_sqlite_pysqlite_nocextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqldb_cextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqldb_nocextensions 146
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_cextensions 146
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_cextensions 147
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_nocextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_cextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_nocextensions 146
@@ -117,7 +117,7 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.3_sqlite_
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_mysql_pymysql_cextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_mysql_pymysql_nocextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_postgresql_psycopg2_cextensions 146
-test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_postgresql_psycopg2_nocextensions 146
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_postgresql_psycopg2_nocextensions 147
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_sqlite_pysqlite_cextensions 146
test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_sqlite_pysqlite_nocextensions 146
@@ -126,7 +126,7 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 3.4_sqlite_
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.6_sqlite_pysqlite_nocextensions 4262
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_mysql_mysqldb_cextensions 4262
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_mysql_mysqldb_nocextensions 4262
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgresql_psycopg2_cextensions 4262
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgresql_psycopg2_cextensions 4257
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_postgresql_psycopg2_nocextensions 4262
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_cextensions 4262
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 2.7_sqlite_pysqlite_nocextensions 4262
@@ -139,7 +139,7 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.3_sqlite_
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_mysql_pymysql_cextensions 4263
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_mysql_pymysql_nocextensions 4263
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_cextensions 4263
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_nocextensions 4263
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_postgresql_psycopg2_nocextensions 4258
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_cextensions 4263
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set 3.4_sqlite_pysqlite_nocextensions 4263
@@ -170,7 +170,7 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.6_sqlite_pysqlite_nocextensions 26358
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_mysql_mysqldb_cextensions 16194
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_mysql_mysqldb_nocextensions 25197
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_cextensions 28177
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_cextensions 29184
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_postgresql_psycopg2_nocextensions 37180
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_cextensions 16329
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 2.7_sqlite_pysqlite_nocextensions 25332
@@ -183,7 +183,7 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.3_sqlite_pysqlite_n
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_mysql_pymysql_cextensions 83733
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_mysql_pymysql_nocextensions 92736
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_cextensions 18221
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_nocextensions 27224
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_postgresql_psycopg2_nocextensions 27201
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_cextensions 18393
test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_nocextensions 27396
@@ -192,7 +192,7 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 3.4_sqlite_pysqlite_n
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.6_sqlite_pysqlite_nocextensions 26282
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_mysql_mysqldb_cextensions 22212
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_mysql_mysqldb_nocextensions 25215
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_cextensions 22183
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_cextensions 23196
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_postgresql_psycopg2_nocextensions 25186
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_cextensions 22269
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 2.7_sqlite_pysqlite_nocextensions 25272
@@ -205,7 +205,7 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.3_sqlite_pys
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_mysql_pymysql_cextensions 47353
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_mysql_pymysql_nocextensions 50356
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_cextensions 24215
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_nocextensions 27218
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_postgresql_psycopg2_nocextensions 27220
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_cextensions 24321
test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 3.4_sqlite_pysqlite_nocextensions 27324
@@ -236,7 +236,7 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.6_sqlite_pysqlite_nocextensions 161101
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_mysql_mysqldb_cextensions 127101
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_mysql_mysqldb_nocextensions 128851
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_cextensions 120101
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_cextensions 123351
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_nocextensions 121851
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 156351
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_nocextensions 158054
@@ -249,7 +249,7 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_mysql_pymysql_cextensions 187056
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_mysql_pymysql_nocextensions 188855
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_cextensions 128556
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_nocextensions 130306
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_postgresql_psycopg2_nocextensions 130356
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_cextensions 168806
test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 3.4_sqlite_pysqlite_nocextensions 170556
@@ -258,7 +258,7 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.6_sqlite_pysqlite_nocextensions 21505
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_mysql_mysqldb_cextensions 19393
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_mysql_mysqldb_nocextensions 19597
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_cextensions 18881
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_cextensions 19024
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_nocextensions 19085
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 21186
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_nocextensions 21437
@@ -271,7 +271,7 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_mysql_pymysql_cextensions 23716
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_mysql_pymysql_nocextensions 23871
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_cextensions 19552
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_nocextensions 19744
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_postgresql_psycopg2_nocextensions 19727
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_cextensions 22051
test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.4_sqlite_pysqlite_nocextensions 22255
@@ -280,7 +280,7 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 3.
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.6_sqlite_pysqlite_nocextensions 1520
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_cextensions 1400
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_nocextensions 1415
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1319
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1309
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_nocextensions 1334
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1527
test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_nocextensions 1542
@@ -293,7 +293,7 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.3_sqlite_pysqlite_nocext
test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_mysql_pymysql_cextensions 2038
test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_mysql_pymysql_nocextensions 2053
test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_cextensions 1335
-test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1350
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_postgresql_psycopg2_nocextensions 1354
test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_cextensions 1577
test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_nocextensions 1592
@@ -302,7 +302,7 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_load 3.4_sqlite_pysqlite_nocext
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.6_sqlite_pysqlite_nocextensions 89,19
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_mysql_mysqldb_cextensions 93,19
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_mysql_mysqldb_nocextensions 93,19
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_cextensions 93,19
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_cextensions 91,19
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_nocextensions 93,19
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 93,19
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 93,19
@@ -315,7 +315,7 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.3_sqlite_pysqlite_noc
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_mysql_pymysql_cextensions 92,20
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_mysql_pymysql_nocextensions 92,20
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_cextensions 92,20
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_nocextensions 92,20
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_postgresql_psycopg2_nocextensions 94,20
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_cextensions 92,20
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_nocextensions 92,20
@@ -324,7 +324,7 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 3.4_sqlite_pysqlite_noc
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.6_sqlite_pysqlite_nocextensions 8064
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_mysql_mysqldb_cextensions 6220
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_mysql_mysqldb_nocextensions 6750
-test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_postgresql_psycopg2_cextensions 6790
+test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_postgresql_psycopg2_cextensions 6798
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_postgresql_psycopg2_nocextensions 7320
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_sqlite_pysqlite_cextensions 7564
test.aaa_profiling.test_orm.QueryTest.test_query_cols 2.7_sqlite_pysqlite_nocextensions 8094
@@ -337,7 +337,7 @@ test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.3_sqlite_pysqlite_nocext
test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_mysql_pymysql_cextensions 13744
test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_mysql_pymysql_nocextensions 14274
test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_postgresql_psycopg2_cextensions 6234
-test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_postgresql_psycopg2_nocextensions 6674
+test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_postgresql_psycopg2_nocextensions 6702
test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_sqlite_pysqlite_cextensions 7846
test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_sqlite_pysqlite_nocextensions 8376
@@ -346,7 +346,7 @@ test.aaa_profiling.test_orm.QueryTest.test_query_cols 3.4_sqlite_pysqlite_nocext
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.6_sqlite_pysqlite_nocextensions 1156
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_mysql_mysqldb_cextensions 1145
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_mysql_mysqldb_nocextensions 1148
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_postgresql_psycopg2_cextensions 1160
+test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_postgresql_psycopg2_cextensions 1139
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_postgresql_psycopg2_nocextensions 1161
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_sqlite_pysqlite_cextensions 1151
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 2.7_sqlite_pysqlite_nocextensions 1145
@@ -359,7 +359,7 @@ test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.3_sqlite_pysqlite_noc
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_mysql_pymysql_cextensions 1254
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_mysql_pymysql_nocextensions 1280
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_postgresql_psycopg2_cextensions 1247
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_postgresql_psycopg2_nocextensions 1262
+test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_postgresql_psycopg2_nocextensions 1263
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_sqlite_pysqlite_cextensions 1238
test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_sqlite_pysqlite_nocextensions 1272
@@ -368,7 +368,7 @@ test.aaa_profiling.test_orm.SessionTest.test_expire_lots 3.4_sqlite_pysqlite_noc
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.6_sqlite_pysqlite_nocextensions 97
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_mysql_mysqldb_cextensions 95
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_mysql_mysqldb_nocextensions 95
-test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_cextensions 95
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_cextensions 96
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_nocextensions 95
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 95
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_nocextensions 95
@@ -500,7 +500,7 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 3.4
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.6_sqlite_pysqlite_nocextensions 15439
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 488
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_nocextensions 15488
-test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20477
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20497
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_nocextensions 35477
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 419
test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_nocextensions 15419
@@ -522,7 +522,7 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string 3.4_sqlite_pysqlite_
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.6_sqlite_pysqlite_nocextensions 15439
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 488
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_nocextensions 45488
-test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20477
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20497
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_nocextensions 35477
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 419
test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_nocextensions 15419
@@ -541,18 +541,18 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 3.4_sqlite_pysqlite
# TEST: test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5811,295,3577,11462,1134,1973,2434
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 5823,295,3721,11938,1146,2017,2481
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 5833,295,3681,12720,1241,1980,2655
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 5591,277,3569,11458,1134,1924,2489
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 5613,277,3665,12630,1228,1931,2681
test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 5619,277,3705,11902,1144,1966,2532
-test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5624,277,3801,13074,1238,1970,2724
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 5625,277,3809,13110,1240,1975,2733
# TEST: test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 6256,402,6599,17140,1146,2569
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_cextensions 6437,410,6761,17665,1159,2627
test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 2.7_postgresql_psycopg2_nocextensions 6341,407,6703,18167,1244,2598
test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_cextensions 6228,393,6747,17582,1148,2623
test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.3_postgresql_psycopg2_nocextensions 6318,398,6851,18609,1234,2652
test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_cextensions 6257,393,6891,18056,1159,2671
-test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 6341,398,6995,19083,1245,2700
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_invocation 3.4_postgresql_psycopg2_nocextensions 6418,401,7005,19115,1247,2706
diff --git a/test/requirements.py b/test/requirements.py
index db4daca20..ff93a9c3d 100644
--- a/test/requirements.py
+++ b/test/requirements.py
@@ -293,7 +293,6 @@ class DefaultRequirements(SuiteRequirements):
named 'test_schema'."""
return skip_if([
- "sqlite",
"firebird"
], "no schema support")
@@ -362,6 +361,32 @@ class DefaultRequirements(SuiteRequirements):
], 'no support for EXCEPT')
@property
+ def parens_in_union_contained_select_w_limit_offset(self):
+ """Target database must support parenthesized SELECT in UNION
+ when LIMIT/OFFSET is specifically present.
+
+    E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
+
+ This is known to fail on SQLite.
+
+ """
+ return fails_if('sqlite')
+
+ @property
+ def parens_in_union_contained_select_wo_limit_offset(self):
+ """Target database must support parenthesized SELECT in UNION
+ when OFFSET/LIMIT is specifically not present.
+
+    E.g. (SELECT ...) UNION (SELECT ..)
+
+ This is known to fail on SQLite. It also fails on Oracle
+ because without LIMIT/OFFSET, there is currently no step that
+ creates an additional subquery.
+
+ """
+ return fails_if(['sqlite', 'oracle'])
+
+ @property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
@@ -758,7 +783,7 @@ class DefaultRequirements(SuiteRequirements):
@property
def postgresql_jsonb(self):
- return skip_if(
+ return only_on("postgresql >= 9.4") + skip_if(
lambda config:
config.db.dialect.driver == "pg8000" and
config.db.dialect._dbapi_version <= (1, 10, 1)
@@ -841,6 +866,10 @@ class DefaultRequirements(SuiteRequirements):
return skip_if(["oracle", "firebird"], "non-standard SELECT scalar syntax")
@property
+ def mysql_fsp(self):
+ return only_if('mysql >= 5.6.4')
+
+ @property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
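
These requirement properties are consumed elsewhere in the suite as decorators, as with @testing.requires.savepoints in the transaction test above; a sketch of how the new mysql_fsp requirement would gate a test (the test name and body here are illustrative):

@testing.requires.mysql_fsp
def test_fractional_seconds_roundtrip(self):
    # only runs against MySQL >= 5.6.4, which supports fractional
    # seconds precision (fsp) on temporal types
    pass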
diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py
index 04e3171a9..ffd13309b 100644
--- a/test/sql/test_compiler.py
+++ b/test/sql/test_compiler.py
@@ -18,7 +18,7 @@ from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
- over, subquery, case, true
+ over, subquery, case, true, CheckConstraint
import decimal
from sqlalchemy.util import u
from sqlalchemy import exc, sql, util, types, schema
@@ -1643,14 +1643,12 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
s = select([column('foo'), column('bar')])
- # ORDER BY's even though not supported by
- # all DB's, are rendered if requested
self.assert_compile(
union(
s.order_by("foo"),
s.order_by("bar")),
- "SELECT foo, bar ORDER BY foo UNION SELECT foo, bar ORDER BY bar")
- # self_group() is honored
+ "(SELECT foo, bar ORDER BY foo) UNION "
+ "(SELECT foo, bar ORDER BY bar)")
self.assert_compile(
union(s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group()),
@@ -1759,6 +1757,67 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"SELECT foo, bar FROM bat)"
)
+ # tests for [ticket:2528]
+ # sqlite hates all of these.
+ self.assert_compile(
+ union(
+ s.limit(1),
+ s.offset(2)
+ ),
+ "(SELECT foo, bar FROM bat LIMIT :param_1) "
+ "UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_2)"
+ )
+
+ self.assert_compile(
+ union(
+ s.order_by(column('bar')),
+ s.offset(2)
+ ),
+ "(SELECT foo, bar FROM bat ORDER BY bar) "
+ "UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_1)"
+ )
+
+ self.assert_compile(
+ union(
+ s.limit(1).alias('a'),
+ s.limit(2).alias('b')
+ ),
+ "(SELECT foo, bar FROM bat LIMIT :param_1) "
+ "UNION (SELECT foo, bar FROM bat LIMIT :param_2)"
+ )
+
+ self.assert_compile(
+ union(
+ s.limit(1).self_group(),
+ s.limit(2).self_group()
+ ),
+ "(SELECT foo, bar FROM bat LIMIT :param_1) "
+ "UNION (SELECT foo, bar FROM bat LIMIT :param_2)"
+ )
+
+ self.assert_compile(
+ union(s.limit(1), s.limit(2).offset(3)).alias().select(),
+ "SELECT anon_1.foo, anon_1.bar FROM "
+ "((SELECT foo, bar FROM bat LIMIT :param_1) "
+ "UNION (SELECT foo, bar FROM bat LIMIT :param_2 OFFSET :param_3)) "
+ "AS anon_1"
+ )
+
+ # this version works for SQLite
+ self.assert_compile(
+ union(
+ s.limit(1).alias().select(),
+ s.offset(2).alias().select(),
+ ),
+ "SELECT anon_1.foo, anon_1.bar "
+ "FROM (SELECT foo, bar FROM bat"
+ " LIMIT :param_1) AS anon_1 "
+ "UNION SELECT anon_2.foo, anon_2.bar "
+ "FROM (SELECT foo, bar "
+ "FROM bat"
+ " LIMIT -1 OFFSET :param_2) AS anon_2"
+ )
+
def test_binds(self):
for (
stmt,
@@ -2040,6 +2099,8 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
'Incorrect number of expected results')
eq_(str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[0])
+ eq_(str(tbl.c.v1.cast(Numeric).compile(dialect=dialect)),
+ 'CAST(casttest.v1 AS %s)' % expected_results[0])
eq_(str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[1])
eq_(str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
@@ -2855,6 +2916,45 @@ class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
+ def test_composite_pk_constraint_autoinc_first(self):
+ m = MetaData()
+ t = Table(
+ 't', m,
+ Column('a', Integer, primary_key=True),
+ Column('b', Integer, primary_key=True, autoincrement=True)
+ )
+ self.assert_compile(
+ schema.CreateTable(t),
+ "CREATE TABLE t ("
+ "a INTEGER NOT NULL, "
+ "b INTEGER NOT NULL, "
+ "PRIMARY KEY (b, a))"
+ )
+
+ def test_table_no_cols(self):
+ m = MetaData()
+ t1 = Table('t1', m)
+ self.assert_compile(
+ schema.CreateTable(t1),
+ "CREATE TABLE t1 ()"
+ )
+
+ def test_table_no_cols_w_constraint(self):
+ m = MetaData()
+ t1 = Table('t1', m, CheckConstraint('a = 1'))
+ self.assert_compile(
+ schema.CreateTable(t1),
+ "CREATE TABLE t1 (CHECK (a = 1))"
+ )
+
+ def test_table_one_col_w_constraint(self):
+ m = MetaData()
+ t1 = Table('t1', m, Column('q', Integer), CheckConstraint('a = 1'))
+ self.assert_compile(
+ schema.CreateTable(t1),
+ "CREATE TABLE t1 (q INTEGER, CHECK (a = 1))"
+ )
+
class InlineDefaultTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -3423,13 +3523,15 @@ class ResultMapTest(fixtures.TestBase):
tc = type_coerce(t.c.a, String)
stmt = select([t.c.a, l1, tc])
comp = stmt.compile()
- tc_anon_label = comp._create_result_map()['a_1'][1][0]
+ tc_anon_label = comp._create_result_map()['anon_1'][1][0]
eq_(
comp._create_result_map(),
{
'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'bar': ('bar', (l1, 'bar'), l1.type),
- 'a_1': ('%%(%d a)s' % id(tc), (tc_anon_label, 'a_1'), tc.type),
+ 'anon_1': (
+ '%%(%d anon)s' % id(tc),
+ (tc_anon_label, 'anon_1', tc), tc.type),
},
)
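The [ticket:2528] assertions added above pin down how UNION members that carry their own ORDER BY, LIMIT or OFFSET are parenthesized. As a minimal sketch of the compile behavior those tests expect (same Core constructs as the tests, default dialect; nothing here beyond what the assertions already show):

    from sqlalchemy import select, union
    from sqlalchemy.sql import column

    s = select([column('foo'), column('bar')])
    # each SELECT keeps its own ORDER BY and is wrapped in parentheses
    print(union(s.order_by('foo'), s.order_by('bar')))
    # (SELECT foo, bar ORDER BY foo) UNION (SELECT foo, bar ORDER BY bar)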
diff --git a/test/sql/test_defaults.py b/test/sql/test_defaults.py
index c154daa22..e21b21ab2 100644
--- a/test/sql/test_defaults.py
+++ b/test/sql/test_defaults.py
@@ -123,6 +123,14 @@ class DefaultTest(fixtures.TestBase):
def gen_default(cls, ctx):
return "hi"
+ class MyType(TypeDecorator):
+ impl = String(50)
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = "BIND" + value
+ return value
+
# select "count(1)" returns different results on different DBs also
# correct for "current_date" compatible as column default, value
# differences
@@ -211,7 +219,10 @@ class DefaultTest(fixtures.TestBase):
server_default='ddl'),
# python method w/ context
- Column('col10', String(20), default=MyClass.gen_default)
+ Column('col10', String(20), default=MyClass.gen_default),
+
+ # fixed default w/ type that has bound processor
+ Column('col11', MyType(), default='foo')
)
t.create()
@@ -290,6 +301,7 @@ class DefaultTest(fixtures.TestBase):
c = sa.ColumnDefault(fn)
c.arg("context")
+
@testing.fails_on('firebird', 'Data type unknown')
def test_standalone(self):
c = testing.db.engine.contextual_connect()
@@ -391,7 +403,7 @@ class DefaultTest(fixtures.TestBase):
today = datetime.date.today()
eq_(l.fetchall(), [
(x, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi')
+ 12, today, 'py', 'hi', 'BINDfoo')
for x in range(51, 54)])
t.insert().execute(col9=None)
@@ -401,7 +413,7 @@ class DefaultTest(fixtures.TestBase):
eq_(t.select(t.c.col1 == 54).execute().fetchall(),
[(54, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, None, 'hi')])
+ 12, today, None, 'hi', 'BINDfoo')])
def test_insertmany(self):
t.insert().execute({}, {}, {})
@@ -411,11 +423,11 @@ class DefaultTest(fixtures.TestBase):
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi'),
+ 12, today, 'py', 'hi', 'BINDfoo'),
(52, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi'),
+ 12, today, 'py', 'hi', 'BINDfoo'),
(53, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi')])
+ 12, today, 'py', 'hi', 'BINDfoo')])
@testing.requires.multivalues_inserts
def test_insert_multivalues(self):
@@ -427,11 +439,11 @@ class DefaultTest(fixtures.TestBase):
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi'),
+ 12, today, 'py', 'hi', 'BINDfoo'),
(52, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi'),
+ 12, today, 'py', 'hi', 'BINDfoo'),
(53, 'imthedefault', f, ts, ts, ctexec, True, False,
- 12, today, 'py', 'hi')])
+ 12, today, 'py', 'hi', 'BINDfoo')])
def test_no_embed_in_sql(self):
"""Using a DefaultGenerator, Sequence, DefaultClause
@@ -498,11 +510,11 @@ class DefaultTest(fixtures.TestBase):
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'im the update', f2, ts, ts, ctexec, False, False,
- 13, today, 'py', 'hi'),
+ 13, today, 'py', 'hi', 'BINDfoo'),
(52, 'im the update', f2, ts, ts, ctexec, True, False,
- 13, today, 'py', 'hi'),
+ 13, today, 'py', 'hi', 'BINDfoo'),
(53, 'im the update', f2, ts, ts, ctexec, True, False,
- 13, today, 'py', 'hi')])
+ 13, today, 'py', 'hi', 'BINDfoo')])
@testing.fails_on('firebird', 'Data type unknown')
def test_update(self):
@@ -514,7 +526,7 @@ class DefaultTest(fixtures.TestBase):
l = l.first()
eq_(l,
(pk, 'im the update', f2, None, None, ctexec, True, False,
- 13, datetime.date.today(), 'py', 'hi'))
+ 13, datetime.date.today(), 'py', 'hi', 'BINDfoo'))
eq_(11, f2)
@testing.fails_on('firebird', 'Data type unknown')
@@ -721,7 +733,6 @@ class AutoIncrementTest(fixtures.TablesTest):
)
assert x._autoincrement_column is None
- @testing.fails_on('sqlite', 'FIXME: unknown')
def test_non_autoincrement(self):
# sqlite INT primary keys can be non-unique! (only for ints)
nonai = Table(
@@ -735,8 +746,9 @@ class AutoIncrementTest(fixtures.TablesTest):
# mysql in legacy mode fails on second row
nonai.insert().execute(data='row 1')
nonai.insert().execute(data='row 2')
- assert_raises(
- sa.exc.DBAPIError,
+ assert_raises_message(
+ sa.exc.CompileError,
+ ".*has no Python-side or server-side default.*",
go
)
@@ -793,6 +805,36 @@ class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
)
self.assert_compile(
+ CreateSequence(Sequence(
+ 'foo_seq', increment=2, start=0, minvalue=0)),
+ "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 MINVALUE 0",
+ )
+
+ self.assert_compile(
+ CreateSequence(Sequence(
+ 'foo_seq', increment=2, start=1, maxvalue=5)),
+ "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 MAXVALUE 5",
+ )
+
+ self.assert_compile(
+ CreateSequence(Sequence(
+ 'foo_seq', increment=2, start=1, nomaxvalue=True)),
+ "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 NO MAXVALUE",
+ )
+
+ self.assert_compile(
+ CreateSequence(Sequence(
+ 'foo_seq', increment=2, start=0, nominvalue=True)),
+ "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 NO MINVALUE",
+ )
+
+ self.assert_compile(
+ CreateSequence(Sequence(
+ 'foo_seq', start=1, maxvalue=10, cycle=True)),
+ "CREATE SEQUENCE foo_seq START WITH 1 MAXVALUE 10 CYCLE",
+ )
+
+ self.assert_compile(
DropSequence(Sequence('foo_seq')),
"DROP SEQUENCE foo_seq",
)
@@ -1039,6 +1081,23 @@ class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
assert not self._has_sequence('s1')
assert not self._has_sequence('s2')
+ @testing.requires.returning
+ @testing.provide_metadata
+ def test_freestanding_sequence_via_autoinc(self):
+ t = Table(
+ 'some_table', self.metadata,
+ Column(
+ 'id', Integer,
+ autoincrement=True,
+ primary_key=True,
+ default=Sequence(
+ 'my_sequence', metadata=self.metadata).next_value())
+ )
+ self.metadata.create_all(testing.db)
+
+ result = testing.db.execute(t.insert())
+ eq_(result.inserted_primary_key, [1])
+
cartitems = sometable = metadata = None
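The SequenceDDLTest additions above cover the start, minvalue/maxvalue, nominvalue/nomaxvalue and cycle options on Sequence. A short sketch of the DDL string those options render, using the same identifiers as the tests and the default dialect:

    from sqlalchemy import Sequence
    from sqlalchemy.schema import CreateSequence

    seq = Sequence('foo_seq', start=1, maxvalue=10, cycle=True)
    # DDL construct stringifies via the default compiler
    print(CreateSequence(seq))
    # CREATE SEQUENCE foo_seq START WITH 1 MAXVALUE 10 CYCLE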
diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py
index ec8d9b5c0..51cfcb919 100644
--- a/test/sql/test_functions.py
+++ b/test/sql/test_functions.py
@@ -1,20 +1,20 @@
-from sqlalchemy.testing import eq_
+from sqlalchemy.testing import eq_, is_
import datetime
from sqlalchemy import func, select, Integer, literal, DateTime, Table, \
Column, Sequence, MetaData, extract, Date, String, bindparam, \
- literal_column
+ literal_column, Array, Numeric
from sqlalchemy.sql import table, column
from sqlalchemy import sql, util
from sqlalchemy.sql.compiler import BIND_TEMPLATES
from sqlalchemy.testing.engines import all_dialects
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import functions
-from sqlalchemy.sql.functions import GenericFunction
+from sqlalchemy.sql.functions import GenericFunction, FunctionElement
import decimal
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, engines
from sqlalchemy.dialects import sqlite, postgresql, mysql, oracle
-
+from sqlalchemy.testing import assert_raises_message
table1 = table('mytable',
column('myid', Integer),
@@ -52,7 +52,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
self.assert_compile(
fake_func('foo'),
"fake_func(%s)" %
- bindtemplate % {'name': 'param_1', 'position': 1},
+ bindtemplate % {'name': 'fake_func_1', 'position': 1},
dialect=dialect)
def test_use_labels(self):
@@ -89,7 +89,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
def test_generic_annotation(self):
fn = func.coalesce('x', 'y')._annotate({"foo": "bar"})
self.assert_compile(
- fn, "coalesce(:param_1, :param_2)"
+ fn, "coalesce(:coalesce_1, :coalesce_2)"
)
def test_custom_default_namespace(self):
@@ -140,7 +140,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
self.assert_compile(
func.my_func(1, 2),
- "my_func(:param_1, :param_2, :param_3)"
+ "my_func(:my_func_1, :my_func_2, :my_func_3)"
)
def test_custom_registered_identifier(self):
@@ -178,7 +178,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
self.assert_compile(
myfunc(1, 2, 3),
- "myfunc(:param_1, :param_2, :param_3)"
+ "myfunc(:myfunc_1, :myfunc_2, :myfunc_3)"
)
def test_namespacing_conflicts(self):
@@ -188,7 +188,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
assert isinstance(func.count().type, sqltypes.Integer)
self.assert_compile(func.count(), 'count(*)')
- self.assert_compile(func.count(1), 'count(:param_1)')
+ self.assert_compile(func.count(1), 'count(:count_1)')
c = column('abc')
self.assert_compile(func.count(c), 'count(abc)')
@@ -378,7 +378,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
def test_funcfilter_empty(self):
self.assert_compile(
func.count(1).filter(),
- "count(:param_1)"
+ "count(:count_1)"
)
def test_funcfilter_criterion(self):
@@ -386,7 +386,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
func.count(1).filter(
table1.c.name != None
),
- "count(:param_1) FILTER (WHERE mytable.name IS NOT NULL)"
+ "count(:count_1) FILTER (WHERE mytable.name IS NOT NULL)"
)
def test_funcfilter_compound_criterion(self):
@@ -395,7 +395,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
table1.c.name == None,
table1.c.myid > 0
),
- "count(:param_1) FILTER (WHERE mytable.name IS NULL AND "
+ "count(:count_1) FILTER (WHERE mytable.name IS NULL AND "
"mytable.myid > :myid_1)"
)
@@ -404,7 +404,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
select([func.count(1).filter(
table1.c.description != None
).label('foo')]),
- "SELECT count(:param_1) FILTER (WHERE mytable.description "
+ "SELECT count(:count_1) FILTER (WHERE mytable.description "
"IS NOT NULL) AS foo FROM mytable"
)
@@ -429,7 +429,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
table1.c.name == 'name'
)
]),
- "SELECT count(:param_1) FILTER (WHERE mytable.name = :name_1) "
+ "SELECT count(:count_1) FILTER (WHERE mytable.name = :name_1) "
"AS anon_1 FROM mytable"
)
@@ -443,7 +443,7 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
table1.c.description == 'description'
)
]),
- "SELECT count(:param_1) FILTER (WHERE "
+ "SELECT count(:count_1) FILTER (WHERE "
"mytable.name = :name_1 AND mytable.description = :description_1) "
"AS anon_1 FROM mytable"
)
@@ -477,6 +477,121 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
"AS anon_1 FROM mytable"
)
+ def test_funcfilter_within_group(self):
+ stmt = select([
+ table1.c.myid,
+ func.percentile_cont(0.5).within_group(
+ table1.c.name
+ )
+ ])
+ self.assert_compile(
+ stmt,
+ "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
+ "WITHIN GROUP (ORDER BY mytable.name) "
+ "AS anon_1 "
+ "FROM mytable",
+ {'percentile_cont_1': 0.5}
+ )
+
+ def test_funcfilter_within_group_multi(self):
+ stmt = select([
+ table1.c.myid,
+ func.percentile_cont(0.5).within_group(
+ table1.c.name, table1.c.description
+ )
+ ])
+ self.assert_compile(
+ stmt,
+ "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
+ "WITHIN GROUP (ORDER BY mytable.name, mytable.description) "
+ "AS anon_1 "
+ "FROM mytable",
+ {'percentile_cont_1': 0.5}
+ )
+
+ def test_funcfilter_within_group_desc(self):
+ stmt = select([
+ table1.c.myid,
+ func.percentile_cont(0.5).within_group(
+ table1.c.name.desc()
+ )
+ ])
+ self.assert_compile(
+ stmt,
+ "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
+ "WITHIN GROUP (ORDER BY mytable.name DESC) "
+ "AS anon_1 "
+ "FROM mytable",
+ {'percentile_cont_1': 0.5}
+ )
+
+ def test_funcfilter_within_group_w_over(self):
+ stmt = select([
+ table1.c.myid,
+ func.percentile_cont(0.5).within_group(
+ table1.c.name.desc()
+ ).over(partition_by=table1.c.description)
+ ])
+ self.assert_compile(
+ stmt,
+ "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
+ "WITHIN GROUP (ORDER BY mytable.name DESC) "
+ "OVER (PARTITION BY mytable.description) AS anon_1 "
+ "FROM mytable",
+ {'percentile_cont_1': 0.5}
+ )
+
+ def test_incorrect_none_type(self):
+ class MissingType(FunctionElement):
+ name = 'mt'
+ type = None
+
+ assert_raises_message(
+ TypeError,
+ "Object None associated with '.type' attribute is "
+ "not a TypeEngine class or object",
+ MissingType().compile
+ )
+
+
+class ReturnTypeTest(fixtures.TestBase):
+
+ def test_array_agg(self):
+ expr = func.array_agg(column('data', Integer))
+ is_(expr.type._type_affinity, Array)
+ is_(expr.type.item_type._type_affinity, Integer)
+
+ def test_mode(self):
+ expr = func.mode(0.5).within_group(
+ column('data', Integer).desc())
+ is_(expr.type._type_affinity, Integer)
+
+ def test_percentile_cont(self):
+ expr = func.percentile_cont(0.5).within_group(column('data', Integer))
+ is_(expr.type._type_affinity, Integer)
+
+ def test_percentile_cont_array(self):
+ expr = func.percentile_cont(0.5, 0.7).within_group(
+ column('data', Integer))
+ is_(expr.type._type_affinity, Array)
+ is_(expr.type.item_type._type_affinity, Integer)
+
+ def test_percentile_cont_array_desc(self):
+ expr = func.percentile_cont(0.5, 0.7).within_group(
+ column('data', Integer).desc())
+ is_(expr.type._type_affinity, Array)
+ is_(expr.type.item_type._type_affinity, Integer)
+
+ def test_cume_dist(self):
+ expr = func.cume_dist(0.5).within_group(
+ column('data', Integer).desc())
+ is_(expr.type._type_affinity, Numeric)
+
+ def test_percent_rank(self):
+ expr = func.percent_rank(0.5).within_group(
+ column('data', Integer))
+ is_(expr.type._type_affinity, Numeric)
+
class ExecuteTest(fixtures.TestBase):
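The WITHIN GROUP tests and the new ReturnTypeTest above exercise FunctionElement.within_group() for ordered-set aggregates such as percentile_cont, including the derived return types. A minimal sketch of the expression form being compiled; the table and column names reuse the mytable fixture from this file:

    from sqlalchemy import func, select, Integer
    from sqlalchemy.sql import table, column

    t = table('mytable', column('name'), column('myid', Integer))
    # ordered-set aggregate: the ORDER BY goes inside WITHIN GROUP
    print(select([func.percentile_cont(0.5).within_group(t.c.name)]))
    # SELECT percentile_cont(:percentile_cont_1)
    #     WITHIN GROUP (ORDER BY mytable.name) AS anon_1 FROM mytable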
diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py
index 3c533d75f..ea4de032c 100644
--- a/test/sql/test_insert.py
+++ b/test/sql/test_insert.py
@@ -5,7 +5,7 @@ from sqlalchemy import Column, Integer, MetaData, String, Table,\
from sqlalchemy.dialects import mysql, postgresql
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL,\
- assert_raises_message, fixtures
+ assert_raises_message, fixtures, eq_
from sqlalchemy.sql import crud
@@ -319,6 +319,32 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
checkparams={"name_1": "foo", "foo": None}
)
+ def test_insert_from_select_dont_mutate_raw_columns(self):
+ # test [ticket:3603]
+ from sqlalchemy import table
+ table_ = table(
+ 'mytable',
+ Column('foo', String),
+ Column('bar', String, default='baz'),
+ )
+
+ stmt = select([table_.c.foo])
+ insert = table_.insert().from_select(['foo'], stmt)
+
+ self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
+ self.assert_compile(
+ insert,
+ "INSERT INTO mytable (foo, bar) "
+ "SELECT mytable.foo, :bar AS anon_1 FROM mytable"
+ )
+ self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
+ self.assert_compile(
+ insert,
+ "INSERT INTO mytable (foo, bar) "
+ "SELECT mytable.foo, :bar AS anon_1 FROM mytable"
+ )
+
+
def test_insert_mix_select_values_exception(self):
table1 = self.tables.mytable
sel = select([table1.c.myid, table1.c.name]).where(
@@ -390,6 +416,106 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
checkparams={"name_1": "foo"}
)
+ def test_anticipate_no_pk_composite_pk(self):
+ t = Table(
+ 't', MetaData(), Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True)
+ )
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.y' is marked as a member.*"
+ "Note that as of SQLAlchemy 1.1,",
+ t.insert().compile, column_keys=['x']
+
+ )
+
+ def test_anticipate_no_pk_composite_pk_implicit_returning(self):
+ t = Table(
+ 't', MetaData(), Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True)
+ )
+ d = postgresql.dialect()
+ d.implicit_returning = True
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.y' is marked as a member.*"
+ "Note that as of SQLAlchemy 1.1,",
+ t.insert().compile, dialect=d, column_keys=['x']
+
+ )
+
+ def test_anticipate_no_pk_composite_pk_prefetch(self):
+ t = Table(
+ 't', MetaData(), Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True)
+ )
+ d = postgresql.dialect()
+ d.implicit_returning = False
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.y' is marked as a member.*"
+ "Note that as of SQLAlchemy 1.1,",
+ t.insert().compile, dialect=d, column_keys=['x']
+
+ )
+
+ def test_anticipate_nullable_composite_pk(self):
+ t = Table(
+ 't', MetaData(), Column('x', Integer, primary_key=True),
+ Column('y', Integer, primary_key=True, nullable=True)
+ )
+ self.assert_compile(
+ t.insert(),
+ "INSERT INTO t (x) VALUES (:x)",
+ params={'x': 5},
+ )
+
+ def test_anticipate_no_pk_non_composite_pk(self):
+ t = Table(
+ 't', MetaData(),
+ Column('x', Integer, primary_key=True, autoincrement=False),
+ Column('q', Integer)
+ )
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.x' is marked as a member.*"
+ "may not store NULL.$",
+ t.insert().compile, column_keys=['q']
+
+ )
+
+ def test_anticipate_no_pk_non_composite_pk_implicit_returning(self):
+ t = Table(
+ 't', MetaData(),
+ Column('x', Integer, primary_key=True, autoincrement=False),
+ Column('q', Integer)
+ )
+ d = postgresql.dialect()
+ d.implicit_returning = True
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.x' is marked as a member.*"
+ "may not store NULL.$",
+ t.insert().compile, dialect=d, column_keys=['q']
+
+ )
+
+ def test_anticipate_no_pk_non_composite_pk_prefetch(self):
+ t = Table(
+ 't', MetaData(),
+ Column('x', Integer, primary_key=True, autoincrement=False),
+ Column('q', Integer)
+ )
+ d = postgresql.dialect()
+ d.implicit_returning = False
+ assert_raises_message(
+ exc.CompileError,
+ "Column 't.x' is marked as a member.*"
+ "may not store NULL.$",
+ t.insert().compile, dialect=d, column_keys=['q']
+
+ )
+
class InsertImplicitReturningTest(
_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
@@ -694,8 +820,21 @@ class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
'foo_2': None # evaluated later
}
+ stmt = table.insert().values(values)
+
+ eq_(
+ dict([
+ (k, v.type._type_affinity)
+ for (k, v) in
+ stmt.compile(dialect=postgresql.dialect()).binds.items()]),
+ {
+ 'foo': Integer, 'data_2': String, 'id_0': Integer,
+ 'id_2': Integer, 'foo_1': Integer, 'data_1': String,
+ 'id_1': Integer, 'foo_2': Integer, 'data_0': String}
+ )
+
self.assert_compile(
- table.insert().values(values),
+ stmt,
'INSERT INTO sometable (id, data, foo) VALUES '
'(%(id_0)s, %(data_0)s, %(foo)s), '
'(%(id_1)s, %(data_1)s, %(foo_1)s), '
@@ -728,8 +867,20 @@ class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
'foo_2': None, # evaluated later
}
+ stmt = table.insert().values(values)
+ eq_(
+ dict([
+ (k, v.type._type_affinity)
+ for (k, v) in
+ stmt.compile(dialect=postgresql.dialect()).binds.items()]),
+ {
+ 'foo': Integer, 'data_2': String, 'id_0': Integer,
+ 'id_2': Integer, 'foo_1': Integer, 'data_1': String,
+ 'id_1': Integer, 'foo_2': Integer, 'data_0': String}
+ )
+
self.assert_compile(
- table.insert().values(values),
+ stmt,
"INSERT INTO sometable (id, data, foo) VALUES "
"(%(id_0)s, %(data_0)s, %(foo)s), "
"(%(id_1)s, %(data_1)s, %(foo_1)s), "
diff --git a/test/sql/test_insert_exec.py b/test/sql/test_insert_exec.py
new file mode 100644
index 000000000..c49947425
--- /dev/null
+++ b/test/sql/test_insert_exec.py
@@ -0,0 +1,445 @@
+from sqlalchemy.testing import eq_, assert_raises_message, is_
+from sqlalchemy import testing
+from sqlalchemy.testing import fixtures, engines
+from sqlalchemy import (
+ exc, sql, String, Integer, MetaData, and_, ForeignKey,
+ VARCHAR, INT, Sequence, func)
+from sqlalchemy.testing.schema import Table, Column
+
+
+class InsertExecTest(fixtures.TablesTest):
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'users', metadata,
+ Column(
+ 'user_id', INT, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('user_name', VARCHAR(20)),
+ test_needs_acid=True
+ )
+
+ @testing.requires.multivalues_inserts
+ def test_multivalues_insert(self):
+ users = self.tables.users
+ users.insert(
+ values=[
+ {'user_id': 7, 'user_name': 'jack'},
+ {'user_id': 8, 'user_name': 'ed'}]).execute()
+ rows = users.select().order_by(users.c.user_id).execute().fetchall()
+ eq_(rows[0], (7, 'jack'))
+ eq_(rows[1], (8, 'ed'))
+ users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
+ rows = users.select().order_by(users.c.user_id).execute().fetchall()
+ eq_(rows[2], (9, 'jack'))
+ eq_(rows[3], (10, 'ed'))
+
+ def test_insert_heterogeneous_params(self):
+ """test that executemany parameters are asserted to match the
+ parameter set of the first."""
+ users = self.tables.users
+
+ assert_raises_message(
+ exc.StatementError,
+ r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
+ "bind parameter 'user_name', in "
+ "parameter group 2 "
+ r"\[SQL: u?'INSERT INTO users",
+ users.insert().execute,
+ {'user_id': 7, 'user_name': 'jack'},
+ {'user_id': 8, 'user_name': 'ed'},
+ {'user_id': 9}
+ )
+
+ # this succeeds however. We aren't yet doing
+ # a length check on all subsequent parameters.
+ users.insert().execute(
+ {'user_id': 7},
+ {'user_id': 8, 'user_name': 'ed'},
+ {'user_id': 9}
+ )
+
+ def _test_lastrow_accessor(self, table_, values, assertvalues):
+ """Tests the inserted_primary_key and lastrow_has_id() functions."""
+
+ def insert_values(engine, table_, values):
+ """
+ Inserts a row into a table, returns the full list of values
+ INSERTed including defaults that fired off on the DB side and
+ detects rows that had defaults and post-fetches.
+ """
+
+ # verify implicit_returning is working
+ if engine.dialect.implicit_returning:
+ ins = table_.insert()
+ comp = ins.compile(engine, column_keys=list(values))
+ if not set(values).issuperset(
+ c.key for c in table_.primary_key):
+ is_(bool(comp.returning), True)
+
+ result = engine.execute(table_.insert(), **values)
+ ret = values.copy()
+
+ for col, id in zip(
+ table_.primary_key, result.inserted_primary_key):
+ ret[col.key] = id
+
+ if result.lastrow_has_defaults():
+ criterion = and_(
+ *[
+ col == id for col, id in
+ zip(table_.primary_key, result.inserted_primary_key)])
+ row = engine.execute(table_.select(criterion)).first()
+ for c in table_.c:
+ ret[c.key] = row[c]
+ return ret
+
+ if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
+ assert testing.db.dialect.implicit_returning
+
+ if testing.db.dialect.implicit_returning:
+ test_engines = [
+ engines.testing_engine(options={'implicit_returning': False}),
+ engines.testing_engine(options={'implicit_returning': True}),
+ ]
+ else:
+ test_engines = [testing.db]
+
+ for engine in test_engines:
+ try:
+ table_.create(bind=engine, checkfirst=True)
+ i = insert_values(engine, table_, values)
+ eq_(i, assertvalues)
+ finally:
+ table_.drop(bind=engine)
+
+ @testing.skip_if('sqlite')
+ def test_lastrow_accessor_one(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t1", metadata,
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('foo', String(30), primary_key=True)),
+ {'foo': 'hi'},
+ {'id': 1, 'foo': 'hi'}
+ )
+
+ @testing.skip_if('sqlite')
+ def test_lastrow_accessor_two(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t2", metadata,
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('foo', String(30), primary_key=True),
+ Column('bar', String(30), server_default='hi')
+ ),
+ {'foo': 'hi'},
+ {'id': 1, 'foo': 'hi', 'bar': 'hi'}
+ )
+
+ def test_lastrow_accessor_three(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t3", metadata,
+ Column("id", String(40), primary_key=True),
+ Column('foo', String(30), primary_key=True),
+ Column("bar", String(30))
+ ),
+ {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
+ {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
+ )
+
+ def test_lastrow_accessor_four(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t4", metadata,
+ Column(
+ 'id', Integer,
+ Sequence('t4_id_seq', optional=True),
+ primary_key=True),
+ Column('foo', String(30), primary_key=True),
+ Column('bar', String(30), server_default='hi')
+ ),
+ {'foo': 'hi', 'id': 1},
+ {'id': 1, 'foo': 'hi', 'bar': 'hi'}
+ )
+
+ def test_lastrow_accessor_five(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t5", metadata,
+ Column('id', String(10), primary_key=True),
+ Column('bar', String(30), server_default='hi')
+ ),
+ {'id': 'id1'},
+ {'id': 'id1', 'bar': 'hi'},
+ )
+
+ @testing.skip_if('sqlite')
+ def test_lastrow_accessor_six(self):
+ metadata = MetaData()
+ self._test_lastrow_accessor(
+ Table(
+ "t6", metadata,
+ Column(
+ 'id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('bar', Integer, primary_key=True)
+ ),
+ {'bar': 0},
+ {'id': 1, 'bar': 0},
+ )
+
+ # TODO: why not in the sqlite suite?
+ @testing.only_on('sqlite+pysqlite')
+ @testing.provide_metadata
+ def test_lastrowid_zero(self):
+ from sqlalchemy.dialects import sqlite
+ eng = engines.testing_engine()
+
+ class ExcCtx(sqlite.base.SQLiteExecutionContext):
+
+ def get_lastrowid(self):
+ return 0
+ eng.dialect.execution_ctx_cls = ExcCtx
+ t = Table(
+ 't', self.metadata, Column('x', Integer, primary_key=True),
+ Column('y', Integer))
+ t.create(eng)
+ r = eng.execute(t.insert().values(y=5))
+ eq_(r.inserted_primary_key, [0])
+
+ @testing.fails_on(
+        'sqlite', "sqlite autoincrement doesn't work with composite pks")
+ @testing.provide_metadata
+ def test_misordered_lastrow(self):
+ metadata = self.metadata
+
+ related = Table(
+ 'related', metadata,
+ Column('id', Integer, primary_key=True),
+ mysql_engine='MyISAM'
+ )
+ t6 = Table(
+ "t6", metadata,
+ Column(
+ 'manual_id', Integer, ForeignKey('related.id'),
+ primary_key=True),
+ Column(
+ 'auto_id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ mysql_engine='MyISAM'
+ )
+
+ metadata.create_all()
+ r = related.insert().values(id=12).execute()
+ id_ = r.inserted_primary_key[0]
+ eq_(id_, 12)
+
+ r = t6.insert().values(manual_id=id_).execute()
+ eq_(r.inserted_primary_key, [12, 1])
+
+ def test_implicit_id_insert_select_columns(self):
+ users = self.tables.users
+ stmt = users.insert().from_select(
+ (users.c.user_id, users.c.user_name),
+ users.select().where(users.c.user_id == 20))
+
+ testing.db.execute(stmt)
+
+ def test_implicit_id_insert_select_keys(self):
+ users = self.tables.users
+ stmt = users.insert().from_select(
+ ["user_id", "user_name"],
+ users.select().where(users.c.user_id == 20))
+
+ testing.db.execute(stmt)
+
+ @testing.requires.empty_inserts
+ @testing.requires.returning
+ def test_no_inserted_pk_on_returning(self):
+ users = self.tables.users
+ result = testing.db.execute(users.insert().returning(
+ users.c.user_id, users.c.user_name))
+ assert_raises_message(
+ exc.InvalidRequestError,
+ r"Can't call inserted_primary_key when returning\(\) is used.",
+ getattr, result, 'inserted_primary_key'
+ )
+
+
+class TableInsertTest(fixtures.TablesTest):
+
+ """test for consistent insert behavior across dialects
+ regarding the inline=True flag, lower-case 't' tables.
+
+ """
+ run_create_tables = 'each'
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'foo', metadata,
+ Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
+ Column('data', String(50)),
+ Column('x', Integer)
+ )
+
+ def _fixture(self, types=True):
+ if types:
+ t = sql.table(
+ 'foo', sql.column('id', Integer),
+ sql.column('data', String),
+ sql.column('x', Integer))
+ else:
+ t = sql.table(
+ 'foo', sql.column('id'), sql.column('data'), sql.column('x'))
+ return t
+
+ def _test(self, stmt, row, returning=None, inserted_primary_key=False):
+ r = testing.db.execute(stmt)
+
+ if returning:
+ returned = r.first()
+ eq_(returned, returning)
+ elif inserted_primary_key is not False:
+ eq_(r.inserted_primary_key, inserted_primary_key)
+
+ eq_(testing.db.execute(self.tables.foo.select()).first(), row)
+
+ def _test_multi(self, stmt, rows, data):
+ testing.db.execute(stmt, rows)
+ eq_(
+ testing.db.execute(
+ self.tables.foo.select().
+ order_by(self.tables.foo.c.id)).fetchall(),
+ data)
+
+ @testing.requires.sequences
+    def test_explicit_sequence(self):
+ t = self._fixture()
+ self._test(
+ t.insert().values(
+ id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
+ (1, 'data', 5)
+ )
+
+ def test_uppercase(self):
+ t = self.tables.foo
+ self._test(
+ t.insert().values(id=1, data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[1]
+ )
+
+ def test_uppercase_inline(self):
+ t = self.tables.foo
+ self._test(
+ t.insert(inline=True).values(id=1, data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[1]
+ )
+
+ @testing.crashes(
+ "mssql+pyodbc",
+ "Pyodbc + SQL Server + Py3K, some decimal handling issue")
+ def test_uppercase_inline_implicit(self):
+ t = self.tables.foo
+ self._test(
+ t.insert(inline=True).values(data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[None]
+ )
+
+ def test_uppercase_implicit(self):
+ t = self.tables.foo
+ self._test(
+ t.insert().values(data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[1]
+ )
+
+ def test_uppercase_direct_params(self):
+ t = self.tables.foo
+ self._test(
+ t.insert().values(id=1, data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[1]
+ )
+
+ @testing.requires.returning
+ def test_uppercase_direct_params_returning(self):
+ t = self.tables.foo
+ self._test(
+ t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
+ (1, 'data', 5),
+ returning=(1, 5)
+ )
+
+ @testing.fails_on(
+ 'mssql', "lowercase table doesn't support identity insert disable")
+ def test_direct_params(self):
+ t = self._fixture()
+ self._test(
+ t.insert().values(id=1, data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[]
+ )
+
+ @testing.fails_on(
+ 'mssql', "lowercase table doesn't support identity insert disable")
+ @testing.requires.returning
+ def test_direct_params_returning(self):
+ t = self._fixture()
+ self._test(
+ t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
+ (1, 'data', 5),
+ returning=(1, 5)
+ )
+
+ @testing.requires.emulated_lastrowid
+ def test_implicit_pk(self):
+ t = self._fixture()
+ self._test(
+ t.insert().values(data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[]
+ )
+
+ @testing.requires.emulated_lastrowid
+ def test_implicit_pk_multi_rows(self):
+ t = self._fixture()
+ self._test_multi(
+ t.insert(),
+ [
+ {'data': 'd1', 'x': 5},
+ {'data': 'd2', 'x': 6},
+ {'data': 'd3', 'x': 7},
+ ],
+ [
+ (1, 'd1', 5),
+ (2, 'd2', 6),
+ (3, 'd3', 7)
+ ],
+ )
+
+ @testing.requires.emulated_lastrowid
+ def test_implicit_pk_inline(self):
+ t = self._fixture()
+ self._test(
+ t.insert(inline=True).values(data='data', x=5),
+ (1, 'data', 5),
+ inserted_primary_key=[]
+ )
diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py
index 2e51b9a91..d4039a5fe 100644
--- a/test/sql/test_metadata.py
+++ b/test/sql/test_metadata.py
@@ -7,7 +7,7 @@ from sqlalchemy import Integer, String, UniqueConstraint, \
CheckConstraint, ForeignKey, MetaData, Sequence, \
ForeignKeyConstraint, PrimaryKeyConstraint, ColumnDefault, Index, event,\
events, Unicode, types as sqltypes, bindparam, \
- Table, Column, Boolean, Enum, func, text
+ Table, Column, Boolean, Enum, func, text, TypeDecorator
from sqlalchemy import schema, exc
from sqlalchemy.sql import elements, naming
import sqlalchemy as tsa
@@ -1361,6 +1361,123 @@ class TableTest(fixtures.TestBase, AssertsCompiledSQL):
assert not t1.c.x.nullable
+class PKAutoIncrementTest(fixtures.TestBase):
+ def test_multi_integer_no_autoinc(self):
+ pk = PrimaryKeyConstraint(
+ Column('a', Integer),
+ Column('b', Integer)
+ )
+ t = Table('t', MetaData())
+ t.append_constraint(pk)
+
+ is_(pk._autoincrement_column, None)
+
+ def test_multi_integer_multi_autoinc(self):
+ pk = PrimaryKeyConstraint(
+ Column('a', Integer, autoincrement=True),
+ Column('b', Integer, autoincrement=True)
+ )
+ t = Table('t', MetaData())
+ t.append_constraint(pk)
+
+ assert_raises_message(
+ exc.ArgumentError,
+ "Only one Column may be marked",
+ lambda: pk._autoincrement_column
+ )
+
+ def test_single_integer_no_autoinc(self):
+ pk = PrimaryKeyConstraint(
+ Column('a', Integer),
+ )
+ t = Table('t', MetaData())
+ t.append_constraint(pk)
+
+ is_(pk._autoincrement_column, pk.columns['a'])
+
+ def test_single_string_no_autoinc(self):
+ pk = PrimaryKeyConstraint(
+ Column('a', String),
+ )
+ t = Table('t', MetaData())
+ t.append_constraint(pk)
+
+ is_(pk._autoincrement_column, None)
+
+ def test_single_string_illegal_autoinc(self):
+ t = Table('t', MetaData(), Column('a', String, autoincrement=True))
+ pk = PrimaryKeyConstraint(
+ t.c.a
+ )
+ t.append_constraint(pk)
+
+ assert_raises_message(
+ exc.ArgumentError,
+ "Column type VARCHAR on column 't.a'",
+ lambda: pk._autoincrement_column
+ )
+
+ def test_single_integer_default(self):
+ t = Table(
+ 't', MetaData(),
+ Column('a', Integer, autoincrement=True, default=lambda: 1))
+ pk = PrimaryKeyConstraint(
+ t.c.a
+ )
+ t.append_constraint(pk)
+
+ is_(pk._autoincrement_column, t.c.a)
+
+ def test_single_integer_server_default(self):
+ # new as of 1.1; now that we have three states for autoincrement,
+ # if the user puts autoincrement=True with a server_default, trust
+ # them on it
+ t = Table(
+ 't', MetaData(),
+ Column('a', Integer,
+ autoincrement=True, server_default=func.magic()))
+ pk = PrimaryKeyConstraint(
+ t.c.a
+ )
+ t.append_constraint(pk)
+
+ is_(pk._autoincrement_column, t.c.a)
+
+ def test_implicit_autoinc_but_fks(self):
+ m = MetaData()
+ Table('t1', m, Column('id', Integer, primary_key=True))
+ t2 = Table(
+ 't2', MetaData(),
+ Column('a', Integer, ForeignKey('t1.id')))
+ pk = PrimaryKeyConstraint(
+ t2.c.a
+ )
+ t2.append_constraint(pk)
+ is_(pk._autoincrement_column, None)
+
+ def test_explicit_autoinc_but_fks(self):
+ m = MetaData()
+ Table('t1', m, Column('id', Integer, primary_key=True))
+ t2 = Table(
+ 't2', MetaData(),
+ Column('a', Integer, ForeignKey('t1.id'), autoincrement=True))
+ pk = PrimaryKeyConstraint(
+ t2.c.a
+ )
+ t2.append_constraint(pk)
+ is_(pk._autoincrement_column, t2.c.a)
+
+ t3 = Table(
+ 't3', MetaData(),
+ Column('a', Integer,
+ ForeignKey('t1.id'), autoincrement='ignore_fk'))
+ pk = PrimaryKeyConstraint(
+ t3.c.a
+ )
+ t3.append_constraint(pk)
+ is_(pk._autoincrement_column, t3.c.a)
+
+
class SchemaTypeTest(fixtures.TestBase):
class MyType(sqltypes.SchemaType, sqltypes.TypeEngine):
@@ -1430,6 +1547,20 @@ class SchemaTypeTest(fixtures.TestBase):
# our test type sets table, though
is_(t2.c.y.type.table, t2)
+ def test_tometadata_copy_decorated(self):
+
+ class MyDecorated(TypeDecorator):
+ impl = self.MyType
+
+ m1 = MetaData()
+
+ type_ = MyDecorated(schema="z")
+ t1 = Table('x', m1, Column("y", type_))
+
+ m2 = MetaData()
+ t2 = t1.tometadata(m2)
+ eq_(t2.c.y.type.schema, "z")
+
def test_tometadata_independent_schema(self):
m1 = MetaData()
diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py
index 0985020d1..03c0f89be 100644
--- a/test/sql/test_operators.py
+++ b/test/sql/test_operators.py
@@ -1,7 +1,8 @@
from sqlalchemy.testing import fixtures, eq_, is_, is_not_
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
-from sqlalchemy.sql import column, desc, asc, literal, collate, null, true, false
+from sqlalchemy.sql import column, desc, asc, literal, collate, null, \
+ true, false, any_, all_
from sqlalchemy.sql.expression import BinaryExpression, \
ClauseList, Grouping, \
UnaryExpression, select, union, func, tuple_
@@ -12,8 +13,9 @@ from sqlalchemy import exc
from sqlalchemy.engine import default
from sqlalchemy.sql.elements import _literal_as_text
from sqlalchemy.schema import Column, Table, MetaData
+from sqlalchemy.sql import compiler
from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, \
- Boolean, NullType, MatchType
+ Boolean, NullType, MatchType, Indexable, Concatenable, Array
from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \
sqlite, mssql
from sqlalchemy import util
@@ -21,7 +23,6 @@ import datetime
import collections
from sqlalchemy import text, literal_column
from sqlalchemy import and_, not_, between, or_
-from sqlalchemy.sql import true, false, null
class LoopOperate(operators.ColumnOperators):
@@ -210,6 +211,60 @@ class DefaultColumnComparatorTest(fixtures.TestBase):
def test_concat(self):
self._do_operate_test(operators.concat_op)
+ def test_default_adapt(self):
+ class TypeOne(TypeEngine):
+ pass
+
+ class TypeTwo(TypeEngine):
+ pass
+
+ expr = column('x', TypeOne()) - column('y', TypeTwo())
+ is_(
+ expr.type._type_affinity, TypeOne
+ )
+
+ def test_concatenable_adapt(self):
+ class TypeOne(Concatenable, TypeEngine):
+ pass
+
+ class TypeTwo(Concatenable, TypeEngine):
+ pass
+
+ class TypeThree(TypeEngine):
+ pass
+
+ expr = column('x', TypeOne()) - column('y', TypeTwo())
+ is_(
+ expr.type._type_affinity, TypeOne
+ )
+ is_(
+ expr.operator, operator.sub
+ )
+
+ expr = column('x', TypeOne()) + column('y', TypeTwo())
+ is_(
+ expr.type._type_affinity, TypeOne
+ )
+ is_(
+ expr.operator, operators.concat_op
+ )
+
+ expr = column('x', TypeOne()) - column('y', TypeThree())
+ is_(
+ expr.type._type_affinity, TypeOne
+ )
+ is_(
+ expr.operator, operator.sub
+ )
+
+ expr = column('x', TypeOne()) + column('y', TypeThree())
+ is_(
+ expr.type._type_affinity, TypeOne
+ )
+ is_(
+ expr.operator, operator.add
+ )
+
class CustomUnaryOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
@@ -577,6 +632,200 @@ class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
)
+class IndexableTest(fixtures.TestBase, testing.AssertsCompiledSQL):
+ def setUp(self):
+ class MyTypeCompiler(compiler.GenericTypeCompiler):
+ def visit_mytype(self, type, **kw):
+ return "MYTYPE"
+
+ def visit_myothertype(self, type, **kw):
+ return "MYOTHERTYPE"
+
+ class MyCompiler(compiler.SQLCompiler):
+ def visit_slice(self, element, **kw):
+ return "%s:%s" % (
+ self.process(element.start, **kw),
+ self.process(element.stop, **kw),
+ )
+
+ def visit_getitem_binary(self, binary, operator, **kw):
+ return "%s[%s]" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ class MyDialect(default.DefaultDialect):
+ statement_compiler = MyCompiler
+ type_compiler = MyTypeCompiler
+
+ class MyType(Indexable, TypeEngine):
+ __visit_name__ = 'mytype'
+
+ def __init__(self, zero_indexes=False, dimensions=1):
+ if zero_indexes:
+ self.zero_indexes = zero_indexes
+ self.dimensions = dimensions
+
+ class Comparator(Indexable.Comparator):
+ def _setup_getitem(self, index):
+ if isinstance(index, slice):
+ return_type = self.type
+ elif self.type.dimensions is None or \
+ self.type.dimensions == 1:
+ return_type = Integer()
+ else:
+ adapt_kw = {'dimensions': self.type.dimensions - 1}
+ # this is also testing the behavior of adapt()
+ # that we can pass kw that override constructor kws.
+ # required a small change to util.constructor_copy().
+ return_type = self.type.adapt(
+ self.type.__class__, **adapt_kw)
+
+ return operators.getitem, index, return_type
+ comparator_factory = Comparator
+
+ self.MyType = MyType
+ self.__dialect__ = MyDialect()
+
+ def test_setup_getitem_w_dims(self):
+ """test the behavior of the _setup_getitem() method given a simple
+ 'dimensions' scheme - this is identical to postgresql.ARRAY."""
+
+ col = Column('x', self.MyType(dimensions=3))
+
+ is_(
+ col[5].type._type_affinity, self.MyType
+ )
+ eq_(
+ col[5].type.dimensions, 2
+ )
+ is_(
+ col[5][6].type._type_affinity, self.MyType
+ )
+ eq_(
+ col[5][6].type.dimensions, 1
+ )
+ is_(
+ col[5][6][7].type._type_affinity, Integer
+ )
+
+ def test_getindex_literal(self):
+
+ col = Column('x', self.MyType())
+
+ self.assert_compile(
+ col[5],
+ "x[:x_1]",
+ checkparams={'x_1': 5}
+ )
+
+ def test_getindex_sqlexpr(self):
+
+ col = Column('x', self.MyType())
+ col2 = Column('y', Integer())
+
+ self.assert_compile(
+ col[col2],
+ "x[y]",
+ checkparams={}
+ )
+
+ self.assert_compile(
+ col[col2 + 8],
+ "x[(y + :y_1)]",
+ checkparams={'y_1': 8}
+ )
+
+ def test_getslice_literal(self):
+
+ col = Column('x', self.MyType())
+
+ self.assert_compile(
+ col[5:6],
+ "x[:x_1::x_2]",
+ checkparams={'x_1': 5, 'x_2': 6}
+ )
+
+ def test_getslice_sqlexpr(self):
+
+ col = Column('x', self.MyType())
+ col2 = Column('y', Integer())
+
+ self.assert_compile(
+ col[col2:col2 + 5],
+ "x[y:y + :y_1]",
+ checkparams={'y_1': 5}
+ )
+
+ def test_getindex_literal_zeroind(self):
+
+ col = Column('x', self.MyType(zero_indexes=True))
+
+ self.assert_compile(
+ col[5],
+ "x[:x_1]",
+ checkparams={'x_1': 6}
+ )
+
+ def test_getindex_sqlexpr_zeroind(self):
+
+ col = Column('x', self.MyType(zero_indexes=True))
+ col2 = Column('y', Integer())
+
+ self.assert_compile(
+ col[col2],
+ "x[(y + :y_1)]",
+ checkparams={'y_1': 1}
+ )
+
+ self.assert_compile(
+ col[col2 + 8],
+ "x[(y + :y_1 + :param_1)]",
+ checkparams={'y_1': 8, 'param_1': 1}
+ )
+
+ def test_getslice_literal_zeroind(self):
+
+ col = Column('x', self.MyType(zero_indexes=True))
+
+ self.assert_compile(
+ col[5:6],
+ "x[:x_1::x_2]",
+ checkparams={'x_1': 6, 'x_2': 7}
+ )
+
+ def test_getslice_sqlexpr_zeroind(self):
+
+ col = Column('x', self.MyType(zero_indexes=True))
+ col2 = Column('y', Integer())
+
+ self.assert_compile(
+ col[col2:col2 + 5],
+ "x[y + :y_1:y + :y_2 + :param_1]",
+ checkparams={'y_1': 1, 'y_2': 5, 'param_1': 1}
+ )
+
+ def test_override_operators(self):
+ special_index_op = operators.custom_op('->')
+
+ class MyOtherType(Indexable, TypeEngine):
+ __visit_name__ = 'myothertype'
+
+ class Comparator(TypeEngine.Comparator):
+
+ def _adapt_expression(self, op, other_comparator):
+ return special_index_op, MyOtherType()
+
+ comparator_factory = Comparator
+
+ col = Column('x', MyOtherType())
+ self.assert_compile(
+ col[5],
+ "x -> :x_1",
+ checkparams={'x_1': 5}
+ )
+
+
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test standalone booleans being wrapped in an AsBoolean, as well
@@ -825,6 +1074,64 @@ class ConjunctionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"SELECT false AS anon_1, false AS anon_2"
)
+ def test_is_true_literal(self):
+ c = column('x', Boolean)
+ self.assert_compile(
+ c.is_(True),
+ "x IS true"
+ )
+
+ def test_is_false_literal(self):
+ c = column('x', Boolean)
+ self.assert_compile(
+ c.is_(False),
+ "x IS false"
+ )
+
+ def test_and_false_literal_leading(self):
+ self.assert_compile(
+ and_(False, True),
+ "false"
+ )
+
+ self.assert_compile(
+ and_(False, False),
+ "false"
+ )
+
+ def test_and_true_literal_leading(self):
+ self.assert_compile(
+ and_(True, True),
+ "true"
+ )
+
+ self.assert_compile(
+ and_(True, False),
+ "false"
+ )
+
+ def test_or_false_literal_leading(self):
+ self.assert_compile(
+ or_(False, True),
+ "true"
+ )
+
+ self.assert_compile(
+ or_(False, False),
+ "false"
+ )
+
+ def test_or_true_literal_leading(self):
+ self.assert_compile(
+ or_(True, True),
+ "true"
+ )
+
+ self.assert_compile(
+ or_(True, False),
+ "true"
+ )
+
class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
@@ -1327,6 +1634,9 @@ class MathOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
else:
self._test_math_op(operator.div, '/')
+ def test_math_op_mod(self):
+ self._test_math_op(operator.mod, '%')
+
class ComparisonOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
@@ -1953,3 +2263,154 @@ class TupleTypingTest(fixtures.TestBase):
eq_(len(expr.right.clauses), 2)
for elem in expr.right.clauses:
self._assert_types(elem)
+
+
+class AnyAllTest(fixtures.TestBase, testing.AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def _fixture(self):
+ m = MetaData()
+
+ t = Table(
+ 'tab1', m,
+ Column('arrval', Array(Integer)),
+ Column('data', Integer)
+ )
+ return t
+
+ def test_any_array(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == any_(t.c.arrval),
+ ":param_1 = ANY (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_all_array(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == all_(t.c.arrval),
+ ":param_1 = ALL (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_any_comparator_array(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 > any_(t.c.arrval),
+ ":param_1 > ANY (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_all_comparator_array(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 > all_(t.c.arrval),
+ ":param_1 > ALL (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_any_comparator_array_wexpr(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ t.c.data > any_(t.c.arrval),
+ "tab1.data > ANY (tab1.arrval)",
+ checkparams={}
+ )
+
+ def test_all_comparator_array_wexpr(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ t.c.data > all_(t.c.arrval),
+ "tab1.data > ALL (tab1.arrval)",
+ checkparams={}
+ )
+
+ def test_illegal_ops(self):
+ t = self._fixture()
+
+ assert_raises_message(
+ exc.ArgumentError,
+ "Only comparison operators may be used with ANY/ALL",
+ lambda: 5 + all_(t.c.arrval)
+ )
+
+ # TODO:
+ # this is invalid but doesn't raise an error,
+ # as the left-hand side just does its thing. Types
+ # would need to reject their right-hand side.
+ self.assert_compile(
+ t.c.data + all_(t.c.arrval),
+ "tab1.data + ALL (tab1.arrval)"
+ )
+
+ def test_any_array_comparator_accessor(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ t.c.arrval.any(5, operator.gt),
+ ":param_1 > ANY (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_all_array_comparator_accessor(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ t.c.arrval.all(5, operator.gt),
+ ":param_1 > ALL (tab1.arrval)",
+ checkparams={"param_1": 5}
+ )
+
+ def test_any_array_expression(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == any_(t.c.arrval[5:6] + postgresql.array([3, 4])),
+ "%(param_1)s = ANY (tab1.arrval[%(arrval_1)s:%(arrval_2)s] || "
+ "ARRAY[%(param_2)s, %(param_3)s])",
+ checkparams={
+ 'arrval_2': 6, 'param_1': 5, 'param_3': 4,
+ 'arrval_1': 5, 'param_2': 3},
+ dialect='postgresql'
+ )
+
+ def test_all_array_expression(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == all_(t.c.arrval[5:6] + postgresql.array([3, 4])),
+ "%(param_1)s = ALL (tab1.arrval[%(arrval_1)s:%(arrval_2)s] || "
+ "ARRAY[%(param_2)s, %(param_3)s])",
+ checkparams={
+ 'arrval_2': 6, 'param_1': 5, 'param_3': 4,
+ 'arrval_1': 5, 'param_2': 3},
+ dialect='postgresql'
+ )
+
+ def test_any_subq(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == any_(select([t.c.data]).where(t.c.data < 10)),
+ ":param_1 = ANY (SELECT tab1.data "
+ "FROM tab1 WHERE tab1.data < :data_1)",
+ checkparams={'data_1': 10, 'param_1': 5}
+ )
+
+ def test_all_subq(self):
+ t = self._fixture()
+
+ self.assert_compile(
+ 5 == all_(select([t.c.data]).where(t.c.data < 10)),
+ ":param_1 = ALL (SELECT tab1.data "
+ "FROM tab1 WHERE tab1.data < :data_1)",
+ checkparams={'data_1': 10, 'param_1': 5}
+ )
+
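The AnyAllTest cases above exercise the new generic any_() / all_() operators against the Array type and against subqueries. A short sketch of the expression forms being compiled, using the same tab1/arrval names and the expected strings already asserted in the tests:

    from sqlalchemy import MetaData, Table, Column, Integer
    from sqlalchemy.sql import any_, all_
    from sqlalchemy.types import Array

    t = Table('tab1', MetaData(), Column('arrval', Array(Integer)))
    # comparisons against ANY/ALL of an array-valued column
    print(5 == any_(t.c.arrval))   # :param_1 = ANY (tab1.arrval)
    print(5 > all_(t.c.arrval))    # :param_1 > ALL (tab1.arrval)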
diff --git a/test/sql/test_query.py b/test/sql/test_query.py
index 98f375018..aca933fc9 100644
--- a/test/sql/test_query.py
+++ b/test/sql/test_query.py
@@ -1,13 +1,13 @@
-from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, is_
+from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, \
+ is_, in_, not_in_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
-from sqlalchemy import util
from sqlalchemy import (
exc, sql, func, select, String, Integer, MetaData, and_, ForeignKey,
- union, intersect, except_, union_all, VARCHAR, INT, CHAR, text, Sequence,
- bindparam, literal, not_, type_coerce, literal_column, desc, asc,
- TypeDecorator, or_, cast, table, column)
-from sqlalchemy.engine import default, result as _result
+ union, intersect, except_, union_all, VARCHAR, INT, text,
+ bindparam, literal, not_, literal_column, desc, asc,
+ TypeDecorator, or_, cast)
+from sqlalchemy.engine import default
from sqlalchemy.testing.schema import Table, Column
# ongoing - these are old tests. those which are of general use
@@ -61,260 +61,6 @@ class QueryTest(fixtures.TestBase):
def teardown_class(cls):
metadata.drop_all()
- @testing.requires.multivalues_inserts
- def test_multivalues_insert(self):
- users.insert(
- values=[
- {'user_id': 7, 'user_name': 'jack'},
- {'user_id': 8, 'user_name': 'ed'}]).execute()
- rows = users.select().order_by(users.c.user_id).execute().fetchall()
- self.assert_(rows[0] == (7, 'jack'))
- self.assert_(rows[1] == (8, 'ed'))
- users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
- rows = users.select().order_by(users.c.user_id).execute().fetchall()
- self.assert_(rows[2] == (9, 'jack'))
- self.assert_(rows[3] == (10, 'ed'))
-
- def test_insert_heterogeneous_params(self):
- """test that executemany parameters are asserted to match the
- parameter set of the first."""
-
- assert_raises_message(
- exc.StatementError,
- r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
- "bind parameter 'user_name', in "
- "parameter group 2 "
- r"\[SQL: u?'INSERT INTO query_users",
- users.insert().execute,
- {'user_id': 7, 'user_name': 'jack'},
- {'user_id': 8, 'user_name': 'ed'},
- {'user_id': 9}
- )
-
- # this succeeds however. We aren't yet doing
- # a length check on all subsequent parameters.
- users.insert().execute(
- {'user_id': 7},
- {'user_id': 8, 'user_name': 'ed'},
- {'user_id': 9}
- )
-
- def test_lastrow_accessor(self):
- """Tests the inserted_primary_key and lastrow_has_id() functions."""
-
- def insert_values(engine, table, values):
- """
- Inserts a row into a table, returns the full list of values
- INSERTed including defaults that fired off on the DB side and
- detects rows that had defaults and post-fetches.
- """
-
- # verify implicit_returning is working
- if engine.dialect.implicit_returning:
- ins = table.insert()
- comp = ins.compile(engine, column_keys=list(values))
- if not set(values).issuperset(
- c.key for c in table.primary_key):
- assert comp.returning
-
- result = engine.execute(table.insert(), **values)
- ret = values.copy()
-
- for col, id in zip(table.primary_key, result.inserted_primary_key):
- ret[col.key] = id
-
- if result.lastrow_has_defaults():
- criterion = and_(
- *[
- col == id for col, id in
- zip(table.primary_key, result.inserted_primary_key)])
- row = engine.execute(table.select(criterion)).first()
- for c in table.c:
- ret[c.key] = row[c]
- return ret
-
- if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
- assert testing.db.dialect.implicit_returning
-
- if testing.db.dialect.implicit_returning:
- test_engines = [
- engines.testing_engine(options={'implicit_returning': False}),
- engines.testing_engine(options={'implicit_returning': True}),
- ]
- else:
- test_engines = [testing.db]
-
- for engine in test_engines:
- metadata = MetaData()
- for supported, table, values, assertvalues in [
- (
- {'unsupported': ['sqlite']},
- Table(
- "t1", metadata,
- Column(
- 'id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('foo', String(30), primary_key=True)),
- {'foo': 'hi'},
- {'id': 1, 'foo': 'hi'}
- ),
- (
- {'unsupported': ['sqlite']},
- Table(
- "t2", metadata,
- Column(
- 'id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('foo', String(30), primary_key=True),
- Column('bar', String(30), server_default='hi')
- ),
- {'foo': 'hi'},
- {'id': 1, 'foo': 'hi', 'bar': 'hi'}
- ),
- (
- {'unsupported': []},
- Table(
- "t3", metadata,
- Column("id", String(40), primary_key=True),
- Column('foo', String(30), primary_key=True),
- Column("bar", String(30))
- ),
- {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
- {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
- ),
- (
- {'unsupported': []},
- Table(
- "t4", metadata,
- Column(
- 'id', Integer,
- Sequence('t4_id_seq', optional=True),
- primary_key=True),
- Column('foo', String(30), primary_key=True),
- Column('bar', String(30), server_default='hi')
- ),
- {'foo': 'hi', 'id': 1},
- {'id': 1, 'foo': 'hi', 'bar': 'hi'}
- ),
- (
- {'unsupported': []},
- Table(
- "t5", metadata,
- Column('id', String(10), primary_key=True),
- Column('bar', String(30), server_default='hi')
- ),
- {'id': 'id1'},
- {'id': 'id1', 'bar': 'hi'},
- ),
- (
- {'unsupported': ['sqlite']},
- Table(
- "t6", metadata,
- Column(
- 'id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('bar', Integer, primary_key=True)
- ),
- {'bar': 0},
- {'id': 1, 'bar': 0},
- ),
- ]:
- if testing.db.name in supported['unsupported']:
- continue
- try:
- table.create(bind=engine, checkfirst=True)
- i = insert_values(engine, table, values)
- assert i == assertvalues, "tablename: %s %r %r" % \
- (table.name, repr(i), repr(assertvalues))
- finally:
- table.drop(bind=engine)
-
- # TODO: why not in the sqlite suite?
- @testing.only_on('sqlite+pysqlite')
- @testing.provide_metadata
- def test_lastrowid_zero(self):
- from sqlalchemy.dialects import sqlite
- eng = engines.testing_engine()
-
- class ExcCtx(sqlite.base.SQLiteExecutionContext):
-
- def get_lastrowid(self):
- return 0
- eng.dialect.execution_ctx_cls = ExcCtx
- t = Table(
- 't', self.metadata, Column('x', Integer, primary_key=True),
- Column('y', Integer))
- t.create(eng)
- r = eng.execute(t.insert().values(y=5))
- eq_(r.inserted_primary_key, [0])
-
- @testing.fails_on(
- 'sqlite', "sqlite autoincremnt doesn't work with composite pks")
- def test_misordered_lastrow(self):
- related = Table(
- 'related', metadata,
- Column('id', Integer, primary_key=True),
- mysql_engine='MyISAM'
- )
- t6 = Table(
- "t6", metadata,
- Column(
- 'manual_id', Integer, ForeignKey('related.id'),
- primary_key=True),
- Column(
- 'auto_id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- mysql_engine='MyISAM'
- )
-
- metadata.create_all()
- r = related.insert().values(id=12).execute()
- id = r.inserted_primary_key[0]
- assert id == 12
-
- r = t6.insert().values(manual_id=id).execute()
- eq_(r.inserted_primary_key, [12, 1])
-
- def test_implicit_id_insert_select_columns(self):
- stmt = users.insert().from_select(
- (users.c.user_id, users.c.user_name),
- users.select().where(users.c.user_id == 20))
-
- testing.db.execute(stmt)
-
- def test_implicit_id_insert_select_keys(self):
- stmt = users.insert().from_select(
- ["user_id", "user_name"],
- users.select().where(users.c.user_id == 20))
-
- testing.db.execute(stmt)
-
- def test_row_iteration(self):
- users.insert().execute(
- {'user_id': 7, 'user_name': 'jack'},
- {'user_id': 8, 'user_name': 'ed'},
- {'user_id': 9, 'user_name': 'fred'},
- )
- r = users.select().execute()
- l = []
- for row in r:
- l.append(row)
- self.assert_(len(l) == 3)
-
- @testing.requires.subqueries
- def test_anonymous_rows(self):
- users.insert().execute(
- {'user_id': 7, 'user_name': 'jack'},
- {'user_id': 8, 'user_name': 'ed'},
- {'user_id': 9, 'user_name': 'fred'},
- )
-
- sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
- as_scalar()
- for row in select([sel + 1, sel + 3], bind=users.bind).execute():
- assert row['anon_1'] == 8
- assert row['anon_2'] == 10
-
@testing.fails_on(
'firebird', "kinterbasdb doesn't send full type information")
def test_order_by_label(self):
@@ -364,154 +110,6 @@ class QueryTest(fixtures.TestBase):
[("test: ed",), ("test: fred",), ("test: jack",)]
)
- def test_row_comparison(self):
- users.insert().execute(user_id=7, user_name='jack')
- rp = users.select().execute().first()
-
- self.assert_(rp == rp)
- self.assert_(not(rp != rp))
-
- equal = (7, 'jack')
-
- self.assert_(rp == equal)
- self.assert_(equal == rp)
- self.assert_(not (rp != equal))
- self.assert_(not (equal != equal))
-
- def endless():
- while True:
- yield 1
- self.assert_(rp != endless())
- self.assert_(endless() != rp)
-
- # test that everything compares the same
- # as it would against a tuple
- import operator
- for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
- for op in [
- operator.eq, operator.ne, operator.gt,
- operator.lt, operator.ge, operator.le
- ]:
-
- try:
- control = op(equal, compare)
- except TypeError:
- # Py3K raises TypeError for some invalid comparisons
- assert_raises(TypeError, op, rp, compare)
- else:
- eq_(control, op(rp, compare))
-
- try:
- control = op(compare, equal)
- except TypeError:
- # Py3K raises TypeError for some invalid comparisons
- assert_raises(TypeError, op, compare, rp)
- else:
- eq_(control, op(compare, rp))
-
- @testing.provide_metadata
- def test_column_label_overlap_fallback(self):
- content = Table(
- 'content', self.metadata,
- Column('type', String(30)),
- )
- bar = Table(
- 'bar', self.metadata,
- Column('content_type', String(30))
- )
- self.metadata.create_all(testing.db)
- testing.db.execute(content.insert().values(type="t1"))
-
- row = testing.db.execute(content.select(use_labels=True)).first()
- assert content.c.type in row
- assert bar.c.content_type not in row
- assert sql.column('content_type') in row
-
- row = testing.db.execute(
- select([content.c.type.label("content_type")])).first()
- assert content.c.type in row
-
- assert bar.c.content_type not in row
-
- assert sql.column('content_type') in row
-
- row = testing.db.execute(select([func.now().label("content_type")])). \
- first()
- assert content.c.type not in row
-
- assert bar.c.content_type not in row
-
- assert sql.column('content_type') in row
-
- def test_pickled_rows(self):
- users.insert().execute(
- {'user_id': 7, 'user_name': 'jack'},
- {'user_id': 8, 'user_name': 'ed'},
- {'user_id': 9, 'user_name': 'fred'},
- )
-
- for pickle in False, True:
- for use_labels in False, True:
- result = users.select(use_labels=use_labels).order_by(
- users.c.user_id).execute().fetchall()
-
- if pickle:
- result = util.pickle.loads(util.pickle.dumps(result))
-
- eq_(
- result,
- [(7, "jack"), (8, "ed"), (9, "fred")]
- )
- if use_labels:
- eq_(result[0]['query_users_user_id'], 7)
- eq_(
- list(result[0].keys()),
- ["query_users_user_id", "query_users_user_name"])
- else:
- eq_(result[0]['user_id'], 7)
- eq_(list(result[0].keys()), ["user_id", "user_name"])
-
- eq_(result[0][0], 7)
- eq_(result[0][users.c.user_id], 7)
- eq_(result[0][users.c.user_name], 'jack')
-
- if not pickle or use_labels:
- assert_raises(
- exc.NoSuchColumnError,
- lambda: result[0][addresses.c.user_id])
- else:
- # test with a different table. name resolution is
- # causing 'user_id' to match when use_labels wasn't used.
- eq_(result[0][addresses.c.user_id], 7)
-
- assert_raises(
- exc.NoSuchColumnError, lambda: result[0]['fake key'])
- assert_raises(
- exc.NoSuchColumnError,
- lambda: result[0][addresses.c.address_id])
-
- def test_column_error_printing(self):
- row = testing.db.execute(select([1])).first()
-
- class unprintable(object):
-
- def __str__(self):
- raise ValueError("nope")
-
- msg = r"Could not locate column in row for column '%s'"
-
- for accessor, repl in [
- ("x", "x"),
- (Column("q", Integer), "q"),
- (Column("q", Integer) + 12, r"q \+ :q_1"),
- (unprintable(), "unprintable element.*"),
- ]:
- assert_raises_message(
- exc.NoSuchColumnError,
- msg % repl,
- lambda: row[accessor]
- )
-
@testing.requires.boolean_col_expressions
def test_or_and_as_columns(self):
true, false = literal(True), literal(False)
@@ -538,16 +136,6 @@ class QueryTest(fixtures.TestBase):
assert row.x == True # noqa
assert row.y == False # noqa
- def test_fetchmany(self):
- users.insert().execute(user_id=7, user_name='jack')
- users.insert().execute(user_id=8, user_name='ed')
- users.insert().execute(user_id=9, user_name='fred')
- r = users.select().execute()
- l = []
- for row in r.fetchmany(size=2):
- l.append(row)
- self.assert_(len(l) == 2, "fetchmany(size=2) got %s rows" % len(l))
-
def test_like_ops(self):
users.insert().execute(
{'user_id': 1, 'user_name': 'apples'},
@@ -816,521 +404,6 @@ class QueryTest(fixtures.TestBase):
use_labels=labels),
[(3, 'a'), (2, 'b'), (1, None)])
- def test_column_slices(self):
- users.insert().execute(user_id=1, user_name='john')
- users.insert().execute(user_id=2, user_name='jack')
- addresses.insert().execute(
- address_id=1, user_id=2, address='foo@bar.com')
-
- r = text(
- "select * from query_addresses", bind=testing.db).execute().first()
- self.assert_(r[0:1] == (1,))
- self.assert_(r[1:] == (2, 'foo@bar.com'))
- self.assert_(r[:-1] == (1, 2))
-
- def test_column_accessor_basic_compiled(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- dict(user_id=2, user_name='jack')
- )
-
- r = users.select(users.c.user_id == 2).execute().first()
- self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
- self.assert_(
- r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
-
- def test_column_accessor_basic_text(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- dict(user_id=2, user_name='jack')
- )
- r = testing.db.execute(
- text("select * from query_users where user_id=2")).first()
- self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
- self.assert_(
- r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
-
- def test_column_accessor_textual_select(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- dict(user_id=2, user_name='jack')
- )
- # this will create column() objects inside
- # the select(), these need to match on name anyway
- r = testing.db.execute(
- select([
- column('user_id'), column('user_name')
- ]).select_from(table('query_users')).
- where(text('user_id=2'))
- ).first()
- self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
- self.assert_(
- r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
-
- def test_column_accessor_dotted_union(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- )
-
- # test a little sqlite weirdness - with the UNION,
- # cols come back as "query_users.user_id" in cursor.description
- r = testing.db.execute(
- text(
- "select query_users.user_id, query_users.user_name "
- "from query_users "
- "UNION select query_users.user_id, "
- "query_users.user_name from query_users"
- )
- ).first()
- eq_(r['user_id'], 1)
- eq_(r['user_name'], "john")
- eq_(list(r.keys()), ["user_id", "user_name"])
-
- @testing.only_on("sqlite", "sqlite specific feature")
- def test_column_accessor_sqlite_raw(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- )
-
- r = text(
- "select query_users.user_id, query_users.user_name "
- "from query_users "
- "UNION select query_users.user_id, "
- "query_users.user_name from query_users",
- bind=testing.db).execution_options(sqlite_raw_colnames=True). \
- execute().first()
- assert 'user_id' not in r
- assert 'user_name' not in r
- eq_(r['query_users.user_id'], 1)
- eq_(r['query_users.user_name'], "john")
- eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
-
- @testing.only_on("sqlite", "sqlite specific feature")
- def test_column_accessor_sqlite_translated(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- )
-
- r = text(
- "select query_users.user_id, query_users.user_name "
- "from query_users "
- "UNION select query_users.user_id, "
- "query_users.user_name from query_users",
- bind=testing.db).execute().first()
- eq_(r['user_id'], 1)
- eq_(r['user_name'], "john")
- eq_(r['query_users.user_id'], 1)
- eq_(r['query_users.user_name'], "john")
- eq_(list(r.keys()), ["user_id", "user_name"])
-
- def test_column_accessor_labels_w_dots(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- )
- # test using literal tablename.colname
- r = text(
- 'select query_users.user_id AS "query_users.user_id", '
- 'query_users.user_name AS "query_users.user_name" '
- 'from query_users', bind=testing.db).\
- execution_options(sqlite_raw_colnames=True).execute().first()
- eq_(r['query_users.user_id'], 1)
- eq_(r['query_users.user_name'], "john")
- assert "user_name" not in r
- eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
-
- def test_column_accessor_unary(self):
- users.insert().execute(
- dict(user_id=1, user_name='john'),
- )
-
- # unary expressions
- r = select([users.c.user_name.distinct()]).order_by(
- users.c.user_name).execute().first()
- eq_(r[users.c.user_name], 'john')
- eq_(r.user_name, 'john')
-
- def test_column_accessor_err(self):
- r = testing.db.execute(select([1])).first()
- assert_raises_message(
- AttributeError,
- "Could not locate column in row for column 'foo'",
- getattr, r, "foo"
- )
- assert_raises_message(
- KeyError,
- "Could not locate column in row for column 'foo'",
- lambda: r['foo']
- )
-
- def test_graceful_fetch_on_non_rows(self):
- """test that calling fetchone() etc. on a result that doesn't
- return rows fails gracefully.
-
- """
-
- # these proxies don't work with no cursor.description present.
- # so they don't apply to this test at the moment.
- # result.FullyBufferedResultProxy,
- # result.BufferedRowResultProxy,
- # result.BufferedColumnResultProxy
-
- conn = testing.db.connect()
- for meth in ('fetchone', 'fetchall', 'first', 'scalar', 'fetchmany'):
- trans = conn.begin()
- result = conn.execute(users.insert(), user_id=1)
- assert_raises_message(
- exc.ResourceClosedError,
- "This result object does not return rows. "
- "It has been closed automatically.",
- getattr(result, meth),
- )
- trans.rollback()
-
- @testing.requires.empty_inserts
- @testing.requires.returning
- def test_no_inserted_pk_on_returning(self):
- result = testing.db.execute(users.insert().returning(
- users.c.user_id, users.c.user_name))
- assert_raises_message(
- exc.InvalidRequestError,
- r"Can't call inserted_primary_key when returning\(\) is used.",
- getattr, result, 'inserted_primary_key'
- )
-
- def test_fetchone_til_end(self):
- result = testing.db.execute("select * from query_users")
- eq_(result.fetchone(), None)
- eq_(result.fetchone(), None)
- eq_(result.fetchone(), None)
- result.close()
- assert_raises_message(
- exc.ResourceClosedError,
- "This result object is closed.",
- result.fetchone
- )
-
- def test_row_case_sensitive(self):
- row = testing.db.execute(
- select([
- literal_column("1").label("case_insensitive"),
- literal_column("2").label("CaseSensitive")
- ])
- ).first()
-
- eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
- eq_(row["case_insensitive"], 1)
- eq_(row["CaseSensitive"], 2)
-
- assert_raises(
- KeyError,
- lambda: row["Case_insensitive"]
- )
- assert_raises(
- KeyError,
- lambda: row["casesensitive"]
- )
-
- def test_row_case_insensitive(self):
- ins_db = engines.testing_engine(options={"case_sensitive": False})
- row = ins_db.execute(
- select([
- literal_column("1").label("case_insensitive"),
- literal_column("2").label("CaseSensitive")
- ])
- ).first()
-
- eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
- eq_(row["case_insensitive"], 1)
- eq_(row["CaseSensitive"], 2)
- eq_(row["Case_insensitive"], 1)
- eq_(row["casesensitive"], 2)
-
- def test_row_as_args(self):
- users.insert().execute(user_id=1, user_name='john')
- r = users.select(users.c.user_id == 1).execute().first()
- users.delete().execute()
- users.insert().execute(r)
- eq_(users.select().execute().fetchall(), [(1, 'john')])
-
- def test_result_as_args(self):
- users.insert().execute([
- dict(user_id=1, user_name='john'),
- dict(user_id=2, user_name='ed')])
- r = users.select().execute()
- users2.insert().execute(list(r))
- eq_(
- users2.select().order_by(users2.c.user_id).execute().fetchall(),
- [(1, 'john'), (2, 'ed')]
- )
-
- users2.delete().execute()
- r = users.select().execute()
- users2.insert().execute(*list(r))
- eq_(
- users2.select().order_by(users2.c.user_id).execute().fetchall(),
- [(1, 'john'), (2, 'ed')]
- )
-
- @testing.requires.duplicate_names_in_cursor_description
- def test_ambiguous_column(self):
- users.insert().execute(user_id=1, user_name='john')
- result = users.outerjoin(addresses).select().execute()
- r = result.first()
-
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: r['user_id']
- )
-
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: r[users.c.user_id]
- )
-
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: r[addresses.c.user_id]
- )
-
- # try to trick it - fake_table isn't in the result!
- # we get the correct error
- fake_table = Table('fake', MetaData(), Column('user_id', Integer))
- assert_raises_message(
- exc.InvalidRequestError,
- "Could not locate column in row for column 'fake.user_id'",
- lambda: r[fake_table.c.user_id]
- )
-
- r = util.pickle.loads(util.pickle.dumps(r))
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: r['user_id']
- )
-
- result = users.outerjoin(addresses).select().execute()
- result = _result.BufferedColumnResultProxy(result.context)
- r = result.first()
- assert isinstance(r, _result.BufferedColumnRow)
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: r['user_id']
- )
-
- @testing.requires.duplicate_names_in_cursor_description
- def test_ambiguous_column_by_col(self):
- users.insert().execute(user_id=1, user_name='john')
- ua = users.alias()
- u2 = users.alias()
- result = select([users.c.user_id, ua.c.user_id]).execute()
- row = result.first()
-
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: row[users.c.user_id]
- )
-
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: row[ua.c.user_id]
- )
-
- # Unfortunately, this fails -
- # we'd like
- # "Could not locate column in row"
- # to be raised here, but the check for
- # "common column" in _compare_name_for_result()
- # has other requirements to be more liberal.
- # Ultimately the
- # expression system would need a way to determine,
- # given two columns in a "proxy" relationship, whether they
- # refer to different parent tables.
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name",
- lambda: row[u2.c.user_id]
- )
-
- @testing.requires.duplicate_names_in_cursor_description
- def test_ambiguous_column_contains(self):
- # ticket 2702. in 0.7 we'd get True, False.
- # in 0.8, both columns are present so it's True;
- # but when they're fetched you'll get the ambiguous error.
- users.insert().execute(user_id=1, user_name='john')
- result = select([users.c.user_id, addresses.c.user_id]).\
- select_from(users.outerjoin(addresses)).execute()
- row = result.first()
-
- eq_(
- set([users.c.user_id in row, addresses.c.user_id in row]),
- set([True])
- )
-
- def test_ambiguous_column_by_col_plus_label(self):
- users.insert().execute(user_id=1, user_name='john')
- result = select(
- [users.c.user_id,
- type_coerce(users.c.user_id, Integer).label('foo')]).execute()
- row = result.first()
- eq_(
- row[users.c.user_id], 1
- )
- eq_(
- row[1], 1
- )
-
- def test_fetch_partial_result_map(self):
- users.insert().execute(user_id=7, user_name='ed')
-
- t = text("select * from query_users").columns(
- user_name=String()
- )
- eq_(
- testing.db.execute(t).fetchall(), [(7, 'ed')]
- )
-
- def test_fetch_unordered_result_map(self):
- users.insert().execute(user_id=7, user_name='ed')
-
- class Goofy1(TypeDecorator):
- impl = String
-
- def process_result_value(self, value, dialect):
- return value + "a"
-
- class Goofy2(TypeDecorator):
- impl = String
-
- def process_result_value(self, value, dialect):
- return value + "b"
-
- class Goofy3(TypeDecorator):
- impl = String
-
- def process_result_value(self, value, dialect):
- return value + "c"
-
- t = text(
- "select user_name as a, user_name as b, "
- "user_name as c from query_users").columns(
- a=Goofy1(), b=Goofy2(), c=Goofy3()
- )
- eq_(
- testing.db.execute(t).fetchall(), [
- ('eda', 'edb', 'edc')
- ]
- )
-
- @testing.requires.subqueries
- def test_column_label_targeting(self):
- users.insert().execute(user_id=7, user_name='ed')
-
- for s in (
- users.select().alias('foo'),
- users.select().alias(users.name),
- ):
- row = s.select(use_labels=True).execute().first()
- assert row[s.c.user_id] == 7
- assert row[s.c.user_name] == 'ed'
-
- def test_keys(self):
- users.insert().execute(user_id=1, user_name='foo')
- r = users.select().execute()
- eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
- r = r.first()
- eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
-
- def test_items(self):
- users.insert().execute(user_id=1, user_name='foo')
- r = users.select().execute().first()
- eq_(
- [(x[0].lower(), x[1]) for x in list(r.items())],
- [('user_id', 1), ('user_name', 'foo')])
-
- def test_len(self):
- users.insert().execute(user_id=1, user_name='foo')
- r = users.select().execute().first()
- eq_(len(r), 2)
-
- r = testing.db.execute('select user_name, user_id from query_users'). \
- first()
- eq_(len(r), 2)
- r = testing.db.execute('select user_name from query_users').first()
- eq_(len(r), 1)
-
- def test_sorting_in_python(self):
- users.insert().execute(
- dict(user_id=1, user_name='foo'),
- dict(user_id=2, user_name='bar'),
- dict(user_id=3, user_name='def'),
- )
-
- rows = users.select().order_by(users.c.user_name).execute().fetchall()
-
- eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
-
- eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
-
- def test_column_order_with_simple_query(self):
- # should return values in column definition order
- users.insert().execute(user_id=1, user_name='foo')
- r = users.select(users.c.user_id == 1).execute().first()
- eq_(r[0], 1)
- eq_(r[1], 'foo')
- eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
- eq_(list(r.values()), [1, 'foo'])
-
- def test_column_order_with_text_query(self):
- # should return values in query order
- users.insert().execute(user_id=1, user_name='foo')
- r = testing.db.execute('select user_name, user_id from query_users'). \
- first()
- eq_(r[0], 'foo')
- eq_(r[1], 1)
- eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
- eq_(list(r.values()), ['foo', 1])
-
- @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on()')
- @testing.crashes('firebird', 'An identifier must begin with a letter')
- def test_column_accessor_shadow(self):
- meta = MetaData(testing.db)
- shadowed = Table(
- 'test_shadowed', meta,
- Column('shadow_id', INT, primary_key=True),
- Column('shadow_name', VARCHAR(20)),
- Column('parent', VARCHAR(20)),
- Column('row', VARCHAR(40)),
- Column('_parent', VARCHAR(20)),
- Column('_row', VARCHAR(20)),
- )
- shadowed.create(checkfirst=True)
- try:
- shadowed.insert().execute(
- shadow_id=1, shadow_name='The Shadow', parent='The Light',
- row='Without light there is no shadow',
- _parent='Hidden parent', _row='Hidden row')
- r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
- self.assert_(
- r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
- self.assert_(
- r.shadow_name == r['shadow_name'] ==
- r[shadowed.c.shadow_name] == 'The Shadow')
- self.assert_(
- r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light')
- self.assert_(
- r.row == r['row'] == r[shadowed.c.row] ==
- 'Without light there is no shadow')
- self.assert_(r['_parent'] == 'Hidden parent')
- self.assert_(r['_row'] == 'Hidden row')
- finally:
- shadowed.drop(checkfirst=True)
-
@testing.emits_warning('.*empty sequence.*')
def test_in_filtering(self):
"""test the behavior of the in_() function."""
@@ -1480,393 +553,6 @@ class RequiredBindTest(fixtures.TablesTest):
is_(bindparam('foo', callable_=c, required=False).required, False)
-class TableInsertTest(fixtures.TablesTest):
-
- """test for consistent insert behavior across dialects
- regarding the inline=True flag and lower-case 't' tables.
-
- """
- run_create_tables = 'each'
- __backend__ = True
-
- @classmethod
- def define_tables(cls, metadata):
- Table(
- 'foo', metadata,
- Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
- Column('data', String(50)),
- Column('x', Integer)
- )
-
- def _fixture(self, types=True):
- if types:
- t = sql.table(
- 'foo', sql.column('id', Integer),
- sql.column('data', String),
- sql.column('x', Integer))
- else:
- t = sql.table(
- 'foo', sql.column('id'), sql.column('data'), sql.column('x'))
- return t
-
- def _test(self, stmt, row, returning=None, inserted_primary_key=False):
- r = testing.db.execute(stmt)
-
- if returning:
- returned = r.first()
- eq_(returned, returning)
- elif inserted_primary_key is not False:
- eq_(r.inserted_primary_key, inserted_primary_key)
-
- eq_(testing.db.execute(self.tables.foo.select()).first(), row)
-
- def _test_multi(self, stmt, rows, data):
- testing.db.execute(stmt, rows)
- eq_(
- testing.db.execute(
- self.tables.foo.select().
- order_by(self.tables.foo.c.id)).fetchall(),
- data)
-
- @testing.requires.sequences
- def test_explicit_sequence(self):
- t = self._fixture()
- self._test(
- t.insert().values(
- id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
- (1, 'data', 5)
- )
-
- def test_uppercase(self):
- t = self.tables.foo
- self._test(
- t.insert().values(id=1, data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[1]
- )
-
- def test_uppercase_inline(self):
- t = self.tables.foo
- self._test(
- t.insert(inline=True).values(id=1, data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[1]
- )
-
- @testing.crashes(
- "mssql+pyodbc",
- "Pyodbc + SQL Server + Py3K, some decimal handling issue")
- def test_uppercase_inline_implicit(self):
- t = self.tables.foo
- self._test(
- t.insert(inline=True).values(data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[None]
- )
-
- def test_uppercase_implicit(self):
- t = self.tables.foo
- self._test(
- t.insert().values(data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[1]
- )
-
- def test_uppercase_direct_params(self):
- t = self.tables.foo
- self._test(
- t.insert().values(id=1, data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[1]
- )
-
- @testing.requires.returning
- def test_uppercase_direct_params_returning(self):
- t = self.tables.foo
- self._test(
- t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
- (1, 'data', 5),
- returning=(1, 5)
- )
-
- @testing.fails_on(
- 'mssql', "lowercase table doesn't support identity insert disable")
- def test_direct_params(self):
- t = self._fixture()
- self._test(
- t.insert().values(id=1, data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[]
- )
-
- @testing.fails_on(
- 'mssql', "lowercase table doesn't support identity insert disable")
- @testing.requires.returning
- def test_direct_params_returning(self):
- t = self._fixture()
- self._test(
- t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
- (1, 'data', 5),
- returning=(1, 5)
- )
-
- @testing.requires.emulated_lastrowid
- def test_implicit_pk(self):
- t = self._fixture()
- self._test(
- t.insert().values(data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[]
- )
-
- @testing.requires.emulated_lastrowid
- def test_implicit_pk_multi_rows(self):
- t = self._fixture()
- self._test_multi(
- t.insert(),
- [
- {'data': 'd1', 'x': 5},
- {'data': 'd2', 'x': 6},
- {'data': 'd3', 'x': 7},
- ],
- [
- (1, 'd1', 5),
- (2, 'd2', 6),
- (3, 'd3', 7)
- ],
- )
-
- @testing.requires.emulated_lastrowid
- def test_implicit_pk_inline(self):
- t = self._fixture()
- self._test(
- t.insert(inline=True).values(data='data', x=5),
- (1, 'data', 5),
- inserted_primary_key=[]
- )
-
-
-class KeyTargetingTest(fixtures.TablesTest):
- run_inserts = 'once'
- run_deletes = None
- __backend__ = True
-
- @classmethod
- def define_tables(cls, metadata):
- Table(
- 'keyed1', metadata, Column("a", CHAR(2), key="b"),
- Column("c", CHAR(2), key="q")
- )
- Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
- Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
- Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
- Table('content', metadata, Column('t', String(30), key="type"))
- Table('bar', metadata, Column('ctype', String(30), key="content_type"))
-
- if testing.requires.schemas.enabled:
- Table(
- 'wschema', metadata,
- Column("a", CHAR(2), key="b"),
- Column("c", CHAR(2), key="q"),
- schema=testing.config.test_schema
- )
-
- @classmethod
- def insert_data(cls):
- cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
- cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
- cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
- cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
- cls.tables.content.insert().execute(type="t1")
-
- if testing.requires.schemas.enabled:
- cls.tables['%s.wschema' % testing.config.test_schema].insert().execute(
- dict(b="a1", q="c1"))
-
- @testing.requires.schemas
- def test_keyed_accessor_wschema(self):
- keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
- row = testing.db.execute(keyed1.select()).first()
-
- eq_(row.b, "a1")
- eq_(row.q, "c1")
- eq_(row.a, "a1")
- eq_(row.c, "c1")
-
- def test_keyed_accessor_single(self):
- keyed1 = self.tables.keyed1
- row = testing.db.execute(keyed1.select()).first()
-
- eq_(row.b, "a1")
- eq_(row.q, "c1")
- eq_(row.a, "a1")
- eq_(row.c, "c1")
-
- def test_keyed_accessor_single_labeled(self):
- keyed1 = self.tables.keyed1
- row = testing.db.execute(keyed1.select().apply_labels()).first()
-
- eq_(row.keyed1_b, "a1")
- eq_(row.keyed1_q, "c1")
- eq_(row.keyed1_a, "a1")
- eq_(row.keyed1_c, "c1")
-
- @testing.requires.duplicate_names_in_cursor_description
- def test_keyed_accessor_composite_conflict_2(self):
- keyed1 = self.tables.keyed1
- keyed2 = self.tables.keyed2
-
- row = testing.db.execute(select([keyed1, keyed2])).first()
- # row.b is unambiguous
- eq_(row.b, "b2")
- # row.a is ambiguous
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambig",
- getattr, row, "a"
- )
-
- def test_keyed_accessor_composite_names_precedent(self):
- keyed1 = self.tables.keyed1
- keyed4 = self.tables.keyed4
-
- row = testing.db.execute(select([keyed1, keyed4])).first()
- eq_(row.b, "b4")
- eq_(row.q, "q4")
- eq_(row.a, "a1")
- eq_(row.c, "c1")
-
- @testing.requires.duplicate_names_in_cursor_description
- def test_keyed_accessor_composite_keys_precedent(self):
- keyed1 = self.tables.keyed1
- keyed3 = self.tables.keyed3
-
- row = testing.db.execute(select([keyed1, keyed3])).first()
- eq_(row.q, "c1")
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name 'b'",
- getattr, row, "b"
- )
- assert_raises_message(
- exc.InvalidRequestError,
- "Ambiguous column name 'a'",
- getattr, row, "a"
- )
- eq_(row.d, "d3")
-
- def test_keyed_accessor_composite_labeled(self):
- keyed1 = self.tables.keyed1
- keyed2 = self.tables.keyed2
-
- row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
- first()
- eq_(row.keyed1_b, "a1")
- eq_(row.keyed1_a, "a1")
- eq_(row.keyed1_q, "c1")
- eq_(row.keyed1_c, "c1")
- eq_(row.keyed2_a, "a2")
- eq_(row.keyed2_b, "b2")
- assert_raises(KeyError, lambda: row['keyed2_c'])
- assert_raises(KeyError, lambda: row['keyed2_q'])
-
- def test_column_label_overlap_fallback(self):
- content, bar = self.tables.content, self.tables.bar
- row = testing.db.execute(
- select([content.c.type.label("content_type")])).first()
- assert content.c.type not in row
- assert bar.c.content_type not in row
- assert sql.column('content_type') in row
-
- row = testing.db.execute(select([func.now().label("content_type")])). \
- first()
- assert content.c.type not in row
- assert bar.c.content_type not in row
- assert sql.column('content_type') in row
-
- def test_column_label_overlap_fallback_2(self):
- content, bar = self.tables.content, self.tables.bar
- row = testing.db.execute(content.select(use_labels=True)).first()
- assert content.c.type in row
- assert bar.c.content_type not in row
- assert sql.column('content_type') not in row
-
- def test_columnclause_schema_column_one(self):
- keyed2 = self.tables.keyed2
-
- # this is addressed by [ticket:2932]
- # ColumnClause._compare_name_for_result allows the
- # columns which the statement is against to be lightweight
- # cols, which results in a more liberal comparison scheme
- a, b = sql.column('a'), sql.column('b')
- stmt = select([a, b]).select_from(table("keyed2"))
- row = testing.db.execute(stmt).first()
-
- assert keyed2.c.a in row
- assert keyed2.c.b in row
- assert a in row
- assert b in row
-
- def test_columnclause_schema_column_two(self):
- keyed2 = self.tables.keyed2
-
- a, b = sql.column('a'), sql.column('b')
- stmt = select([keyed2.c.a, keyed2.c.b])
- row = testing.db.execute(stmt).first()
-
- assert keyed2.c.a in row
- assert keyed2.c.b in row
- assert a in row
- assert b in row
-
- def test_columnclause_schema_column_three(self):
- keyed2 = self.tables.keyed2
-
- # this is also addressed by [ticket:2932]
-
- a, b = sql.column('a'), sql.column('b')
- stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
- row = testing.db.execute(stmt).first()
-
- assert keyed2.c.a in row
- assert keyed2.c.b in row
- assert a in row
- assert b in row
- assert stmt.c.a in row
- assert stmt.c.b in row
-
- def test_columnclause_schema_column_four(self):
- keyed2 = self.tables.keyed2
-
- # this is also addressed by [ticket:2932]
-
- a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
- stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
- a, b)
- row = testing.db.execute(stmt).first()
-
- assert keyed2.c.a in row
- assert keyed2.c.b in row
- assert a in row
- assert b in row
- assert stmt.c.keyed2_a in row
- assert stmt.c.keyed2_b in row
-
- def test_columnclause_schema_column_five(self):
- keyed2 = self.tables.keyed2
-
- # this is also addressed by [ticket:2932]
-
- stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
- keyed2_a=CHAR, keyed2_b=CHAR)
- row = testing.db.execute(stmt).first()
-
- assert keyed2.c.a in row
- assert keyed2.c.b in row
- assert stmt.c.keyed2_a in row
- assert stmt.c.keyed2_b in row
-
-
class LimitTest(fixtures.TestBase):
__backend__ = True
diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py
new file mode 100644
index 000000000..8461996ea
--- /dev/null
+++ b/test/sql/test_resultset.py
@@ -0,0 +1,1136 @@
+from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, \
+ in_, not_in_, is_, ne_
+from sqlalchemy import testing
+from sqlalchemy.testing import fixtures, engines
+from sqlalchemy import util
+from sqlalchemy import (
+ exc, sql, func, select, String, Integer, MetaData, ForeignKey,
+ VARCHAR, INT, CHAR, text, type_coerce, literal_column,
+ TypeDecorator, table, column)
+from sqlalchemy.engine import result as _result
+from sqlalchemy.testing.schema import Table, Column
+import operator
+
+
+class ResultProxyTest(fixtures.TablesTest):
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'users', metadata,
+ Column(
+ 'user_id', INT, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('user_name', VARCHAR(20)),
+ test_needs_acid=True
+ )
+ Table(
+ 'addresses', metadata,
+ Column(
+ 'address_id', Integer, primary_key=True,
+ test_needs_autoincrement=True),
+ Column('user_id', Integer, ForeignKey('users.user_id')),
+ Column('address', String(30)),
+ test_needs_acid=True
+ )
+
+ Table(
+ 'users2', metadata,
+ Column('user_id', INT, primary_key=True),
+ Column('user_name', VARCHAR(20)),
+ test_needs_acid=True
+ )
+
+ def test_row_iteration(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ {'user_id': 7, 'user_name': 'jack'},
+ {'user_id': 8, 'user_name': 'ed'},
+ {'user_id': 9, 'user_name': 'fred'},
+ )
+ r = users.select().execute()
+ l = []
+ for row in r:
+ l.append(row)
+ eq_(len(l), 3)
+
+ @testing.requires.subqueries
+ def test_anonymous_rows(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ {'user_id': 7, 'user_name': 'jack'},
+ {'user_id': 8, 'user_name': 'ed'},
+ {'user_id': 9, 'user_name': 'fred'},
+ )
+
+ sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
+ as_scalar()
+ for row in select([sel + 1, sel + 3], bind=users.bind).execute():
+ eq_(row['anon_1'], 8)
+ eq_(row['anon_2'], 10)
+
+ def test_row_comparison(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=7, user_name='jack')
+ rp = users.select().execute().first()
+
+ eq_(rp, rp)
+ is_(not(rp != rp), True)
+
+ equal = (7, 'jack')
+
+ eq_(rp, equal)
+ eq_(equal, rp)
+ is_((not (rp != equal)), True)
+ is_(not (equal != equal), True)
+
+ def endless():
+ while True:
+ yield 1
+ ne_(rp, endless())
+ ne_(endless(), rp)
+
+ # test that everything compares the same
+ # as it would against a tuple
+ for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
+ for op in [
+ operator.eq, operator.ne, operator.gt,
+ operator.lt, operator.ge, operator.le
+ ]:
+
+ try:
+ control = op(equal, compare)
+ except TypeError:
+ # Py3K raises TypeError for some invalid comparisons
+ assert_raises(TypeError, op, rp, compare)
+ else:
+ eq_(control, op(rp, compare))
+
+ try:
+ control = op(compare, equal)
+ except TypeError:
+ # Py3K raises TypeError for some invalid comparisons
+ assert_raises(TypeError, op, compare, rp)
+ else:
+ eq_(control, op(compare, rp))
+
+ @testing.provide_metadata
+ def test_column_label_overlap_fallback(self):
+ content = Table(
+ 'content', self.metadata,
+ Column('type', String(30)),
+ )
+ bar = Table(
+ 'bar', self.metadata,
+ Column('content_type', String(30))
+ )
+ self.metadata.create_all(testing.db)
+ testing.db.execute(content.insert().values(type="t1"))
+
+ row = testing.db.execute(content.select(use_labels=True)).first()
+ in_(content.c.type, row)
+ not_in_(bar.c.content_type, row)
+ in_(sql.column('content_type'), row)
+
+ row = testing.db.execute(
+ select([content.c.type.label("content_type")])).first()
+ in_(content.c.type, row)
+
+ not_in_(bar.c.content_type, row)
+
+ in_(sql.column('content_type'), row)
+
+ row = testing.db.execute(select([func.now().label("content_type")])). \
+ first()
+ not_in_(content.c.type, row)
+
+ not_in_(bar.c.content_type, row)
+
+ in_(sql.column('content_type'), row)
+
+ def test_pickled_rows(self):
+ users = self.tables.users
+ addresses = self.tables.addresses
+
+ users.insert().execute(
+ {'user_id': 7, 'user_name': 'jack'},
+ {'user_id': 8, 'user_name': 'ed'},
+ {'user_id': 9, 'user_name': 'fred'},
+ )
+
+ for pickle in False, True:
+ for use_labels in False, True:
+ result = users.select(use_labels=use_labels).order_by(
+ users.c.user_id).execute().fetchall()
+
+ if pickle:
+ result = util.pickle.loads(util.pickle.dumps(result))
+
+ eq_(
+ result,
+ [(7, "jack"), (8, "ed"), (9, "fred")]
+ )
+ if use_labels:
+ eq_(result[0]['users_user_id'], 7)
+ eq_(
+ list(result[0].keys()),
+ ["users_user_id", "users_user_name"])
+ else:
+ eq_(result[0]['user_id'], 7)
+ eq_(list(result[0].keys()), ["user_id", "user_name"])
+
+ eq_(result[0][0], 7)
+ eq_(result[0][users.c.user_id], 7)
+ eq_(result[0][users.c.user_name], 'jack')
+
+ if not pickle or use_labels:
+ assert_raises(
+ exc.NoSuchColumnError,
+ lambda: result[0][addresses.c.user_id])
+ else:
+ # test with a different table. name resolution is
+ # causing 'user_id' to match when use_labels wasn't used.
+ eq_(result[0][addresses.c.user_id], 7)
+
+ assert_raises(
+ exc.NoSuchColumnError, lambda: result[0]['fake key'])
+ assert_raises(
+ exc.NoSuchColumnError,
+ lambda: result[0][addresses.c.address_id])
+
+ def test_column_error_printing(self):
+ row = testing.db.execute(select([1])).first()
+
+ class unprintable(object):
+
+ def __str__(self):
+ raise ValueError("nope")
+
+ msg = r"Could not locate column in row for column '%s'"
+
+ for accessor, repl in [
+ ("x", "x"),
+ (Column("q", Integer), "q"),
+ (Column("q", Integer) + 12, r"q \+ :q_1"),
+ (unprintable(), "unprintable element.*"),
+ ]:
+ assert_raises_message(
+ exc.NoSuchColumnError,
+ msg % repl,
+ lambda: row[accessor]
+ )
+
+ def test_fetchmany(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=7, user_name='jack')
+ users.insert().execute(user_id=8, user_name='ed')
+ users.insert().execute(user_id=9, user_name='fred')
+ r = users.select().execute()
+ l = []
+ for row in r.fetchmany(size=2):
+ l.append(row)
+ eq_(len(l), 2)
+
+ def test_column_slices(self):
+ users = self.tables.users
+ addresses = self.tables.addresses
+
+ users.insert().execute(user_id=1, user_name='john')
+ users.insert().execute(user_id=2, user_name='jack')
+ addresses.insert().execute(
+ address_id=1, user_id=2, address='foo@bar.com')
+
+ r = text(
+ "select * from addresses", bind=testing.db).execute().first()
+ eq_(r[0:1], (1,))
+ eq_(r[1:], (2, 'foo@bar.com'))
+ eq_(r[:-1], (1, 2))
+
+ def test_column_accessor_basic_compiled(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ dict(user_id=2, user_name='jack')
+ )
+
+ r = users.select(users.c.user_id == 2).execute().first()
+ eq_(r.user_id, 2)
+ eq_(r['user_id'], 2)
+ eq_(r[users.c.user_id], 2)
+
+ eq_(r.user_name, 'jack')
+ eq_(r['user_name'], 'jack')
+ eq_(r[users.c.user_name], 'jack')
+
+ def test_column_accessor_basic_text(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ dict(user_id=2, user_name='jack')
+ )
+ r = testing.db.execute(
+ text("select * from users where user_id=2")).first()
+
+ eq_(r.user_id, 2)
+ eq_(r['user_id'], 2)
+ eq_(r[users.c.user_id], 2)
+
+ eq_(r.user_name, 'jack')
+ eq_(r['user_name'], 'jack')
+ eq_(r[users.c.user_name], 'jack')
+
+ def test_column_accessor_textual_select(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ dict(user_id=2, user_name='jack')
+ )
+ # this will create column() objects inside
+ # the select(), these need to match on name anyway
+ r = testing.db.execute(
+ select([
+ column('user_id'), column('user_name')
+ ]).select_from(table('users')).
+ where(text('user_id=2'))
+ ).first()
+
+ eq_(r.user_id, 2)
+ eq_(r['user_id'], 2)
+ eq_(r[users.c.user_id], 2)
+
+ eq_(r.user_name, 'jack')
+ eq_(r['user_name'], 'jack')
+ eq_(r[users.c.user_name], 'jack')
+
+ def test_column_accessor_dotted_union(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ )
+
+ # test a little sqlite weirdness - with the UNION,
+ # cols come back as "users.user_id" in cursor.description
+ r = testing.db.execute(
+ text(
+ "select users.user_id, users.user_name "
+ "from users "
+ "UNION select users.user_id, "
+ "users.user_name from users"
+ )
+ ).first()
+ eq_(r['user_id'], 1)
+ eq_(r['user_name'], "john")
+ eq_(list(r.keys()), ["user_id", "user_name"])
+
+ @testing.only_on("sqlite", "sqlite specific feature")
+ def test_column_accessor_sqlite_raw(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ )
+
+ r = text(
+ "select users.user_id, users.user_name "
+ "from users "
+ "UNION select users.user_id, "
+ "users.user_name from users",
+ bind=testing.db).execution_options(sqlite_raw_colnames=True). \
+ execute().first()
+ not_in_('user_id', r)
+ not_in_('user_name', r)
+ eq_(r['users.user_id'], 1)
+ eq_(r['users.user_name'], "john")
+ eq_(list(r.keys()), ["users.user_id", "users.user_name"])
+
+ @testing.only_on("sqlite", "sqlite specific feature")
+ def test_column_accessor_sqlite_translated(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ )
+
+ r = text(
+ "select users.user_id, users.user_name "
+ "from users "
+ "UNION select users.user_id, "
+ "users.user_name from users",
+ bind=testing.db).execute().first()
+ eq_(r['user_id'], 1)
+ eq_(r['user_name'], "john")
+ eq_(r['users.user_id'], 1)
+ eq_(r['users.user_name'], "john")
+ eq_(list(r.keys()), ["user_id", "user_name"])
+
+ def test_column_accessor_labels_w_dots(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ )
+ # test using literal tablename.colname
+ r = text(
+ 'select users.user_id AS "users.user_id", '
+ 'users.user_name AS "users.user_name" '
+ 'from users', bind=testing.db).\
+ execution_options(sqlite_raw_colnames=True).execute().first()
+ eq_(r['users.user_id'], 1)
+ eq_(r['users.user_name'], "john")
+ not_in_("user_name", r)
+ eq_(list(r.keys()), ["users.user_id", "users.user_name"])
+
+ def test_column_accessor_unary(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='john'),
+ )
+
+ # unary expressions
+ r = select([users.c.user_name.distinct()]).order_by(
+ users.c.user_name).execute().first()
+ eq_(r[users.c.user_name], 'john')
+ eq_(r.user_name, 'john')
+
+ def test_column_accessor_err(self):
+ r = testing.db.execute(select([1])).first()
+ assert_raises_message(
+ AttributeError,
+ "Could not locate column in row for column 'foo'",
+ getattr, r, "foo"
+ )
+ assert_raises_message(
+ KeyError,
+ "Could not locate column in row for column 'foo'",
+ lambda: r['foo']
+ )
+
+ def test_graceful_fetch_on_non_rows(self):
+ """test that calling fetchone() etc. on a result that doesn't
+ return rows fails gracefully.
+
+ """
+
+ # these proxies don't work with no cursor.description present.
+ # so they don't apply to this test at the moment.
+ # result.FullyBufferedResultProxy,
+ # result.BufferedRowResultProxy,
+ # result.BufferedColumnResultProxy
+
+ users = self.tables.users
+
+ conn = testing.db.connect()
+ for meth in [
+ lambda r: r.fetchone(),
+ lambda r: r.fetchall(),
+ lambda r: r.first(),
+ lambda r: r.scalar(),
+ lambda r: r.fetchmany(),
+ lambda r: r._getter('user'),
+ lambda r: r._has_key('user'),
+ ]:
+ trans = conn.begin()
+ result = conn.execute(users.insert(), user_id=1)
+ assert_raises_message(
+ exc.ResourceClosedError,
+ "This result object does not return rows. "
+ "It has been closed automatically.",
+ meth, result,
+ )
+ trans.rollback()
+
+ def test_fetchone_til_end(self):
+ result = testing.db.execute("select * from users")
+ eq_(result.fetchone(), None)
+ eq_(result.fetchone(), None)
+ eq_(result.fetchone(), None)
+ result.close()
+ assert_raises_message(
+ exc.ResourceClosedError,
+ "This result object is closed.",
+ result.fetchone
+ )
+
+ def test_row_case_sensitive(self):
+ row = testing.db.execute(
+ select([
+ literal_column("1").label("case_insensitive"),
+ literal_column("2").label("CaseSensitive")
+ ])
+ ).first()
+
+ eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
+
+ in_("case_insensitive", row._keymap)
+ in_("CaseSensitive", row._keymap)
+ not_in_("casesensitive", row._keymap)
+
+ eq_(row["case_insensitive"], 1)
+ eq_(row["CaseSensitive"], 2)
+
+ assert_raises(
+ KeyError,
+ lambda: row["Case_insensitive"]
+ )
+ assert_raises(
+ KeyError,
+ lambda: row["casesensitive"]
+ )
+
+ def test_row_case_sensitive_unoptimized(self):
+ ins_db = engines.testing_engine(options={"case_sensitive": True})
+ row = ins_db.execute(
+ select([
+ literal_column("1").label("case_insensitive"),
+ literal_column("2").label("CaseSensitive"),
+ text("3 AS screw_up_the_cols")
+ ])
+ ).first()
+
+ eq_(
+ list(row.keys()),
+ ["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
+
+ in_("case_insensitive", row._keymap)
+ in_("CaseSensitive", row._keymap)
+ not_in_("casesensitive", row._keymap)
+
+ eq_(row["case_insensitive"], 1)
+ eq_(row["CaseSensitive"], 2)
+ eq_(row["screw_up_the_cols"], 3)
+
+ assert_raises(KeyError, lambda: row["Case_insensitive"])
+ assert_raises(KeyError, lambda: row["casesensitive"])
+ assert_raises(KeyError, lambda: row["screw_UP_the_cols"])
+
+ def test_row_case_insensitive(self):
+ ins_db = engines.testing_engine(options={"case_sensitive": False})
+ row = ins_db.execute(
+ select([
+ literal_column("1").label("case_insensitive"),
+ literal_column("2").label("CaseSensitive")
+ ])
+ ).first()
+
+ eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
+
+ in_("case_insensitive", row._keymap)
+ in_("CaseSensitive", row._keymap)
+ in_("casesensitive", row._keymap)
+
+ eq_(row["case_insensitive"], 1)
+ eq_(row["CaseSensitive"], 2)
+ eq_(row["Case_insensitive"], 1)
+ eq_(row["casesensitive"], 2)
+
+ def test_row_case_insensitive_unoptimized(self):
+ ins_db = engines.testing_engine(options={"case_sensitive": False})
+ row = ins_db.execute(
+ select([
+ literal_column("1").label("case_insensitive"),
+ literal_column("2").label("CaseSensitive"),
+ text("3 AS screw_up_the_cols")
+ ])
+ ).first()
+
+ eq_(
+ list(row.keys()),
+ ["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
+
+ in_("case_insensitive", row._keymap)
+ in_("CaseSensitive", row._keymap)
+ in_("casesensitive", row._keymap)
+
+ eq_(row["case_insensitive"], 1)
+ eq_(row["CaseSensitive"], 2)
+ eq_(row["screw_up_the_cols"], 3)
+ eq_(row["Case_insensitive"], 1)
+ eq_(row["casesensitive"], 2)
+ eq_(row["screw_UP_the_cols"], 3)
+
+ def test_row_as_args(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='john')
+ r = users.select(users.c.user_id == 1).execute().first()
+ users.delete().execute()
+ users.insert().execute(r)
+ eq_(users.select().execute().fetchall(), [(1, 'john')])
+
+ def test_result_as_args(self):
+ users = self.tables.users
+ users2 = self.tables.users2
+
+ users.insert().execute([
+ dict(user_id=1, user_name='john'),
+ dict(user_id=2, user_name='ed')])
+ r = users.select().execute()
+ users2.insert().execute(list(r))
+ eq_(
+ users2.select().order_by(users2.c.user_id).execute().fetchall(),
+ [(1, 'john'), (2, 'ed')]
+ )
+
+ users2.delete().execute()
+ r = users.select().execute()
+ users2.insert().execute(*list(r))
+ eq_(
+ users2.select().order_by(users2.c.user_id).execute().fetchall(),
+ [(1, 'john'), (2, 'ed')]
+ )
+
+ @testing.requires.duplicate_names_in_cursor_description
+ def test_ambiguous_column(self):
+ users = self.tables.users
+ addresses = self.tables.addresses
+
+ users.insert().execute(user_id=1, user_name='john')
+ result = users.outerjoin(addresses).select().execute()
+ r = result.first()
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: r['user_id']
+ )
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: r[users.c.user_id]
+ )
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: r[addresses.c.user_id]
+ )
+
+ # try to trick it - fake_table isn't in the result!
+ # we get the correct error
+ fake_table = Table('fake', MetaData(), Column('user_id', Integer))
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Could not locate column in row for column 'fake.user_id'",
+ lambda: r[fake_table.c.user_id]
+ )
+
+ r = util.pickle.loads(util.pickle.dumps(r))
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: r['user_id']
+ )
+
+ result = users.outerjoin(addresses).select().execute()
+ result = _result.BufferedColumnResultProxy(result.context)
+ r = result.first()
+ assert isinstance(r, _result.BufferedColumnRow)
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: r['user_id']
+ )
+
+ @testing.requires.duplicate_names_in_cursor_description
+ def test_ambiguous_column_by_col(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='john')
+ ua = users.alias()
+ u2 = users.alias()
+ result = select([users.c.user_id, ua.c.user_id]).execute()
+ row = result.first()
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: row[users.c.user_id]
+ )
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: row[ua.c.user_id]
+ )
+
+ # Unfortunately, this fails -
+ # we'd like
+ # "Could not locate column in row"
+ # to be raised here, but the check for
+ # "common column" in _compare_name_for_result()
+ # has other requirements to be more liberal.
+ # Ultimately the
+ # expression system would need a way to determine,
+ # given two columns in a "proxy" relationship, whether they
+ # refer to different parent tables.
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name",
+ lambda: row[u2.c.user_id]
+ )
+
+ @testing.requires.duplicate_names_in_cursor_description
+ def test_ambiguous_column_contains(self):
+ users = self.tables.users
+ addresses = self.tables.addresses
+
+ # ticket 2702. in 0.7 we'd get True, False.
+ # in 0.8, both columns are present so it's True;
+ # but when they're fetched you'll get the ambiguous error.
+ users.insert().execute(user_id=1, user_name='john')
+ result = select([users.c.user_id, addresses.c.user_id]).\
+ select_from(users.outerjoin(addresses)).execute()
+ row = result.first()
+
+ eq_(
+ set([users.c.user_id in row, addresses.c.user_id in row]),
+ set([True])
+ )
+
+ def test_ambiguous_column_by_col_plus_label(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='john')
+ result = select(
+ [users.c.user_id,
+ type_coerce(users.c.user_id, Integer).label('foo')]).execute()
+ row = result.first()
+ eq_(
+ row[users.c.user_id], 1
+ )
+ eq_(
+ row[1], 1
+ )
+
+ def test_fetch_partial_result_map(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=7, user_name='ed')
+
+ t = text("select * from users").columns(
+ user_name=String()
+ )
+ eq_(
+ testing.db.execute(t).fetchall(), [(7, 'ed')]
+ )
+
+ def test_fetch_unordered_result_map(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=7, user_name='ed')
+
+ class Goofy1(TypeDecorator):
+ impl = String
+
+ def process_result_value(self, value, dialect):
+ return value + "a"
+
+ class Goofy2(TypeDecorator):
+ impl = String
+
+ def process_result_value(self, value, dialect):
+ return value + "b"
+
+ class Goofy3(TypeDecorator):
+ impl = String
+
+ def process_result_value(self, value, dialect):
+ return value + "c"
+
+ t = text(
+ "select user_name as a, user_name as b, "
+ "user_name as c from users").columns(
+ a=Goofy1(), b=Goofy2(), c=Goofy3()
+ )
+ eq_(
+ testing.db.execute(t).fetchall(), [
+ ('eda', 'edb', 'edc')
+ ]
+ )
+
+ @testing.requires.subqueries
+ def test_column_label_targeting(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=7, user_name='ed')
+
+ for s in (
+ users.select().alias('foo'),
+ users.select().alias(users.name),
+ ):
+ row = s.select(use_labels=True).execute().first()
+ eq_(row[s.c.user_id], 7)
+ eq_(row[s.c.user_name], 'ed')
+
+ def test_keys(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ result = users.select().execute()
+ eq_(
+ result.keys(),
+ ['user_id', 'user_name']
+ )
+ row = result.first()
+ eq_(
+ row.keys(),
+ ['user_id', 'user_name']
+ )
+
+ def test_keys_anon_labels(self):
+ """test [ticket:3483]"""
+
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ result = testing.db.execute(
+ select([
+ users.c.user_id,
+ users.c.user_name.label(None),
+ func.count(literal_column('1'))]).
+ group_by(users.c.user_id, users.c.user_name)
+ )
+
+ eq_(
+ result.keys(),
+ ['user_id', 'user_name_1', 'count_1']
+ )
+ row = result.first()
+ eq_(
+ row.keys(),
+ ['user_id', 'user_name_1', 'count_1']
+ )
+
+ def test_items(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ r = users.select().execute().first()
+ eq_(
+ [(x[0].lower(), x[1]) for x in list(r.items())],
+ [('user_id', 1), ('user_name', 'foo')])
+
+ def test_len(self):
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ r = users.select().execute().first()
+ eq_(len(r), 2)
+
+ r = testing.db.execute('select user_name, user_id from users'). \
+ first()
+ eq_(len(r), 2)
+ r = testing.db.execute('select user_name from users').first()
+ eq_(len(r), 1)
+
+ def test_sorting_in_python(self):
+ users = self.tables.users
+
+ users.insert().execute(
+ dict(user_id=1, user_name='foo'),
+ dict(user_id=2, user_name='bar'),
+ dict(user_id=3, user_name='def'),
+ )
+
+ rows = users.select().order_by(users.c.user_name).execute().fetchall()
+
+ eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
+
+ eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
+
+ def test_column_order_with_simple_query(self):
+ # should return values in column definition order
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ r = users.select(users.c.user_id == 1).execute().first()
+ eq_(r[0], 1)
+ eq_(r[1], 'foo')
+ eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
+ eq_(list(r.values()), [1, 'foo'])
+
+ def test_column_order_with_text_query(self):
+ # should return values in query order
+ users = self.tables.users
+
+ users.insert().execute(user_id=1, user_name='foo')
+ r = testing.db.execute('select user_name, user_id from users'). \
+ first()
+ eq_(r[0], 'foo')
+ eq_(r[1], 1)
+ eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
+ eq_(list(r.values()), ['foo', 1])
+
+ @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on()')
+ @testing.crashes('firebird', 'An identifier must begin with a letter')
+ @testing.provide_metadata
+ def test_column_accessor_shadow(self):
+ shadowed = Table(
+ 'test_shadowed', self.metadata,
+ Column('shadow_id', INT, primary_key=True),
+ Column('shadow_name', VARCHAR(20)),
+ Column('parent', VARCHAR(20)),
+ Column('row', VARCHAR(40)),
+ Column('_parent', VARCHAR(20)),
+ Column('_row', VARCHAR(20)),
+ )
+ self.metadata.create_all()
+ shadowed.insert().execute(
+ shadow_id=1, shadow_name='The Shadow', parent='The Light',
+ row='Without light there is no shadow',
+ _parent='Hidden parent', _row='Hidden row')
+ r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
+
+ eq_(r.shadow_id, 1)
+ eq_(r['shadow_id'], 1)
+ eq_(r[shadowed.c.shadow_id], 1)
+
+ eq_(r.shadow_name, 'The Shadow')
+ eq_(r['shadow_name'], 'The Shadow')
+ eq_(r[shadowed.c.shadow_name], 'The Shadow')
+
+ eq_(r.parent, 'The Light')
+ eq_(r['parent'], 'The Light')
+ eq_(r[shadowed.c.parent], 'The Light')
+
+ eq_(r.row, 'Without light there is no shadow')
+ eq_(r['row'], 'Without light there is no shadow')
+ eq_(r[shadowed.c.row], 'Without light there is no shadow')
+
+ eq_(r['_parent'], 'Hidden parent')
+ eq_(r['_row'], 'Hidden row')
+
+
+class KeyTargetingTest(fixtures.TablesTest):
+ run_inserts = 'once'
+ run_deletes = None
+ __backend__ = True
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'keyed1', metadata, Column("a", CHAR(2), key="b"),
+ Column("c", CHAR(2), key="q")
+ )
+ Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
+ Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
+ Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
+ Table('content', metadata, Column('t', String(30), key="type"))
+ Table('bar', metadata, Column('ctype', String(30), key="content_type"))
+
+ if testing.requires.schemas.enabled:
+ Table(
+ 'wschema', metadata,
+ Column("a", CHAR(2), key="b"),
+ Column("c", CHAR(2), key="q"),
+ schema=testing.config.test_schema
+ )
+
+ @classmethod
+ def insert_data(cls):
+ cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
+ cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
+ cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
+ cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
+ cls.tables.content.insert().execute(type="t1")
+
+ if testing.requires.schemas.enabled:
+ cls.tables[
+ '%s.wschema' % testing.config.test_schema].insert().execute(
+ dict(b="a1", q="c1"))
+
+ @testing.requires.schemas
+ def test_keyed_accessor_wschema(self):
+ keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
+ row = testing.db.execute(keyed1.select()).first()
+
+ eq_(row.b, "a1")
+ eq_(row.q, "c1")
+ eq_(row.a, "a1")
+ eq_(row.c, "c1")
+
+ def test_keyed_accessor_single(self):
+ keyed1 = self.tables.keyed1
+ row = testing.db.execute(keyed1.select()).first()
+
+ eq_(row.b, "a1")
+ eq_(row.q, "c1")
+ eq_(row.a, "a1")
+ eq_(row.c, "c1")
+
+ def test_keyed_accessor_single_labeled(self):
+ keyed1 = self.tables.keyed1
+ row = testing.db.execute(keyed1.select().apply_labels()).first()
+
+ eq_(row.keyed1_b, "a1")
+ eq_(row.keyed1_q, "c1")
+ eq_(row.keyed1_a, "a1")
+ eq_(row.keyed1_c, "c1")
+
+ @testing.requires.duplicate_names_in_cursor_description
+ def test_keyed_accessor_composite_conflict_2(self):
+ keyed1 = self.tables.keyed1
+ keyed2 = self.tables.keyed2
+
+ row = testing.db.execute(select([keyed1, keyed2])).first()
+ # row.b is unambiguous
+ eq_(row.b, "b2")
+ # row.a is ambiguous
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambig",
+ getattr, row, "a"
+ )
+
+ def test_keyed_accessor_composite_names_precedent(self):
+ keyed1 = self.tables.keyed1
+ keyed4 = self.tables.keyed4
+
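+ # when a column's .key collides with another column's actual name,
+ # the actual name takes precedence for string targeting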
+ row = testing.db.execute(select([keyed1, keyed4])).first()
+ eq_(row.b, "b4")
+ eq_(row.q, "q4")
+ eq_(row.a, "a1")
+ eq_(row.c, "c1")
+
+ @testing.requires.duplicate_names_in_cursor_description
+ def test_keyed_accessor_composite_keys_precedent(self):
+ keyed1 = self.tables.keyed1
+ keyed3 = self.tables.keyed3
+
+ row = testing.db.execute(select([keyed1, keyed3])).first()
+ eq_(row.q, "c1")
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name 'b'",
+ getattr, row, "b"
+ )
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Ambiguous column name 'a'",
+ getattr, row, "a"
+ )
+ eq_(row.d, "d3")
+
+ def test_keyed_accessor_composite_labeled(self):
+ keyed1 = self.tables.keyed1
+ keyed2 = self.tables.keyed2
+
+ row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
+ first()
+ eq_(row.keyed1_b, "a1")
+ eq_(row.keyed1_a, "a1")
+ eq_(row.keyed1_q, "c1")
+ eq_(row.keyed1_c, "c1")
+ eq_(row.keyed2_a, "a2")
+ eq_(row.keyed2_b, "b2")
+ assert_raises(KeyError, lambda: row['keyed2_c'])
+ assert_raises(KeyError, lambda: row['keyed2_q'])
+
+ def test_column_label_overlap_fallback(self):
+ content, bar = self.tables.content, self.tables.bar
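+ # the plain label 'content_type' is not matched by the table-bound
+ # columns content.c.type or bar.c.content_type, but a lightweight
+ # column('content_type') matches via the fallback path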
+ row = testing.db.execute(
+ select([content.c.type.label("content_type")])).first()
+
+ not_in_(content.c.type, row)
+ not_in_(bar.c.content_type, row)
+
+ in_(sql.column('content_type'), row)
+
+ row = testing.db.execute(select([func.now().label("content_type")])). \
+ first()
+ not_in_(content.c.type, row)
+ not_in_(bar.c.content_type, row)
+ in_(sql.column('content_type'), row)
+
+ def test_column_label_overlap_fallback_2(self):
+ content, bar = self.tables.content, self.tables.bar
+ row = testing.db.execute(content.select(use_labels=True)).first()
+ in_(content.c.type, row)
+ not_in_(bar.c.content_type, row)
+ not_in_(sql.column('content_type'), row)
+
+ def test_columnclause_schema_column_one(self):
+ keyed2 = self.tables.keyed2
+
+ # this is addressed by [ticket:2932]
+ # ColumnClause._compare_name_for_result allows the columns
+ # that the statement is compared against to be lightweight
+ # column() objects, which results in a more liberal comparison scheme
+ a, b = sql.column('a'), sql.column('b')
+ stmt = select([a, b]).select_from(table("keyed2"))
+ row = testing.db.execute(stmt).first()
+
+ in_(keyed2.c.a, row)
+ in_(keyed2.c.b, row)
+ in_(a, row)
+ in_(b, row)
+
+ def test_columnclause_schema_column_two(self):
+ keyed2 = self.tables.keyed2
+
+ a, b = sql.column('a'), sql.column('b')
+ stmt = select([keyed2.c.a, keyed2.c.b])
+ row = testing.db.execute(stmt).first()
+
+ in_(keyed2.c.a, row)
+ in_(keyed2.c.b, row)
+ in_(a, row)
+ in_(b, row)
+
+ def test_columnclause_schema_column_three(self):
+ keyed2 = self.tables.keyed2
+
+ # this is also addressed by [ticket:2932]
+
+ a, b = sql.column('a'), sql.column('b')
+ stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
+ row = testing.db.execute(stmt).first()
+
+ in_(keyed2.c.a, row)
+ in_(keyed2.c.b, row)
+ in_(a, row)
+ in_(b, row)
+ in_(stmt.c.a, row)
+ in_(stmt.c.b, row)
+
+ def test_columnclause_schema_column_four(self):
+ keyed2 = self.tables.keyed2
+
+ # this is also addressed by [ticket:2932]
+
+ a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
+ stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
+ a, b)
+ row = testing.db.execute(stmt).first()
+
+ in_(keyed2.c.a, row)
+ in_(keyed2.c.b, row)
+ in_(a, row)
+ in_(b, row)
+ in_(stmt.c.keyed2_a, row)
+ in_(stmt.c.keyed2_b, row)
+
+ def test_columnclause_schema_column_five(self):
+ keyed2 = self.tables.keyed2
+
+ # this is also addressed by [ticket:2932]
+
+ stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
+ keyed2_a=CHAR, keyed2_b=CHAR)
+ row = testing.db.execute(stmt).first()
+
+ in_(keyed2.c.a, row)
+ in_(keyed2.c.b, row)
+ in_(stmt.c.keyed2_a, row)
+ in_(stmt.c.keyed2_b, row)
diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py
index cd9f632b9..8c189a0dd 100644
--- a/test/sql/test_returning.py
+++ b/test/sql/test_returning.py
@@ -387,6 +387,33 @@ class ReturnDefaultsTest(fixtures.TablesTest):
{"data": None, 'upddef': 1}
)
+ @testing.fails_on("oracle+cx_oracle", "seems like a cx_oracle bug")
+ def test_insert_all(self):
+ t1 = self.tables.t1
+ result = testing.db.execute(
+ t1.insert().values(upddef=1).return_defaults()
+ )
+ eq_(
+ dict(result.returned_defaults),
+ {"id": 1, "data": None, "insdef": 0}
+ )
+
+ @testing.fails_on("oracle+cx_oracle", "seems like a cx_oracle bug")
+ def test_update_all(self):
+ t1 = self.tables.t1
+ testing.db.execute(
+ t1.insert().values(upddef=1)
+ )
+ result = testing.db.execute(
+ t1.update().
+ values(insdef=2).return_defaults()
+ )
+ eq_(
+ dict(result.returned_defaults),
+ {'upddef': 1}
+ )
+
+
class ImplicitReturningFlag(fixtures.TestBase):
__backend__ = True
diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py
index 3390f4a77..b9cbbf480 100644
--- a/test/sql/test_selectable.py
+++ b/test/sql/test_selectable.py
@@ -458,6 +458,26 @@ class SelectableTest(
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
+ @testing.emits_warning("Column 'col1'")
+ def test_union_alias_dupe_keys_grouped(self):
+ s1 = select([table1.c.col1, table1.c.col2, table2.c.col1]).\
+ limit(1).alias()
+ s2 = select([table2.c.col1, table2.c.col2, table2.c.col3]).limit(1)
+ u1 = union(s1, s2)
+
+ assert u1.corresponding_column(
+ s1.c._all_columns[0]) is u1.c._all_columns[0]
+ assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
+ assert u1.corresponding_column(s1.c.col2) is u1.c.col2
+ assert u1.corresponding_column(s2.c.col2) is u1.c.col2
+
+ assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
+
+ # this differs from the non-alias test because table2.c.col1 corresponds
+ # more directly to s2.c.col1 than it does to s1.c.col1.
+ assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
+ assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
+
def test_select_union(self):
# like testaliasunion, but off a Select off the union.
@@ -912,10 +932,10 @@ class AnonLabelTest(fixtures.TestBase):
c1 = func.count('*')
assert c1.label(None) is not c1
- eq_(str(select([c1])), "SELECT count(:param_1) AS count_1")
+ eq_(str(select([c1])), "SELECT count(:count_2) AS count_1")
c2 = select([c1]).compile()
- eq_(str(select([c1.label(None)])), "SELECT count(:param_1) AS count_1")
+ eq_(str(select([c1.label(None)])), "SELECT count(:count_2) AS count_1")
def test_named_labels_named_column(self):
c1 = column('x')
diff --git a/test/sql/test_types.py b/test/sql/test_types.py
index 2545dec59..f1fb611fb 100644
--- a/test/sql/test_types.py
+++ b/test/sql/test_types.py
@@ -1,5 +1,6 @@
# coding: utf-8
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, expect_warnings
+from sqlalchemy.testing import eq_, is_, assert_raises, \
+ assert_raises_message, expect_warnings
import decimal
import datetime
import os
@@ -9,9 +10,10 @@ from sqlalchemy import (
and_, func, Date, LargeBinary, literal, cast, text, Enum,
type_coerce, VARCHAR, Time, DateTime, BigInteger, SmallInteger, BOOLEAN,
BLOB, NCHAR, NVARCHAR, CLOB, TIME, DATE, DATETIME, TIMESTAMP, SMALLINT,
- INTEGER, DECIMAL, NUMERIC, FLOAT, REAL)
+ INTEGER, DECIMAL, NUMERIC, FLOAT, REAL, Array)
from sqlalchemy.sql import ddl
-
+from sqlalchemy.sql import visitors
+from sqlalchemy import inspection
from sqlalchemy import exc, types, util, dialects
for name in dialects.__all__:
__import__("sqlalchemy.dialects.%s" % name)
@@ -25,6 +27,7 @@ from sqlalchemy.testing import AssertsCompiledSQL, AssertsExecutionResults, \
from sqlalchemy.testing.util import picklers
from sqlalchemy.testing.util import round_decimal
from sqlalchemy.testing import fixtures
+from sqlalchemy.testing import mock
class AdaptTest(fixtures.TestBase):
@@ -137,7 +140,7 @@ class AdaptTest(fixtures.TestBase):
for is_down_adaption, typ, target_adaptions in adaptions():
if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
continue
- elif typ is dialects.postgresql.ARRAY:
+ elif issubclass(typ, Array):
t1 = typ(String)
else:
t1 = typ()
@@ -187,12 +190,28 @@ class AdaptTest(fixtures.TestBase):
for typ in self._all_types():
if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
continue
- elif typ is dialects.postgresql.ARRAY:
+ elif issubclass(typ, Array):
t1 = typ(String)
else:
t1 = typ()
repr(t1)
+ def test_adapt_constructor_copy_override_kw(self):
+ """test that adapt() can accept kw args that override
+ the state of the original object.
+
+ This essentially tests the behavior of util.constructor_copy().
+
+ """
+ t1 = String(length=50, convert_unicode=False)
+ t2 = t1.adapt(Text, convert_unicode=True)
+ eq_(
+ t2.length, 50
+ )
+ eq_(
+ t2.convert_unicode, True
+ )
+
class TypeAffinityTest(fixtures.TestBase):
@@ -771,6 +790,68 @@ class TypeCoerceCastTest(fixtures.TablesTest):
[('BIND_INd1', 'BIND_INd1BIND_OUT')]
)
+ def test_cast_replace_col_w_bind(self):
+ self._test_replace_col_w_bind(cast)
+
+ def test_type_coerce_replace_col_w_bind(self):
+ self._test_replace_col_w_bind(type_coerce)
+
+ def _test_replace_col_w_bind(self, coerce_fn):
+ MyType = self.MyType
+
+ t = self.tables.t
+ t.insert().values(data=coerce_fn('d1', MyType)).execute()
+
+ stmt = select([t.c.data, coerce_fn(t.c.data, MyType)])
+
+ def col_to_bind(col):
+ if col is t.c.data:
+ return bindparam(None, "x", type_=col.type, unique=True)
+ return None
+
+ # ensure we evaluate the expression so that we can see
+ # that the clone resets this info
+ stmt.compile()
+
+ new_stmt = visitors.replacement_traverse(stmt, {}, col_to_bind)
+
+ # original statement
+ eq_(
+ testing.db.execute(stmt).fetchall(),
+ [('BIND_INd1', 'BIND_INd1BIND_OUT')]
+ )
+
+ # replaced with binds; CAST can't affect the bound parameter
+ # on the way in here
+ eq_(
+ testing.db.execute(new_stmt).fetchall(),
+ [('x', 'BIND_INxBIND_OUT')] if coerce_fn is type_coerce
+ else [('x', 'xBIND_OUT')]
+ )
+
+ def test_cast_bind(self):
+ self._test_bind(cast)
+
+ def test_type_bind(self):
+ self._test_bind(type_coerce)
+
+ def _test_bind(self, coerce_fn):
+ MyType = self.MyType
+
+ t = self.tables.t
+ t.insert().values(data=coerce_fn('d1', MyType)).execute()
+
+ stmt = select([
+ bindparam(None, "x", String(50), unique=True),
+ coerce_fn(bindparam(None, "x", String(50), unique=True), MyType)
+ ])
+
+ eq_(
+ testing.db.execute(stmt).fetchall(),
+ [('x', 'BIND_INxBIND_OUT')] if coerce_fn is type_coerce
+ else [('x', 'xBIND_OUT')]
+ )
+
@testing.fails_on(
"oracle", "ORA-00906: missing left parenthesis - "
"seems to be CAST(:param AS type)")
@@ -804,6 +885,7 @@ class TypeCoerceCastTest(fixtures.TablesTest):
[('BIND_INd1BIND_OUT', )])
+
class VariantTest(fixtures.TestBase, AssertsCompiledSQL):
def setup(self):
@@ -1160,16 +1242,13 @@ class EnumTest(AssertsCompiledSQL, fixtures.TestBase):
def __init__(self, name):
self.name = name
- class MyEnum(types.SchemaType, TypeDecorator):
+ class MyEnum(TypeDecorator):
def __init__(self, values):
self.impl = Enum(
*[v.name for v in values], name="myenum",
native_enum=False)
- def _set_table(self, table, column):
- self.impl._set_table(table, column)
-
# future method
def process_literal_param(self, value, dialect):
return value.name
@@ -1326,6 +1405,68 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
with open(f, mode='rb') as o:
return o.read()
+
+class ArrayTest(fixtures.TestBase):
+
+ def _myarray_fixture(self):
+ class MyArray(Array):
+ pass
+ return MyArray
+
+ def test_array_index_map_dimensions(self):
+ col = column('x', Array(Integer, dimensions=3))
+ is_(
+ col[5].type._type_affinity, Array
+ )
+ eq_(
+ col[5].type.dimensions, 2
+ )
+ is_(
+ col[5][6].type._type_affinity, Array
+ )
+ eq_(
+ col[5][6].type.dimensions, 1
+ )
+ is_(
+ col[5][6][7].type._type_affinity, Integer
+ )
+
+ def test_array_getitem_single_type(self):
+ m = MetaData()
+ arrtable = Table(
+ 'arrtable', m,
+ Column('intarr', Array(Integer)),
+ Column('strarr', Array(String)),
+ )
+ is_(arrtable.c.intarr[1].type._type_affinity, Integer)
+ is_(arrtable.c.strarr[1].type._type_affinity, String)
+
+ def test_array_getitem_slice_type(self):
+ m = MetaData()
+ arrtable = Table(
+ 'arrtable', m,
+ Column('intarr', Array(Integer)),
+ Column('strarr', Array(String)),
+ )
+ is_(arrtable.c.intarr[1:3].type._type_affinity, Array)
+ is_(arrtable.c.strarr[1:3].type._type_affinity, Array)
+
+ def test_array_getitem_slice_type_dialect_level(self):
+ MyArray = self._myarray_fixture()
+ m = MetaData()
+ arrtable = Table(
+ 'arrtable', m,
+ Column('intarr', MyArray(Integer)),
+ Column('strarr', MyArray(String)),
+ )
+ is_(arrtable.c.intarr[1:3].type._type_affinity, Array)
+ is_(arrtable.c.strarr[1:3].type._type_affinity, Array)
+
+ # but the slice returns the actual type
+ assert isinstance(arrtable.c.intarr[1:3].type, MyArray)
+ assert isinstance(arrtable.c.strarr[1:3].type, MyArray)
+
+
test_table = meta = MyCustomType = MyTypeDec = None
@@ -1631,6 +1772,34 @@ class ExpressionTest(
assert distinct(test_table.c.data).type == test_table.c.data.type
assert test_table.c.data.distinct().type == test_table.c.data.type
+ def test_detect_coercion_of_builtins(self):
+ @inspection._self_inspects
+ class SomeSQLAThing(object):
+ def __repr__(self):
+ return "some_sqla_thing()"
+
+ class SomeOtherThing(object):
+ pass
+
+ assert_raises_message(
+ exc.ArgumentError,
+ r"Object some_sqla_thing\(\) is not legal as a SQL literal value",
+ lambda: column('a', String) == SomeSQLAThing()
+ )
+
+ is_(
+ bindparam('x', SomeOtherThing()).type,
+ types.NULLTYPE
+ )
+
+ def test_detect_coercion_not_fooled_by_mock(self):
+ m1 = mock.Mock()
+ is_(
+ bindparam('x', m1).type,
+ types.NULLTYPE
+ )
+
+
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -1899,12 +2068,9 @@ class BooleanTest(
def __init__(self, value):
self.value = value
- class MyBool(types.SchemaType, TypeDecorator):
+ class MyBool(TypeDecorator):
impl = Boolean()
- def _set_table(self, table, column):
- self.impl._set_table(table, column)
-
# future method
def process_literal_param(self, value, dialect):
return value.value
diff --git a/test/sql/test_update.py b/test/sql/test_update.py
index 58c86613b..3ab580b11 100644
--- a/test/sql/test_update.py
+++ b/test/sql/test_update.py
@@ -4,6 +4,7 @@ from sqlalchemy.dialects import mysql
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, eq_, fixtures
from sqlalchemy.testing.schema import Table, Column
+from sqlalchemy import util
class _UpdateFromTestBase(object):
@@ -32,6 +33,11 @@ class _UpdateFromTestBase(object):
test_needs_autoincrement=True),
Column('address_id', None, ForeignKey('addresses.id')),
Column('data', String(30)))
+ Table('update_w_default', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('x', Integer),
+ Column('ycol', Integer, key='y'),
+ Column('data', String(30), onupdate=lambda: "hi"))
@classmethod
def fixtures(cls):
@@ -165,6 +171,124 @@ class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
table1.c.name: table1.c.name + 'lala',
table1.c.myid: func.do_stuff(table1.c.myid, literal('hoho'))
}
+
+ self.assert_compile(
+ update(
+ table1,
+ (table1.c.myid == func.hoho(4)) & (
+ table1.c.name == literal('foo') +
+ table1.c.name +
+ literal('lala')),
+ values=values),
+ 'UPDATE mytable '
+ 'SET '
+ 'myid=do_stuff(mytable.myid, :param_1), '
+ 'name=(mytable.name || :name_1) '
+ 'WHERE '
+ 'mytable.myid = hoho(:hoho_1) AND '
+ 'mytable.name = :param_2 || mytable.name || :param_3')
+
+ def test_update_ordered_parameters_1(self):
+ table1 = self.tables.mytable
+
+ # Confirm that we can pass values as a list of 2-tuples
+ # note these are ordered *differently* from table.c
+ values = [
+ (table1.c.name, table1.c.name + 'lala'),
+ (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho'))),
+ ]
+ self.assert_compile(
+ update(
+ table1,
+ (table1.c.myid == func.hoho(4)) & (
+ table1.c.name == literal('foo') +
+ table1.c.name +
+ literal('lala')),
+ preserve_parameter_order=True,
+ values=values),
+ 'UPDATE mytable '
+ 'SET '
+ 'name=(mytable.name || :name_1), '
+ 'myid=do_stuff(mytable.myid, :param_1) '
+ 'WHERE '
+ 'mytable.myid = hoho(:hoho_1) AND '
+ 'mytable.name = :param_2 || mytable.name || :param_3')
+
+ def test_update_ordered_parameters_2(self):
+ table1 = self.tables.mytable
+
+ # Confirm that we can pass values as a list of 2-tuples
+ # note these are ordered *differently* from table.c
+ values = [
+ (table1.c.name, table1.c.name + 'lala'),
+ ('description', 'some desc'),
+ (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho')))
+ ]
+ self.assert_compile(
+ update(
+ table1,
+ (table1.c.myid == func.hoho(4)) & (
+ table1.c.name == literal('foo') +
+ table1.c.name +
+ literal('lala')),
+ preserve_parameter_order=True).values(values),
+ 'UPDATE mytable '
+ 'SET '
+ 'name=(mytable.name || :name_1), '
+ 'description=:description, '
+ 'myid=do_stuff(mytable.myid, :param_1) '
+ 'WHERE '
+ 'mytable.myid = hoho(:hoho_1) AND '
+ 'mytable.name = :param_2 || mytable.name || :param_3')
+
+ def test_update_ordered_parameters_fire_onupdate(self):
+ table = self.tables.update_w_default
+
+ values = [
+ (table.c.y, table.c.x + 5),
+ ('x', 10)
+ ]
+
+ self.assert_compile(
+ table.update(preserve_parameter_order=True).values(values),
+ "UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
+ "x=:x, data=:data"
+ )
+
+ def test_update_ordered_parameters_override_onupdate(self):
+ table = self.tables.update_w_default
+
+ values = [
+ (table.c.y, table.c.x + 5),
+ (table.c.data, table.c.x + 10),
+ ('x', 10)
+ ]
+
+ self.assert_compile(
+ table.update(preserve_parameter_order=True).values(values),
+ "UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
+ "data=(update_w_default.x + :x_2), x=:x"
+ )
+
+ def test_update_preserve_order_reqs_listtups(self):
+ table1 = self.tables.mytable
+ testing.assert_raises_message(
+ ValueError,
+ "When preserve_parameter_order is True, values\(\) "
+ "only accepts a list of 2-tuples",
+ table1.update(preserve_parameter_order=True).values,
+ {"description": "foo", "name": "bar"}
+ )
+
+ def test_update_ordereddict(self):
+ table1 = self.tables.mytable
+
+ # Confirm that ordered dicts are treated as normal dicts,
+ # columns sorted in table order
+ values = util.OrderedDict((
+ (table1.c.name, table1.c.name + 'lala'),
+ (table1.c.myid, func.do_stuff(table1.c.myid, literal('hoho')))))
+
self.assert_compile(
update(
table1,
diff --git a/tox.ini b/tox.ini
index 2bb589207..299ca2863 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = full,py26,py27,py33,py34
+envlist = full,py26,py27,py33,py34,py35
[testenv]
deps=pytest
@@ -20,6 +20,11 @@ sitepackages=True
# DISABLE_SQLALCHEMY_CEXT are honored
usedevelop=False
+# tox as of 2.0 blocks all environment variables from the
+# outside, unless they are here (or in TOX_TESTENV_PASSENV,
+# wildcards OK). Need at least these
+passenv=ORACLE_HOME NLS_LANG
+
commands=
python -m pytest {posargs}
@@ -50,6 +55,6 @@ commands = python -m flake8 {posargs}
[flake8]
show-source = True
-ignore = E711,E712,E721
+ignore = E711,E712,E721,N806
exclude=.venv,.git,.tox,dist,doc,*egg,build