Diffstat
 -rw-r--r--  MANIFEST.in                                        |   2
 -rw-r--r--  doc/build/changelog/changelog_10.rst               |  96
 -rw-r--r--  doc/build/changelog/migration_09.rst               |  66
 -rw-r--r--  doc/build/changelog/migration_10.rst               |  16
 -rw-r--r--  doc/build/conf.py                                  |   4
 -rw-r--r--  doc/build/core/connections.rst                     |  59
 -rw-r--r--  doc/build/core/defaults.rst                        |  40
 -rw-r--r--  doc/build/core/pooling.rst                         |   2
 -rw-r--r--  doc/build/orm/session_transaction.rst              |   9
 -rw-r--r--  lib/sqlalchemy/__init__.py                         |   2
 -rw-r--r--  lib/sqlalchemy/dialects/postgresql/base.py         |  10
 -rw-r--r--  lib/sqlalchemy/dialects/postgresql/constraints.py  |  37
 -rw-r--r--  lib/sqlalchemy/dialects/postgresql/psycopg2.py     |  36
 -rw-r--r--  lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py |  12
 -rw-r--r--  lib/sqlalchemy/dialects/sqlite/base.py             | 101
 -rw-r--r--  lib/sqlalchemy/engine/base.py                      |  25
 -rw-r--r--  lib/sqlalchemy/engine/result.py                    |  21
 -rw-r--r--  lib/sqlalchemy/events.py                           |  17
 -rw-r--r--  lib/sqlalchemy/ext/automap.py                      |   6
 -rw-r--r--  lib/sqlalchemy/orm/persistence.py                  |   3
 -rw-r--r--  lib/sqlalchemy/orm/query.py                        |  11
 -rw-r--r--  lib/sqlalchemy/orm/util.py                         |   7
 -rw-r--r--  lib/sqlalchemy/sql/elements.py                     |   3
 -rw-r--r--  lib/sqlalchemy/sql/schema.py                       |  28
 -rw-r--r--  lib/sqlalchemy/testing/__init__.py                 |   2
 -rw-r--r--  lib/sqlalchemy/testing/assertions.py               |   5
 -rw-r--r--  lib/sqlalchemy/testing/fixtures.py                 |  15
 -rw-r--r--  lib/sqlalchemy/util/langhelpers.py                 |  30
 -rw-r--r--  test/base/test_utils.py                            |   3
 -rw-r--r--  test/dialect/postgresql/test_compiler.py           |  46
 -rw-r--r--  test/dialect/postgresql/test_dialect.py            |  31
 -rw-r--r--  test/dialect/postgresql/test_query.py              |   2
 -rw-r--r--  test/dialect/postgresql/test_reflection.py         |   2
 -rw-r--r--  test/dialect/postgresql/test_types.py              |  40
 -rw-r--r--  test/engine/test_execute.py                        | 171
 -rw-r--r--  test/ext/test_extendedattr.py                      |  13
 -rw-r--r--  test/ext/test_hybrid.py                            |   1
 -rw-r--r--  test/orm/inheritance/test_single.py                |  25
 -rw-r--r--  test/orm/test_bulk.py                              |  59
 -rw-r--r--  test/orm/test_descriptor.py                        |   1
 -rw-r--r--  test/orm/test_query.py                             |  25
 -rw-r--r--  test/requirements.py                               |  29
 -rw-r--r--  test/sql/test_generative.py                        |  21
 43 files changed, 938 insertions(+), 196 deletions(-)
diff --git a/MANIFEST.in b/MANIFEST.in
index 23aa88268..9b5b29936 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -9,5 +9,5 @@ recursive-include test *.py *.dat
# don't come in if --with-cextensions isn't specified.
recursive-include lib *.c *.txt
-include README* AUTHORS LICENSE distribute_setup.py sa2to3.py ez_setup.py sqla_nose.py CHANGES*
+include README* AUTHORS LICENSE distribute_setup.py sa2to3.py ez_setup.py sqla_nose.py CHANGES* tox.ini
prune doc/build/output
diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst
index 68d809eaf..a02a13ef9 100644
--- a/doc/build/changelog/changelog_10.rst
+++ b/doc/build/changelog/changelog_10.rst
@@ -16,7 +16,103 @@
:start-line: 5
.. changelog::
+ :version: 1.0.6
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3462
+
+ Fixed 1.0 regression where the enhanced behavior of single-inheritance
+ joins of :ticket:`3222` takes place inappropriately
+ for a JOIN along explicit join criteria with a single-inheritance
+ subclass that does not make use of any discriminator, resulting
+ in an additional "AND NULL" clause.
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 3454
+
+        Repaired the :class:`.ExcludeConstraint` construct to support common
+        features that other objects like :class:`.Index` now support, namely
+        that the column expression may be specified as an arbitrary SQL
+        expression such as :obj:`.cast` or :obj:`.text`.
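+
+        A minimal sketch, mirroring the new tests for this change (the
+        ``testtbl`` table here is illustrative only)::
+
+            from sqlalchemy import MetaData, Table, Column, String, cast, Text
+            from sqlalchemy.dialects.postgresql import ExcludeConstraint
+
+            metadata = MetaData()
+            tbl = Table('testtbl', metadata, Column('room', String))
+
+            # the element may now be any SQL expression, not just a Column
+            tbl.append_constraint(
+                ExcludeConstraint((cast(tbl.c.room, Text), '='))
+            )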
+
+ .. change::
+ :tags: feature, postgresql
+ :pullreq: github:182
+
+        Added new execution option ``max_row_buffer``, interpreted
+        by the psycopg2 dialect when the ``stream_results`` option is
+        used; it sets a limit on the size of the row buffer that may be
+        allocated. This value is also set based on the integer
+        value sent to :meth:`.Query.yield_per`. Pull request courtesy
+ mcclurem.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3451
+ :pullreq: github:181
+
+ Fixed bug in new :meth:`.Session.bulk_update_mappings` feature where
+ the primary key columns used in the WHERE clause to locate the row
+ would also be included in the SET clause, setting their value to
+ themselves unnecessarily. Pull request courtesy Patrick Hayes.
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 3448
+
+ Fixed an unexpected-use regression whereby custom :class:`.Comparator`
+ objects that made use of the ``__clause_element__()`` method and
+ returned an object that was an ORM-mapped
+ :class:`.InstrumentedAttribute` and not explicitly a
+ :class:`.ColumnElement` would fail to be correctly
+ handled when passed as an expression to :meth:`.Session.query`.
+ The logic in 0.9 happened to succeed on this, so this use case is now
+ supported.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 3445
+
+ Fixed a bug where clause adaption as applied to a :class:`.Label`
+ object would fail to accommodate the labeled SQL expression
+ in all cases, such that any SQL operation that made use of
+ :meth:`.Label.self_group` would use the original unadapted
+ expression. One effect of this would be that an ORM :func:`.aliased`
+ construct would not fully accommodate attributes mapped by
+ :obj:`.column_property`, such that the un-aliased table could
+        leak out when the property was used in some kinds of SQL
+ comparisons.
+
+ .. change::
+ :tags: bug, documentation
+ :tickets: 2077
+
+ Fixed an internal "memoization" routine for method types such
+ that a Python descriptor is no longer used; repairs inspectability
+ of these methods including support for Sphinx documentation.
+
+.. changelog::
:version: 1.0.5
+ :released: June 7, 2015
+
+ .. change::
+ :tags: feature, engine
+
+ Added new engine event :meth:`.ConnectionEvents.engine_disposed`.
+ Called after the :meth:`.Engine.dispose` method is called.
+
+ .. change::
+ :tags: bug, postgresql, pypy
+ :tickets: 3439
+
+ Repaired some typing and test issues related to the pypy
+ psycopg2cffi dialect, in particular that the current 2.7.0 version
+ does not have native support for the JSONB type. The version detection
+ for psycopg2 features has been tuned into a specific sub-version
+ for psycopg2cffi. Additionally, test coverage has been enabled
+ for the full series of psycopg2 features under psycopg2cffi.
.. change::
:tags: feature, ext
diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst
index 4904dcfdf..b07aed925 100644
--- a/doc/build/changelog/migration_09.rst
+++ b/doc/build/changelog/migration_09.rst
@@ -9,7 +9,7 @@ What's New in SQLAlchemy 0.9?
and SQLAlchemy version 0.9, which had its first production
release on December 30, 2013.
- Document last updated: February 28, 2014
+ Document last updated: June 10, 2015
Introduction
============
@@ -402,6 +402,70 @@ This is a small change demonstrated as follows::
Behavioral Changes - Core
=========================
+Type objects no longer accept ignored keyword arguments
+-------------------------------------------------------
+
+Up through the 0.8 series, most type objects accepted arbitrary keyword
+arguments which were silently ignored::
+
+ from sqlalchemy import Date, Integer
+
+ # storage_format argument here has no effect on any backend;
+ # it needs to be on the SQLite-specific type
+ d = Date(storage_format="%(day)02d.%(month)02d.%(year)04d")
+
+ # display_width argument here has no effect on any backend;
+ # it needs to be on the MySQL-specific type
+ i = Integer(display_width=5)
+
+This was a very old bug for which a deprecation warning was added to the
+0.8 series, but because nobody ever runs Python with the "-W" flag, it
+was mostly never seen::
+
+
+ $ python -W always::DeprecationWarning ~/dev/sqlalchemy/test.py
+ /Users/classic/dev/sqlalchemy/test.py:5: SADeprecationWarning: Passing arguments to
+ type object constructor <class 'sqlalchemy.types.Date'> is deprecated
+ d = Date(storage_format="%(day)02d.%(month)02d.%(year)04d")
+ /Users/classic/dev/sqlalchemy/test.py:9: SADeprecationWarning: Passing arguments to
+ type object constructor <class 'sqlalchemy.types.Integer'> is deprecated
+ i = Integer(display_width=5)
+
+As of the 0.9 series the "catch all" constructor is removed from
+:class:`.TypeEngine`, and these meaningless arguments are no longer accepted.
+
+The correct way to make use of dialect-specific arguments such as
+``storage_format`` and ``display_width`` is to use the appropriate
+dialect-specific types::
+
+ from sqlalchemy.dialects.sqlite import DATE
+ from sqlalchemy.dialects.mysql import INTEGER
+
+ d = DATE(storage_format="%(day)02d.%(month)02d.%(year)04d")
+
+ i = INTEGER(display_width=5)
+
+What about the case where we want the dialect-agnostic type also? We
+use the :meth:`.TypeEngine.with_variant` method::
+
+ from sqlalchemy import Date, Integer
+ from sqlalchemy.dialects.sqlite import DATE
+ from sqlalchemy.dialects.mysql import INTEGER
+
+ d = Date().with_variant(
+ DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"),
+ "sqlite"
+ )
+
+ i = Integer().with_variant(
+ INTEGER(display_width=5),
+ "mysql"
+ )
+
+:meth:`.TypeEngine.with_variant` isn't new; it was added in SQLAlchemy
+0.7.2. So code that is running on the 0.8 series can be corrected to use
+this approach and tested before upgrading to 0.9.
+
``None`` can no longer be used as a "partial AND" constructor
--------------------------------------------------------------
diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst
index 4999e45de..08e26fd4b 100644
--- a/doc/build/changelog/migration_10.rst
+++ b/doc/build/changelog/migration_10.rst
@@ -8,7 +8,7 @@ What's New in SQLAlchemy 1.0?
undergoing maintenance releases as of May, 2014,
and SQLAlchemy version 1.0, released in April, 2015.
- Document last updated: May 2, 2015
+ Document last updated: June 9, 2015
Introduction
============
@@ -1669,6 +1669,20 @@ has always emitted a warning here and ignored additional results for
:ticket:`3249`
+query.update() / query.delete() raises if used with join(), select_from(), from_self()
+--------------------------------------------------------------------------------------
+
+A warning is emitted in SQLAlchemy 0.9.10 (not yet released as of
+June 9, 2015) when the :meth:`.Query.update` or :meth:`.Query.delete` methods
+are invoked against a query which has also called upon :meth:`.Query.join`,
+:meth:`.Query.outerjoin`,
+:meth:`.Query.select_from` or :meth:`.Query.from_self`. These are unsupported
+use cases which silently fail in the 0.9 series up until 0.9.10, where a
+warning is emitted. In 1.0, these cases raise an exception.
+
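+For example, a short sketch of a pattern that now raises, assuming a
+typical ``User``/``Address`` mapping (the names here are illustrative
+only)::
+
+    # raises in 1.0; silently failed before 0.9.10
+    session.query(User).join(User.addresses).filter(
+        Address.email == 'foo').update({"name": "ed"})
+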
+:ticket:`3349`
+
+
query.update() with ``synchronize_session='evaluate'`` raises on multi-table update
-----------------------------------------------------------------------------------
diff --git a/doc/build/conf.py b/doc/build/conf.py
index 24e235a32..61635be40 100644
--- a/doc/build/conf.py
+++ b/doc/build/conf.py
@@ -138,9 +138,9 @@ copyright = u'2007-2015, the SQLAlchemy authors and contributors'
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
-release = "1.0.4"
+release = "1.0.5"
-release_date = "May 7, 2015"
+release_date = "June 7, 2015"
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst
index b6770bb82..6f3fdcb84 100644
--- a/doc/build/core/connections.rst
+++ b/doc/build/core/connections.rst
@@ -368,6 +368,65 @@ the SQL statement. When the :class:`.ResultProxy` is closed, the underlying
:class:`.Connection` is closed for us, resulting in the
DBAPI connection being returned to the pool with transactional resources removed.
+.. _engine_disposal:
+
+Engine Disposal
+===============
+
+The :class:`.Engine` refers to a connection pool, which means under normal
+circumstances, there are open database connections present while the
+:class:`.Engine` object is still resident in memory. When an :class:`.Engine`
+is garbage collected, its connection pool is no longer referred to by
+that :class:`.Engine`, and assuming none of its connections are still checked
+out, the pool and its connections will also be garbage collected, which has the
+effect of closing out the actual database connections as well.
+
+The :class:`.Engine` is normally intended to be a long-lived, typically permanent
+fixture throughout the lifespan of an application. It is **not** intended
+to be created and disposed on a per-connection basis; it is instead
+a registry of connections. However, in those cases where all connection
+resources referred to by the :class:`.Engine` need to be completely
+closed out,
+the :class:`.Engine` can be explicitly disposed using the :meth:`.Engine.dispose`
+method. This disposes of the engine's underlying connection pool and
+replaces it with a new one that's empty. Provided that the :class:`.Engine`
+is discarded at this point and no longer used, all **checked-in** connections
+which it refers to will also be fully closed.
+
+Valid use cases for calling :meth:`.Engine.dispose` include:
+
+* When a program wants to release any remaining checked-in connections
+ held by the connection pool and expects to no longer be connected
+ to that database at all for any future operations.
+
+* When a program uses multiprocessing or ``fork()``, and an
+ :class:`.Engine` object is copied to the child process,
+ :meth:`.Engine.dispose` should be called so that the engine creates
+ brand new database connections local to that fork. Database connections
+ generally do **not** travel across process boundaries.
+
+* Within test suites or multitenancy scenarios where many
+ ad-hoc, short-lived :class:`.Engine` objects may be created and disposed.
+
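+For example, a minimal sketch of the ``fork()`` case above, where
+``engine`` is assumed to be an existing module-level :class:`.Engine`::
+
+    import multiprocessing
+
+    def run_in_child():
+        # the Engine was inherited from the parent process; dispose
+        # of its pool so that this child makes brand new connections
+        engine.dispose()
+
+        with engine.connect() as conn:
+            conn.execute("select 1")
+
+    process = multiprocessing.Process(target=run_in_child)
+    process.start()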
+
+Connections that are **checked out** are **not** discarded when the
+engine is disposed or garbage collected, as these connections are still
+strongly referenced elsewhere by the application.
+However, after :meth:`.Engine.dispose` is called, those
+connections are no longer associated with that :class:`.Engine`; when they
+are closed, they will be returned to their now-orphaned connection pool
+which will ultimately be garbage collected, once all connections are checked in.
+Since this process is not as clean, it is strongly recommended that
+:meth:`.Engine.dispose` be called only after all checked-out connections
+have been checked in.
+
+An alternative for applications that are negatively impacted by the
+:class:`.Engine` object's use of connection pooling is to disable pooling
+entirely. This typically incurs only a modest performance impact upon the
+use of new connections, and means that when a connection is checked in,
+it is entirely closed out and is not held in memory. See :ref:`pool_switching`
+for guidelines on how to disable pooling.
+
.. _threadlocal_strategy:
Using the Threadlocal Execution Strategy
diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst
index 1d55cd6c6..4166ac449 100644
--- a/doc/build/core/defaults.rst
+++ b/doc/build/core/defaults.rst
@@ -325,6 +325,46 @@ executed standalone like a SQL expression, which has the effect of calling its
seq = Sequence('some_sequence')
nextid = connection.execute(seq)
+Associating a Sequence as the Server Side Default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When we associate a :class:`.Sequence` with a :class:`.Column` as above,
+this association is an **in-Python only** association. The CREATE TABLE
+that would be generated for our :class:`.Table` would not refer to this
+sequence. If we want the sequence to be used as a server-side default,
+meaning it takes effect even if we emit INSERT commands to the table from
+the SQL command line, we can use the :paramref:`.Column.server_default`
+parameter in conjunction with the value-generation function of the
+sequence, available from the :meth:`.Sequence.next_value` method::
+
+ cart_id_seq = Sequence('cart_id_seq')
+ table = Table("cartitems", meta,
+ Column(
+ "cart_id", Integer, cart_id_seq,
+ server_default=cart_id_seq.next_value(), primary_key=True),
+ Column("description", String(40)),
+ Column("createdate", DateTime())
+ )
+
+The above metadata will generate a CREATE TABLE statement on Postgresql as::
+
+ CREATE TABLE cartitems (
+ cart_id INTEGER DEFAULT nextval('cart_id_seq') NOT NULL,
+ description VARCHAR(40),
+ createdate TIMESTAMP WITHOUT TIME ZONE,
+ PRIMARY KEY (cart_id)
+ )
+
+Above, we also place the :class:`.Sequence` as a Python-side default; that
+is, it is mentioned twice in the :class:`.Column` definition. Depending
+on the backend in use, this may not be strictly necessary; for example,
+on the Postgresql backend the Core will use ``RETURNING`` to access the
+newly generated primary key value in any case. However, since
+:class:`.Sequence` was originally intended to be a Python-side
+directive first and foremost, it's probably a good idea for best
+compatibility to specify it in this way as well.
+
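+With the server-side default in place, a plain INSERT that omits
+``cart_id`` will make use of the sequence even when emitted from outside
+of SQLAlchemy. A minimal sketch, where ``conn`` is assumed to be a
+connection to a Postgresql database::
+
+    conn.execute(
+        "INSERT INTO cartitems (description) VALUES ('a bicycle')"
+    )
+    cart_id = conn.execute(
+        "SELECT cart_id FROM cartitems"
+    ).scalar()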
+
Default Objects API
-------------------
diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst
index 0dbf835d9..ce6d443f9 100644
--- a/doc/build/core/pooling.rst
+++ b/doc/build/core/pooling.rst
@@ -56,6 +56,8 @@ queued up - the pool would only grow to that size if the application
actually used five connections concurrently, in which case the usage of a
small pool is an entirely appropriate default behavior.
+.. _pool_switching:
+
Switching Pool Implementations
------------------------------
diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst
index 24a844650..bca3e944f 100644
--- a/doc/build/orm/session_transaction.rst
+++ b/doc/build/orm/session_transaction.rst
@@ -484,7 +484,9 @@ everything is rolled back.
from sqlalchemy import event
+
class SomeTest(TestCase):
+
def setUp(self):
# connect to the database
self.connection = engine.connect()
@@ -502,7 +504,12 @@ everything is rolled back.
@event.listens_for(self.session, "after_transaction_end")
def restart_savepoint(session, transaction):
if transaction.nested and not transaction._parent.nested:
- session.begin_nested()
+ # ensure that state is expired the way
+ # session.commit() at the top level normally does
+ # (optional step)
+ session.expire_all()
+
+ session.begin_nested()
# ... the tearDown() method stays the same
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
index 6ff33144a..afddd5941 100644
--- a/lib/sqlalchemy/__init__.py
+++ b/lib/sqlalchemy/__init__.py
@@ -120,7 +120,7 @@ from .schema import (
from .inspection import inspect
from .engine import create_engine, engine_from_config
-__version__ = '1.0.5'
+__version__ = '1.0.6'
def __go(lcls):
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index fa11956ad..b46c65335 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -1617,15 +1617,17 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += " WHERE " + where_compiled
return text
- def visit_exclude_constraint(self, constraint):
+ def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
elements = []
- for c in constraint.columns:
- op = constraint.operators[c.name]
- elements.append(self.preparer.quote(c.name) + ' WITH ' + op)
+ for expr, name, op in constraint._render_exprs:
+ kw['include_table'] = False
+ elements.append(
+ "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
+ )
text += "EXCLUDE USING %s (%s)" % (constraint.using,
', '.join(elements))
if constraint.where is not None:
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py
index 0371daf3d..4cfc050de 100644
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ b/lib/sqlalchemy/dialects/postgresql/constraints.py
@@ -3,8 +3,9 @@
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from sqlalchemy.schema import ColumnCollectionConstraint
-from sqlalchemy.sql import expression
+from ...sql.schema import ColumnCollectionConstraint
+from ...sql import expression
+from ... import util
class ExcludeConstraint(ColumnCollectionConstraint):
@@ -48,17 +49,39 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
for this constraint.
"""
+ columns = []
+ render_exprs = []
+ self.operators = {}
+
+ expressions, operators = zip(*elements)
+
+ for (expr, column, strname, add_element), operator in zip(
+ self._extract_col_expression_collection(expressions),
+ operators
+ ):
+ if add_element is not None:
+ columns.append(add_element)
+
+ name = column.name if column is not None else strname
+
+ if name is not None:
+ # backwards compat
+ self.operators[name] = operator
+
+ expr = expression._literal_as_text(expr)
+
+ render_exprs.append(
+ (expr, name, operator)
+ )
+
+ self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
- *[col for col, op in elements],
+ *columns,
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
)
- self.operators = {}
- for col_or_string, op in elements:
- name = getattr(col_or_string, 'name', col_or_string)
- self.operators[name] = op
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where:
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index f83bab2fa..36a9d7bf7 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -74,6 +74,8 @@ See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/\
libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+.. _psycopg2_execution_options:
+
Per-Statement/Connection Execution Options
-------------------------------------------
@@ -81,16 +83,23 @@ The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
-* isolation_level - Set the transaction isolation level for the lifespan of a
+* ``isolation_level`` - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
-* stream_results - Enable or disable usage of psycopg2 server side cursors -
+* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
+* ``max_row_buffer`` - when using ``stream_results``, an integer value that
+ specifies the maximum number of rows to buffer at a time. This is
+ interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
+ buffer will grow to ultimately store 1000 rows at a time.
+
+ .. versionadded:: 1.0.6
+
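+A minimal sketch combining the two options, where ``engine`` is assumed
+to be a psycopg2 :class:`.Engine` and the table name is illustrative::
+
+    with engine.connect() as conn:
+        result = conn.execution_options(
+            stream_results=True, max_row_buffer=100
+        ).execute("select * from mytable")
+
+        for row in result:
+            print(row)
+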
.. _psycopg2_unicode:
Unicode with Psycopg2
@@ -501,6 +510,14 @@ class PGDialect_psycopg2(PGDialect):
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 5),
+ native_jsonb=(2, 5, 4),
+ sane_multi_rowcount=(2, 0, 9),
+ array_oid=(2, 4, 3),
+ hstore_adapter=(2, 4)
+ )
+
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
@@ -547,11 +564,15 @@ class PGDialect_psycopg2(PGDialect):
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
- self._has_native_json = self.psycopg2_version >= (2, 5)
- self._has_native_jsonb = self.psycopg2_version >= (2, 5, 4)
+ self._has_native_json = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json']
+ self._has_native_jsonb = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb']
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
- self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)
+ self.supports_sane_multi_rowcount = \
+ self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['sane_multi_rowcount']
@classmethod
def dbapi(cls):
@@ -625,7 +646,8 @@ class PGDialect_psycopg2(PGDialect):
kw = {'oid': oid}
if util.py2k:
kw['unicode'] = True
- if self.psycopg2_version >= (2, 4, 3):
+ if self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['array_oid']:
kw['array_oid'] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
@@ -650,7 +672,7 @@ class PGDialect_psycopg2(PGDialect):
@util.memoized_instancemethod
def _hstore_oids(self, conn):
- if self.psycopg2_version >= (2, 4):
+ if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
index f5c475d90..f0fe23df3 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -31,6 +31,18 @@ class PGDialect_psycopg2cffi(PGDialect_psycopg2):
driver = 'psycopg2cffi'
supports_unicode_statements = True
+ # psycopg2cffi's first release is 2.5.0, but reports
+ # __version__ as 2.4.4. Subsequent releases seem to have
+ # fixed this.
+
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 4, 4),
+ native_jsonb=(99, 99, 99),
+ sane_multi_rowcount=(2, 4, 4),
+ array_oid=(2, 4, 4),
+ hstore_adapter=(2, 4, 4)
+ )
+
@classmethod
def dbapi(cls):
return __import__('psycopg2cffi')
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index 960b4bdfb..d9da46f4c 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -352,6 +352,107 @@ The index will be rendered at create time as::
.. versionadded:: 0.9.9
+Dotted Column Names
+-------------------
+
+Using table or column names that explicitly have periods in them is
+**not recommended**. While this is generally a bad idea for relational
+databases in general, as the dot is a syntactically significant character,
+the SQLite driver has a bug which requires that SQLAlchemy filter out these
+dots in result sets.
+
+The bug, entirely outside of SQLAlchemy, can be illustrated as follows::
+
+ import sqlite3
+
+ conn = sqlite3.connect(":memory:")
+ cursor = conn.cursor()
+
+ cursor.execute("create table x (a integer, b integer)")
+ cursor.execute("insert into x (a, b) values (1, 1)")
+ cursor.execute("insert into x (a, b) values (2, 2)")
+
+ cursor.execute("select x.a, x.b from x")
+ assert [c[0] for c in cursor.description] == ['a', 'b']
+
+ cursor.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert [c[0] for c in cursor.description] == ['a', 'b'], \\
+ [c[0] for c in cursor.description]
+
+The second assertion fails::
+
+ Traceback (most recent call last):
+ File "test.py", line 19, in <module>
+ [c[0] for c in cursor.description]
+ AssertionError: ['x.a', 'x.b']
+
+Above, the driver incorrectly reports the names of the columns
+including the name of the table, which is entirely inconsistent with
+its behavior when the UNION is not present.
+
+SQLAlchemy relies upon column names being predictable in how they match
+to the original statement, so the SQLAlchemy dialect has no choice but
+to filter these out::
+
+
+ from sqlalchemy import create_engine
+
+ eng = create_engine("sqlite://")
+ conn = eng.connect()
+
+ conn.execute("create table x (a integer, b integer)")
+ conn.execute("insert into x (a, b) values (1, 1)")
+ conn.execute("insert into x (a, b) values (2, 2)")
+
+ result = conn.execute("select x.a, x.b from x")
+ assert result.keys() == ["a", "b"]
+
+ result = conn.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["a", "b"]
+
+Note that above, even though SQLAlchemy filters out the dots, *both
+names are still addressable*::
+
+ >>> row = result.first()
+ >>> row["a"]
+ 1
+ >>> row["x.a"]
+ 1
+ >>> row["b"]
+ 1
+ >>> row["x.b"]
+ 1
+
+Therefore, the workaround applied by SQLAlchemy only impacts
+:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API.
+In the very specific case where
+an application is forced to use column names that contain dots, and the
+functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()`
+is required to return these dotted names unmodified, the ``sqlite_raw_colnames``
+execution option may be provided, either on a per-:class:`.Connection` basis::
+
+ result = conn.execution_options(sqlite_raw_colnames=True).execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["x.a", "x.b"]
+
+or on a per-:class:`.Engine` basis::
+
+ engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
+
+When using the per-:class:`.Engine` execution option, note that
+**Core and ORM queries that use UNION may not function properly**.
+
"""
import datetime
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 7ebe39bbf..dea92e512 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -1811,29 +1811,26 @@ class Engine(Connectable, log.Identified):
def dispose(self):
"""Dispose of the connection pool used by this :class:`.Engine`.
+ This has the effect of fully closing all **currently checked in**
+ database connections. Connections that are still checked out
+ will **not** be closed, however they will no longer be associated
+ with this :class:`.Engine`, so when they are closed individually
+ they will close out fully.
+
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
- first requested.
-
- This method has two general use cases:
+ first requested, so as long as the :class:`.Engine` isn't used again,
+ no new connections will be made.
- * When a dropped connection is detected, it is assumed that all
- connections held by the pool are potentially dropped, and
- the entire pool is replaced.
-
- * An application may want to use :meth:`dispose` within a test
- suite that is creating multiple engines.
+ .. seealso::
- It is critical to note that :meth:`dispose` does **not** guarantee
- that the application will release all open database connections - only
- those connections that are checked into the pool are closed.
- Connections which remain checked out or have been detached from
- the engine are not affected.
+ :ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
+ self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self.contextual_connect() as conn:
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 6d19cb6d0..b2b78dee8 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1067,10 +1067,27 @@ class BufferedRowResultProxy(ResultProxy):
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
- for additional rows up to a size of 100.
+ for additional rows up to a size of 1000.
+
+ The size argument is configurable using the ``max_row_buffer``
+ execution option::
+
+ with psycopg2_engine.connect() as conn:
+
+ result = conn.execution_options(
+ stream_results=True, max_row_buffer=50
+ ).execute("select * from table")
+
+ .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
+
+ .. seealso::
+
+ :ref:`psycopg2_execution_options`
"""
def _init_metadata(self):
+ self._max_row_buffer = self.context.execution_options.get(
+ 'max_row_buffer', None)
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
@@ -1095,6 +1112,8 @@ class BufferedRowResultProxy(ResultProxy):
size = getattr(self, '_bufsize', 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
+ if self._max_row_buffer is not None:
+ self._bufsize = min(self._max_row_buffer, self._bufsize)
def _soft_close(self, **kw):
self.__rowbuffer.clear()
diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py
index b2d4b54a9..f439d554f 100644
--- a/lib/sqlalchemy/events.py
+++ b/lib/sqlalchemy/events.py
@@ -882,6 +882,23 @@ class ConnectionEvents(event.Events):
"""
+ def engine_disposed(self, engine):
+ """Intercept when the :meth:`.Engine.dispose` method is called.
+
+ The :meth:`.Engine.dispose` method instructs the engine to
+        "dispose" of its connection pool (e.g. :class:`.Pool`), and
+ replaces it with a new one. Disposing of the old pool has the
+ effect that existing checked-in connections are closed. The new
+ pool does not establish any new connections until it is first used.
+
+ This event can be used to indicate that resources related to the
+ :class:`.Engine` should also be cleaned up, keeping in mind that the
+ :class:`.Engine` can still be used for new requests in which case
+ it re-acquires connection resources.
+
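+        E.g. (a minimal sketch; the hook body here is illustrative
+        only)::
+
+            from sqlalchemy import event
+
+            @event.listens_for(engine, "engine_disposed")
+            def receive_engine_disposed(engine):
+                print("engine %s was disposed" % engine)
+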
+ .. versionadded:: 1.0.5
+
+ """
def begin(self, conn):
"""Intercept begin() events.
diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py
index 119d10c42..1006e7326 100644
--- a/lib/sqlalchemy/ext/automap.py
+++ b/lib/sqlalchemy/ext/automap.py
@@ -11,12 +11,6 @@ schema, typically though not necessarily one which is reflected.
.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`.
-.. note::
-
- The :mod:`sqlalchemy.ext.automap` extension should be considered
- **experimental** as of 0.9.1. Featureset and API stability is
- not guaranteed at this time.
-
It is hoped that the :class:`.AutomapBase` system provides a quick
and modernized solution to the problem that the very famous
`SQLSoup <https://sqlsoup.readthedocs.org/en/latest/>`_
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index b429aa4c1..a42ed2f7c 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -443,7 +443,8 @@ def _collect_update_commands(
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in
- set(propkey_to_col).intersection(state_dict)
+ set(propkey_to_col).intersection(state_dict).difference(
+ mapper._pk_keys_by_table[table])
)
else:
params = {}
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 8421e42ac..8b3df08e7 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -756,7 +756,8 @@ class Query(object):
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
- {"stream_results": True})
+ {"stream_results": True,
+ "max_row_buffer": count})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
@@ -3539,11 +3540,13 @@ class _ColumnEntity(_QueryEntity):
self.expr = column
self.namespace = namespace
search_entities = True
+ check_column = False
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
+ check_column = True
_entity = None
elif isinstance(column, (
attributes.QueryableAttribute,
@@ -3554,10 +3557,12 @@ class _ColumnEntity(_QueryEntity):
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
+ check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
- elif not isinstance(column, sql.ColumnElement):
+
+ if not isinstance(column, sql.ColumnElement):
if hasattr(column, '_select_iterable'):
# break out an object like Table into
# individual columns
@@ -3572,7 +3577,7 @@ class _ColumnEntity(_QueryEntity):
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
- else:
+ elif not check_column:
self._label_name = getattr(column, 'key', None)
search_entities = True
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
index 823b97239..66cb2a319 100644
--- a/lib/sqlalchemy/orm/util.py
+++ b/lib/sqlalchemy/orm/util.py
@@ -839,9 +839,10 @@ class _ORMJoin(expression.Join):
# or implicit ON clause, augment it the same way we'd augment the
# WHERE.
single_crit = right_info.mapper._single_table_criterion
- if right_info.is_aliased_class:
- single_crit = right_info._adapter.traverse(single_crit)
- self.onclause = self.onclause & single_crit
+ if single_crit is not None:
+ if right_info.is_aliased_class:
+ single_crit = right_info._adapter.traverse(single_crit)
+ self.onclause = self.onclause & single_crit
def _splice_into_center(self, other):
"""Splice a join into the center.
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py
index a178ed99a..27ecce2b0 100644
--- a/lib/sqlalchemy/sql/elements.py
+++ b/lib/sqlalchemy/sql/elements.py
@@ -3103,7 +3103,8 @@ class Label(ColumnElement):
return self.element,
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
- self.element = clone(self.element, **kw)
+ self._element = clone(self._element, **kw)
+ self.__dict__.pop('element', None)
self.__dict__.pop('_allow_label_resolve', None)
if anonymize_labels:
self.name = self._resolve_label = _anonymous_label(
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index e6d1d8858..a8989627d 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -2392,6 +2392,22 @@ class ColumnCollectionMixin(object):
if _autoattach and self._pending_colargs:
self._check_attach()
+ @classmethod
+ def _extract_col_expression_collection(cls, expressions):
+ for expr in expressions:
+ strname = None
+ column = None
+ if not isinstance(expr, ClauseElement):
+ # this assumes a string
+ strname = expr
+ else:
+ cols = []
+ visitors.traverse(expr, {}, {'column': cols.append})
+ if cols:
+ column = cols[0]
+ add_element = column if column is not None else strname
+ yield expr, column, strname, add_element
+
def _check_attach(self, evt=False):
col_objs = [
c for c in self._pending_colargs
@@ -3086,14 +3102,10 @@ class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem):
self.table = None
columns = []
- for expr in expressions:
- if not isinstance(expr, ClauseElement):
- columns.append(expr)
- else:
- cols = []
- visitors.traverse(expr, {}, {'column': cols.append})
- if cols:
- columns.append(cols[0])
+ for expr, column, strname, add_element in self.\
+ _extract_col_expression_collection(expressions):
+ if add_element is not None:
+ columns.append(add_element)
self.expressions = expressions
self.name = quoted_name(name, kw.pop("quote", None))
diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py
index adfbe85e3..7482e32a1 100644
--- a/lib/sqlalchemy/testing/__init__.py
+++ b/lib/sqlalchemy/testing/__init__.py
@@ -19,7 +19,7 @@ def against(*queries):
return _against(config._current, *queries)
from .assertions import emits_warning, emits_warning_on, uses_deprecated, \
- eq_, ne_, is_, is_not_, startswith_, assert_raises, \
+ eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \
assert_raises_message, AssertsCompiledSQL, ComparesTables, \
AssertsExecutionResults, expect_deprecated, expect_warnings
diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py
index e5249c296..e0c02c896 100644
--- a/lib/sqlalchemy/testing/assertions.py
+++ b/lib/sqlalchemy/testing/assertions.py
@@ -216,6 +216,11 @@ def ne_(a, b, msg=None):
assert a != b, msg or "%r == %r" % (a, b)
+def le_(a, b, msg=None):
+ """Assert a <= b, with repr messaging on failure."""
+    assert a <= b, msg or "%r > %r" % (a, b)
+
+
def is_(a, b, msg=None):
"""Assert a is b, with repr messaging on failure."""
assert a is b, msg or "%r is not %r" % (a, b)
diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py
index 7b421952f..e16bc77c0 100644
--- a/lib/sqlalchemy/testing/fixtures.py
+++ b/lib/sqlalchemy/testing/fixtures.py
@@ -134,13 +134,14 @@ class TablesTest(TestBase):
def _teardown_each_tables(self):
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != 'each' and self.run_deletes == 'each':
- for table in reversed(self.metadata.sorted_tables):
- try:
- table.delete().execute().close()
- except sa.exc.DBAPIError as ex:
- util.print_(
- ("Error emptying table %s: %r" % (table, ex)),
- file=sys.stderr)
+ with self.bind.connect() as conn:
+ for table in reversed(self.metadata.sorted_tables):
+ try:
+ conn.execute(table.delete())
+ except sa.exc.DBAPIError as ex:
+ util.print_(
+ ("Error emptying table %s: %r" % (table, ex)),
+ file=sys.stderr)
def setup(self):
self._setup_each_tables()
diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py
index 3d7bfad0a..499515142 100644
--- a/lib/sqlalchemy/util/langhelpers.py
+++ b/lib/sqlalchemy/util/langhelpers.py
@@ -755,7 +755,7 @@ class memoized_property(object):
obj.__dict__.pop(name, None)
-class memoized_instancemethod(object):
+def memoized_instancemethod(fn):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
@@ -764,26 +764,14 @@ class memoized_instancemethod(object):
"""
- def __init__(self, fget, doc=None):
- self.fget = fget
- self.__doc__ = doc or fget.__doc__
- self.__name__ = fget.__name__
-
- def __get__(self, obj, cls):
- if obj is None:
- return self
-
- def oneshot(*args, **kw):
- result = self.fget(obj, *args, **kw)
- memo = lambda *a, **kw: result
- memo.__name__ = self.__name__
- memo.__doc__ = self.__doc__
- obj.__dict__[self.__name__] = memo
- return result
-
- oneshot.__name__ = self.__name__
- oneshot.__doc__ = self.__doc__
- return oneshot
+ def oneshot(self, *args, **kw):
+ result = fn(self, *args, **kw)
+ memo = lambda *a, **kw: result
+ memo.__name__ = fn.__name__
+ memo.__doc__ = fn.__doc__
+ self.__dict__[fn.__name__] = memo
+ return result
+ return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
diff --git a/test/base/test_utils.py b/test/base/test_utils.py
index df61d7874..256f52850 100644
--- a/test/base/test_utils.py
+++ b/test/base/test_utils.py
@@ -7,7 +7,7 @@ from sqlalchemy.testing.util import picklers, gc_collect
from sqlalchemy.util import classproperty, WeakSequence, get_callable_argspec
from sqlalchemy.sql import column
from sqlalchemy.util import langhelpers
-
+import inspect
class _KeyedTupleTest(object):
@@ -276,6 +276,7 @@ class MemoizedAttrTest(fixtures.TestBase):
val[0] += 1
return v
+ assert inspect.ismethod(Foo().bar)
ne_(Foo.bar, None)
f1 = Foo()
assert 'bar' not in f1.__dict__
diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py
index 706b60bd8..731141604 100644
--- a/test/dialect/postgresql/test_compiler.py
+++ b/test/dialect/postgresql/test_compiler.py
@@ -5,7 +5,8 @@ from sqlalchemy.testing.assertions import AssertsCompiledSQL, is_, \
from sqlalchemy.testing import engines, fixtures
from sqlalchemy import testing
from sqlalchemy import Sequence, Table, Column, Integer, update, String,\
- insert, func, MetaData, Enum, Index, and_, delete, select, cast, text
+ insert, func, MetaData, Enum, Index, and_, delete, select, cast, text, \
+ Text
from sqlalchemy.dialects.postgresql import ExcludeConstraint, array
from sqlalchemy import exc, schema
from sqlalchemy.dialects.postgresql import base as postgresql
@@ -467,8 +468,47 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
tbl.append_constraint(cons_copy)
self.assert_compile(schema.AddConstraint(cons_copy),
'ALTER TABLE testtbl ADD EXCLUDE USING gist '
- '(room WITH =)',
- dialect=postgresql.dialect())
+ '(room WITH =)')
+
+ def test_exclude_constraint_text(self):
+ m = MetaData()
+ cons = ExcludeConstraint((text('room::TEXT'), '='))
+ Table(
+ 'testtbl', m,
+ Column('room', String),
+ cons)
+ self.assert_compile(
+ schema.AddConstraint(cons),
+ 'ALTER TABLE testtbl ADD EXCLUDE USING gist '
+ '(room::TEXT WITH =)')
+
+ def test_exclude_constraint_cast(self):
+ m = MetaData()
+ tbl = Table(
+ 'testtbl', m,
+ Column('room', String)
+ )
+ cons = ExcludeConstraint((cast(tbl.c.room, Text), '='))
+ tbl.append_constraint(cons)
+ self.assert_compile(
+ schema.AddConstraint(cons),
+ 'ALTER TABLE testtbl ADD EXCLUDE USING gist '
+ '(CAST(room AS TEXT) WITH =)'
+ )
+
+ def test_exclude_constraint_cast_quote(self):
+ m = MetaData()
+ tbl = Table(
+ 'testtbl', m,
+ Column('Room', String)
+ )
+ cons = ExcludeConstraint((cast(tbl.c.Room, Text), '='))
+ tbl.append_constraint(cons)
+ self.assert_compile(
+ schema.AddConstraint(cons),
+ 'ALTER TABLE testtbl ADD EXCLUDE USING gist '
+ '(CAST("Room" AS TEXT) WITH =)'
+ )
def test_substring(self):
self.assert_compile(func.substring('abc', 1, 2),
diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py
index 5d74d54ad..52620bb78 100644
--- a/test/dialect/postgresql/test_dialect.py
+++ b/test/dialect/postgresql/test_dialect.py
@@ -60,16 +60,19 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
eq_(testing.db.dialect._get_server_version_info(mock_conn(string)),
version)
- @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature')
+ @testing.requires.psycopg2_compatibility
def test_psycopg2_version(self):
v = testing.db.dialect.psycopg2_version
assert testing.db.dialect.dbapi.__version__.\
startswith(".".join(str(x) for x in v))
- @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature')
+ @testing.requires.psycopg2_compatibility
def test_psycopg2_non_standard_err(self):
- from psycopg2.extensions import TransactionRollbackError
- import psycopg2
+ # under pypy the name here is psycopg2cffi
+ psycopg2 = testing.db.dialect.dbapi
+ TransactionRollbackError = __import__(
+ "%s.extensions" % psycopg2.__name__
+ ).extensions.TransactionRollbackError
exception = exc.DBAPIError.instance(
"some statement", {}, TransactionRollbackError("foo"),
@@ -79,7 +82,7 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
# currently not passing with pg 9.3 that does not seem to generate
# any notices here, would rather find a way to mock this
@testing.requires.no_coverage
- @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature')
+ @testing.requires.psycopg2_compatibility
def _test_notice_logging(self):
log = logging.getLogger('sqlalchemy.dialects.postgresql')
buf = logging.handlers.BufferingHandler(100)
@@ -100,9 +103,7 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
assert 'will create implicit sequence' in msgs
assert 'will create implicit index' in msgs
- @testing.only_on(
- ['postgresql+psycopg2', 'postgresql+pg8000'],
- 'psycopg2/pg8000-specific feature')
+ @testing.requires.psycopg2_or_pg8000_compatibility
@engines.close_open_connections
def test_client_encoding(self):
c = testing.db.connect()
@@ -121,26 +122,23 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
new_encoding = c.execute("show client_encoding").fetchone()[0]
eq_(new_encoding, test_encoding)
+ @testing.requires.psycopg2_compatibility
def test_pg_dialect_use_native_unicode_from_config(self):
config = {
- 'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test',
+ 'sqlalchemy.url': testing.db.url,
'sqlalchemy.use_native_unicode': "false"}
e = engine_from_config(config, _initialize=False)
eq_(e.dialect.use_native_unicode, False)
config = {
- 'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test',
+ 'sqlalchemy.url': testing.db.url,
'sqlalchemy.use_native_unicode': "true"}
e = engine_from_config(config, _initialize=False)
eq_(e.dialect.use_native_unicode, True)
-
- @testing.only_on(
- ['postgresql+psycopg2', 'postgresql+pg8000',
- 'postgresql+psycopg2cffi'],
- 'psycopg2 / pg8000 - specific feature')
+ @testing.requires.psycopg2_or_pg8000_compatibility
@engines.close_open_connections
def test_autocommit_isolation_level(self):
c = testing.db.connect().execution_options(
@@ -234,8 +232,7 @@ class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
testing.db.execute('drop table speedy_users')
@testing.fails_on('+zxjdbc', 'psycopg2/pg8000 specific assertion')
- @testing.fails_on('pypostgresql',
- 'psycopg2/pg8000 specific assertion')
+ @testing.requires.psycopg2_or_pg8000_compatibility
def test_numeric_raise(self):
stmt = text(
"select cast('hi' as char) as hi", typemap={'hi': Numeric})
diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py
index 27cb958fd..4a33644e0 100644
--- a/test/dialect/postgresql/test_query.py
+++ b/test/dialect/postgresql/test_query.py
@@ -549,7 +549,7 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults):
class ServerSideCursorsTest(fixtures.TestBase, AssertsExecutionResults):
- __only_on__ = 'postgresql+psycopg2'
+ __requires__ = 'psycopg2_compatibility',
def _fixture(self, server_side_cursors):
self.engine = engines.testing_engine(
diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py
index 2321a3054..ed8a88fd4 100644
--- a/test/dialect/postgresql/test_reflection.py
+++ b/test/dialect/postgresql/test_reflection.py
@@ -856,7 +856,7 @@ class ReflectionTest(fixtures.TestBase):
}])
@testing.provide_metadata
- @testing.only_on("postgresql>=8.5")
+ @testing.only_on("postgresql >= 8.5")
def test_reflection_with_unique_constraint(self):
insp = inspect(testing.db)
diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py
index e26526ef3..fac0f2df8 100644
--- a/test/dialect/postgresql/test_types.py
+++ b/test/dialect/postgresql/test_types.py
@@ -1567,7 +1567,7 @@ class HStoreRoundTripTest(fixtures.TablesTest):
self._assert_data([{"k1": "r1v1", "k2": "r1v2"}])
def _non_native_engine(self):
- if testing.against("postgresql+psycopg2"):
+ if testing.requires.psycopg2_native_hstore.enabled:
engine = engines.testing_engine(
options=dict(
use_native_hstore=False))
@@ -1581,7 +1581,7 @@ class HStoreRoundTripTest(fixtures.TablesTest):
cols = insp.get_columns('data_table')
assert isinstance(cols[2]['type'], HSTORE)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_insert_native(self):
engine = testing.db
self._test_insert(engine)
@@ -1590,7 +1590,7 @@ class HStoreRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_insert(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_criterion_native(self):
engine = testing.db
self._fixture_data(engine)
@@ -1624,7 +1624,7 @@ class HStoreRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_fixed_round_trip(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_fixed_round_trip_native(self):
engine = testing.db
self._test_fixed_round_trip(engine)
@@ -1645,12 +1645,12 @@ class HStoreRoundTripTest(fixtures.TablesTest):
}
)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_unicode_round_trip_python(self):
engine = self._non_native_engine()
self._test_unicode_round_trip(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_unicode_round_trip_native(self):
engine = testing.db
self._test_unicode_round_trip(engine)
@@ -1659,7 +1659,7 @@ class HStoreRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_escaped_quotes_round_trip(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_hstore
def test_escaped_quotes_round_trip_native(self):
engine = testing.db
self._test_escaped_quotes_round_trip(engine)
@@ -1691,14 +1691,16 @@ class HStoreRoundTripTest(fixtures.TablesTest):
class _RangeTypeMixin(object):
- __requires__ = 'range_types',
- __dialect__ = 'postgresql+psycopg2'
+ __requires__ = 'range_types', 'psycopg2_compatibility'
__backend__ = True
def extras(self):
# done this way so we don't get ImportErrors with
# older psycopg2 versions.
- from psycopg2 import extras
+ if testing.against("postgresql+psycopg2cffi"):
+ from psycopg2cffi import extras
+ else:
+ from psycopg2 import extras
return extras
@classmethod
@@ -1966,7 +1968,7 @@ class DateTimeTZRangeTests(_RangeTypeMixin, fixtures.TablesTest):
def tstzs(self):
if self._tstzs is None:
- lower = testing.db.connect().scalar(
+ lower = testing.db.scalar(
func.current_timestamp().select()
)
upper = lower + datetime.timedelta(1)
@@ -2216,17 +2218,17 @@ class JSONRoundTripTest(fixtures.TablesTest):
cols = insp.get_columns('data_table')
assert isinstance(cols[2]['type'], self.test_type)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_insert_native(self):
engine = testing.db
self._test_insert(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_insert_native_nulls(self):
engine = testing.db
self._test_insert_nulls(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_insert_native_none_as_null(self):
engine = testing.db
self._test_insert_none_as_null(engine)
@@ -2284,15 +2286,15 @@ class JSONRoundTripTest(fixtures.TablesTest):
},
)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_custom_native(self):
self._test_custom_serialize_deserialize(True)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_custom_python(self):
self._test_custom_serialize_deserialize(False)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_criterion_native(self):
engine = testing.db
self._fixture_data(engine)
@@ -2364,7 +2366,7 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_fixed_round_trip(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_fixed_round_trip_native(self):
engine = testing.db
self._test_fixed_round_trip(engine)
@@ -2391,7 +2393,7 @@ class JSONRoundTripTest(fixtures.TablesTest):
engine = self._non_native_engine()
self._test_unicode_round_trip(engine)
- @testing.only_on("postgresql+psycopg2")
+ @testing.requires.psycopg2_native_json
def test_unicode_round_trip_native(self):
engine = testing.db
self._test_unicode_round_trip(engine)
diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py
index 761ac102a..fbb1878dc 100644
--- a/test/engine/test_execute.py
+++ b/test/engine/test_execute.py
@@ -1,7 +1,7 @@
# coding: utf-8
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \
- config, is_, is_not_
+ config, is_, is_not_, le_
import re
from sqlalchemy.testing.util import picklers
from sqlalchemy.interfaces import ConnectionProxy
@@ -484,6 +484,32 @@ class ExecuteTest(fixtures.TestBase):
eq_(canary, ["l1", "l2", "l3", "l1", "l2"])
@testing.requires.ad_hoc_engines
+ def test_dispose_event(self):
+ canary = Mock()
+ eng = create_engine(testing.db.url)
+ event.listen(eng, "engine_disposed", canary)
+
+ conn = eng.connect()
+ conn.close()
+ eng.dispose()
+
+
+ conn = eng.connect()
+ conn.close()
+
+ eq_(
+ canary.mock_calls,
+ [call(eng)]
+ )
+
+ eng.dispose()
+
+ eq_(
+ canary.mock_calls,
+ [call(eng), call(eng)]
+ )
+
+ @testing.requires.ad_hoc_engines
def test_autocommit_option_no_issue_first_connect(self):
eng = create_engine(testing.db.url)
eng.update_execution_options(autocommit=True)
@@ -1021,76 +1047,91 @@ class ExecutionOptionsTest(fixtures.TestBase):
)
-class AlternateResultProxyTest(fixtures.TestBase):
+class AlternateResultProxyTest(fixtures.TablesTest):
__requires__ = ('sqlite', )
@classmethod
- def setup_class(cls):
+ def setup_bind(cls):
cls.engine = engine = testing_engine('sqlite://')
- m = MetaData()
- cls.table = t = Table('test', m,
- Column('x', Integer, primary_key=True),
- Column('y', String(50, convert_unicode='force'))
- )
- m.create_all(engine)
- engine.execute(t.insert(), [
+ return engine
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table(
+ 'test', metadata,
+ Column('x', Integer, primary_key=True),
+ Column('y', String(50, convert_unicode='force'))
+ )
+
+ @classmethod
+ def insert_data(cls):
+ cls.engine.execute(cls.tables.test.insert(), [
{'x': i, 'y': "t_%d" % i} for i in range(1, 12)
])
- def _test_proxy(self, cls):
+ @contextmanager
+ def _proxy_fixture(self, cls):
+ self.table = self.tables.test
+
class ExcCtx(default.DefaultExecutionContext):
def get_result_proxy(self):
return cls(self)
- self.engine.dialect.execution_ctx_cls = ExcCtx
- rows = []
- r = self.engine.execute(select([self.table]))
- assert isinstance(r, cls)
- for i in range(5):
- rows.append(r.fetchone())
- eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
+ self.patcher = patch.object(
+ self.engine.dialect, "execution_ctx_cls", ExcCtx)
+ with self.patcher:
+ yield
- rows = r.fetchmany(3)
- eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
+ def _test_proxy(self, cls):
+ with self._proxy_fixture(cls):
+ rows = []
+ r = self.engine.execute(select([self.table]))
+ assert isinstance(r, cls)
+ for i in range(5):
+ rows.append(r.fetchone())
+ eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
+
+ rows = r.fetchmany(3)
+ eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
- rows = r.fetchall()
- eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
+ rows = r.fetchall()
+ eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
- r = self.engine.execute(select([self.table]))
- rows = r.fetchmany(None)
- eq_(rows[0], (1, "t_1"))
- # number of rows here could be one, or the whole thing
- assert len(rows) == 1 or len(rows) == 11
+ r = self.engine.execute(select([self.table]))
+ rows = r.fetchmany(None)
+ eq_(rows[0], (1, "t_1"))
+ # number of rows here could be one, or the whole thing
+ assert len(rows) == 1 or len(rows) == 11
- r = self.engine.execute(select([self.table]).limit(1))
- r.fetchone()
- eq_(r.fetchone(), None)
+ r = self.engine.execute(select([self.table]).limit(1))
+ r.fetchone()
+ eq_(r.fetchone(), None)
- r = self.engine.execute(select([self.table]).limit(5))
- rows = r.fetchmany(6)
- eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
+ r = self.engine.execute(select([self.table]).limit(5))
+ rows = r.fetchmany(6)
+ eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
- # result keeps going just fine with blank results...
- eq_(r.fetchmany(2), [])
+ # result keeps going just fine with blank results...
+ eq_(r.fetchmany(2), [])
- eq_(r.fetchmany(2), [])
+ eq_(r.fetchmany(2), [])
- eq_(r.fetchall(), [])
+ eq_(r.fetchall(), [])
- eq_(r.fetchone(), None)
+ eq_(r.fetchone(), None)
- # until we close
- r.close()
+ # until we close
+ r.close()
- self._assert_result_closed(r)
+ self._assert_result_closed(r)
- r = self.engine.execute(select([self.table]).limit(5))
- eq_(r.first(), (1, "t_1"))
- self._assert_result_closed(r)
+ r = self.engine.execute(select([self.table]).limit(5))
+ eq_(r.first(), (1, "t_1"))
+ self._assert_result_closed(r)
- r = self.engine.execute(select([self.table]).limit(5))
- eq_(r.scalar(), 1)
- self._assert_result_closed(r)
+ r = self.engine.execute(select([self.table]).limit(5))
+ eq_(r.scalar(), 1)
+ self._assert_result_closed(r)
def _assert_result_closed(self, r):
assert_raises_message(
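
Turning ``_test_proxy`` into the ``_proxy_fixture`` context manager is more than cosmetic: the old ``setup_class`` version assigned ``execution_ctx_cls`` on the shared dialect and left it in place, whereas ``patch.object`` now restores the original class when each test exits, pass or fail. The pattern in isolation, with illustrative names (``sqlalchemy.testing.mock`` is the suite's mock compatibility shim)::

    from contextlib import contextmanager

    from sqlalchemy.testing.mock import patch

    @contextmanager
    def swapped(obj, attr, value):
        # patch.object puts the original attribute back on exit,
        # even if the body raises
        with patch.object(obj, attr, value):
            yield
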
@@ -1123,6 +1164,42 @@ class AlternateResultProxyTest(fixtures.TestBase):
def test_buffered_column_result_proxy(self):
self._test_proxy(_result.BufferedColumnResultProxy)
+ def test_buffered_row_growth(self):
+ with self._proxy_fixture(_result.BufferedRowResultProxy):
+ with self.engine.connect() as conn:
+ conn.execute(self.table.insert(), [
+ {'x': i, 'y': "t_%d" % i} for i in range(15, 1200)
+ ])
+ result = conn.execute(self.table.select())
+ checks = {
+ 0: 5, 1: 10, 9: 20, 135: 250, 274: 500,
+ 1351: 1000
+ }
+ for idx, row in enumerate(result, 0):
+ if idx in checks:
+ eq_(result._bufsize, checks[idx])
+ le_(
+ len(result._BufferedRowResultProxy__rowbuffer),
+ 1000
+ )
+
+ def test_max_row_buffer_option(self):
+ with self._proxy_fixture(_result.BufferedRowResultProxy):
+ with self.engine.connect() as conn:
+ conn.execute(self.table.insert(), [
+ {'x': i, 'y': "t_%d" % i} for i in range(15, 1200)
+ ])
+ result = conn.execution_options(max_row_buffer=27).execute(
+ self.table.select()
+ )
+ for idx, row in enumerate(result, 0):
+ if idx in (16, 70, 150, 250):
+ eq_(result._bufsize, 27)
+ le_(
+ len(result._BufferedRowResultProxy__rowbuffer),
+ 27
+ )
+
class EngineEventsTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
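
``test_buffered_row_growth`` asserts that ``BufferedRowResultProxy`` starts with a small fetch batch and doubles it up to a 1000-row ceiling, and ``test_max_row_buffer_option`` shows the new ``max_row_buffer`` execution option clamping that growth; both peek at the private ``_bufsize`` and row-buffer attributes to do so. A hedged usage sketch, where ``some_table`` and ``handle`` are stand-ins::

    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True,   # server-side cursor where supported
            max_row_buffer=100     # never buffer more than 100 rows
        ).execute(some_table.select())
        for row in result:
            handle(row)
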
diff --git a/test/ext/test_extendedattr.py b/test/ext/test_extendedattr.py
index 653418ac4..c4147ed85 100644
--- a/test/ext/test_extendedattr.py
+++ b/test/ext/test_extendedattr.py
@@ -9,7 +9,7 @@ from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.orm import clear_mappers
from sqlalchemy.testing import fixtures
from sqlalchemy.ext import instrumentation
-from sqlalchemy.orm.instrumentation import register_class
+from sqlalchemy.orm.instrumentation import register_class, manager_of_class
from sqlalchemy.testing.util import decorator
from sqlalchemy.orm import events
from sqlalchemy import event
@@ -465,7 +465,7 @@ class FinderTest(_ExtBase, fixtures.ORMTest):
register_class(A)
eq_(
- type(instrumentation.manager_of_class(A)),
+ type(manager_of_class(A)),
instrumentation.ClassManager)
def test_nativeext_interfaceexact(self):
@@ -475,7 +475,7 @@ class FinderTest(_ExtBase, fixtures.ORMTest):
register_class(A)
ne_(
- type(instrumentation.manager_of_class(A)),
+ type(manager_of_class(A)),
instrumentation.ClassManager)
def test_nativeext_submanager(self):
@@ -486,7 +486,7 @@ class FinderTest(_ExtBase, fixtures.ORMTest):
__sa_instrumentation_manager__ = Mine
register_class(A)
- eq_(type(instrumentation.manager_of_class(A)), Mine)
+ eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_greedy(self):
@@ -501,7 +501,7 @@ class FinderTest(_ExtBase, fixtures.ORMTest):
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
- eq_(type(instrumentation.manager_of_class(A)), Mine)
+ eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_pass(self):
@@ -513,8 +513,9 @@ class FinderTest(_ExtBase, fixtures.ORMTest):
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
+
eq_(
- type(instrumentation.manager_of_class(A)),
+ type(manager_of_class(A)),
instrumentation.ClassManager)
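
These hunks only tighten the call sites: ``manager_of_class`` is imported once at the top of the module instead of being dotted off ``instrumentation`` in every assertion. What the assertions verify is unchanged; restated minimally under default instrumentation::

    from sqlalchemy.orm.instrumentation import (
        ClassManager, manager_of_class, register_class)

    class A(object):
        pass

    register_class(A)
    # returns the ClassManager attached to the class
    # (None for an uninstrumented class)
    assert type(manager_of_class(A)) is ClassManager
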
diff --git a/test/ext/test_hybrid.py b/test/ext/test_hybrid.py
index b895d2fb2..e36b8f7e9 100644
--- a/test/ext/test_hybrid.py
+++ b/test/ext/test_hybrid.py
@@ -7,6 +7,7 @@ from sqlalchemy.testing import eq_, AssertsCompiledSQL, assert_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy import inspect
+
class PropertyComparatorTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py
index dbbe4c435..9f5d21a43 100644
--- a/test/orm/inheritance/test_single.py
+++ b/test/orm/inheritance/test_single.py
@@ -410,6 +410,31 @@ class RelationshipToSingleTest(testing.AssertsCompiledSQL, fixtures.MappedTest):
"AND employees_1.type IN (:type_1)"
)
+ def test_join_explicit_onclause_no_discriminator(self):
+ # test issue #3462
+ Company, Employee, Engineer = (
+ self.classes.Company,
+ self.classes.Employee,
+ self.classes.Engineer)
+ companies, employees = self.tables.companies, self.tables.employees
+
+ mapper(Company, companies, properties={
+ 'employees': relationship(Employee)
+ })
+ mapper(Employee, employees)
+ mapper(Engineer, inherits=Employee)
+
+ sess = create_session()
+ self.assert_compile(
+ sess.query(Company, Engineer.name).join(
+ Engineer, Company.company_id == Engineer.company_id),
+ "SELECT companies.company_id AS companies_company_id, "
+ "companies.name AS companies_name, "
+ "employees.name AS employees_name "
+ "FROM companies JOIN "
+ "employees ON companies.company_id = employees.company_id"
+ )
+
def test_outer_join_prop(self):
Company, Employee, Engineer = self.classes.Company,\
self.classes.Employee,\
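
``test_join_explicit_onclause_no_discriminator`` covers the #3462 regression: a join along explicit criteria to a single-inheritance subclass that maps no discriminator value would still trigger the 1.0 single-inheritance criteria adjustment, tacking a spurious ``AND NULL`` onto the ON clause. The asserted SQL pins the corrected form; the triggering query shape, given the mappings above, is::

    sess.query(Company, Engineer.name).join(
        Engineer, Company.company_id == Engineer.company_id)
    # pre-fix rendering appended "AND NULL" to the ON clause
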
diff --git a/test/orm/test_bulk.py b/test/orm/test_bulk.py
index e27d3b73c..e2a1464a6 100644
--- a/test/orm/test_bulk.py
+++ b/test/orm/test_bulk.py
@@ -96,11 +96,62 @@ class BulkInsertUpdateTest(BulkTest, _fixtures.FixtureTest):
asserter.assert_(
CompiledSQL(
- "UPDATE users SET id=:id, name=:name WHERE "
+ "UPDATE users SET name=:name WHERE "
"users.id = :users_id",
- [{'users_id': 1, 'id': 1, 'name': 'u1new'},
- {'users_id': 2, 'id': 2, 'name': 'u2'},
- {'users_id': 3, 'id': 3, 'name': 'u3new'}]
+ [{'users_id': 1, 'name': 'u1new'},
+ {'users_id': 2, 'name': 'u2'},
+ {'users_id': 3, 'name': 'u3new'}]
+ )
+ )
+
+ def test_bulk_update(self):
+ User, = self.classes("User",)
+
+ s = Session(expire_on_commit=False)
+ objects = [
+ User(name="u1"),
+ User(name="u2"),
+ User(name="u3")
+ ]
+ s.add_all(objects)
+ s.commit()
+
+ s = Session()
+ with self.sql_execution_asserter() as asserter:
+ s.bulk_update_mappings(
+ User,
+ [{'id': 1, 'name': 'u1new'},
+ {'id': 2, 'name': 'u2'},
+ {'id': 3, 'name': 'u3new'}]
+ )
+
+ asserter.assert_(
+ CompiledSQL(
+ "UPDATE users SET name=:name WHERE users.id = :users_id",
+ [{'users_id': 1, 'name': 'u1new'},
+ {'users_id': 2, 'name': 'u2'},
+ {'users_id': 3, 'name': 'u3new'}]
+ )
+ )
+
+ def test_bulk_insert(self):
+ User, = self.classes("User",)
+
+ s = Session()
+ with self.sql_execution_asserter() as asserter:
+ s.bulk_insert_mappings(
+ User,
+ [{'id': 1, 'name': 'u1new'},
+ {'id': 2, 'name': 'u2'},
+ {'id': 3, 'name': 'u3new'}]
+ )
+
+ asserter.assert_(
+ CompiledSQL(
+ "INSERT INTO users (id, name) VALUES (:id, :name)",
+ [{'id': 1, 'name': 'u1new'},
+ {'id': 2, 'name': 'u2'},
+ {'id': 3, 'name': 'u3new'}]
)
)
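
The first hunk corrects the UPDATE expected from the bulk persistence path: the primary key now appears only in the WHERE clause rather than redundantly in the SET list. The two new tests pin the exact statements the bulk API emits as single executemany calls. Minimal usage, with ``User`` mapped as in the fixture::

    s = Session()
    s.bulk_insert_mappings(
        User,
        [{'id': 1, 'name': 'u1'}, {'id': 2, 'name': 'u2'}])
    s.bulk_update_mappings(
        User,
        [{'id': 1, 'name': 'u1new'}])
    s.commit()
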
diff --git a/test/orm/test_descriptor.py b/test/orm/test_descriptor.py
index 2134d87b2..d9aca30e5 100644
--- a/test/orm/test_descriptor.py
+++ b/test/orm/test_descriptor.py
@@ -125,3 +125,4 @@ class DescriptorInstrumentationTest(fixtures.ORMTest):
str(aliased(Foo).foo == 'ed'),
"foobar(foo_1.name) = foobar(:foobar_1)"
)
+
diff --git a/test/orm/test_query.py b/test/orm/test_query.py
index 6a1eb57b4..62c97ec90 100644
--- a/test/orm/test_query.py
+++ b/test/orm/test_query.py
@@ -1718,6 +1718,25 @@ class ColumnPropertyTest(_fixtures.FixtureTest, AssertsCompiledSQL):
)
+class ComparatorTest(QueryTest):
+ def test_clause_element_query_resolve(self):
+ from sqlalchemy.orm.properties import ColumnProperty
+ User = self.classes.User
+
+ class Comparator(ColumnProperty.Comparator):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __clause_element__(self):
+ return self.expr
+
+ sess = Session()
+ eq_(
+ sess.query(Comparator(User.id)).order_by(Comparator(User.id)).all(),
+ [(7, ), (8, ), (9, ), (10, )]
+ )
+
+
# more slice tests are available in test/orm/generative.py
class SliceTest(QueryTest):
def test_first(self):
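
``ComparatorTest`` exercises the ``__clause_element__`` protocol: any object exposing that method can be handed to ``Query()`` and ``order_by()``, and the ORM resolves it to the wrapped expression. The protocol also works at the Core level; a self-contained sketch with illustrative names::

    from sqlalchemy import Column, Integer, MetaData, Table, select

    t = Table('t', MetaData(), Column('id', Integer, primary_key=True))

    class Wraps(object):
        def __init__(self, expr):
            self.expr = expr

        def __clause_element__(self):
            # called by Core/ORM coercion to unwrap the real element
            return self.expr

    print(select([Wraps(t.c.id)]))  # SELECT t.id FROM t
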
@@ -2656,10 +2675,12 @@ class YieldTest(_fixtures.FixtureTest):
User = self.classes.User
sess = create_session()
- q = sess.query(User).yield_per(1)
+ q = sess.query(User).yield_per(15)
q = q.execution_options(foo='bar')
assert q._yield_per
- eq_(q._execution_options, {"stream_results": True, "foo": "bar"})
+ eq_(
+ q._execution_options,
+ {"stream_results": True, "foo": "bar", "max_row_buffer": 15})
def test_no_joinedload_opt(self):
self._eagerload_mappings()
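
The adjusted assertion reflects that ``Query.yield_per(n)`` now sets ``max_row_buffer`` to ``n`` alongside ``stream_results``, so a buffering result proxy can never hold more rows than one batch. Usage is unchanged::

    for user in sess.query(User).yield_per(100):
        handle(user)  # hypothetical consumer; rows fetched 100 at a time
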
diff --git a/test/requirements.py b/test/requirements.py
index db5e65f4c..db4daca20 100644
--- a/test/requirements.py
+++ b/test/requirements.py
@@ -727,12 +727,12 @@ class DefaultRequirements(SuiteRequirements):
@property
def range_types(self):
def check_range_types(config):
- if not against(config, "postgresql+psycopg2"):
+ if not against(
+ config,
+ ["postgresql+psycopg2", "postgresql+psycopg2cffi"]):
return False
try:
- config.db.execute("select '[1,2)'::int4range;")
- # only supported in psycopg 2.5+
- from psycopg2.extras import NumericRange
+ config.db.scalar("select '[1,2)'::int4range;")
return True
except:
return False
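
The ``range_types`` check now admits psycopg2cffi, drops the import probe for ``psycopg2.extras.NumericRange``, and switches ``config.db.execute`` for ``config.db.scalar``, so the backend is simply asked to evaluate an ``int4range`` literal. On the table-definition side the corresponding type is the usual PostgreSQL range type; a small illustrative table::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import INT4RANGE

    spans = Table(
        'spans', MetaData(),
        Column('id', Integer, primary_key=True),
        Column('span', INT4RANGE))
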
@@ -765,6 +765,27 @@ class DefaultRequirements(SuiteRequirements):
)
@property
+ def psycopg2_native_json(self):
+ return self.psycopg2_compatibility
+
+ @property
+ def psycopg2_native_hstore(self):
+ return self.psycopg2_compatibility
+
+ @property
+ def psycopg2_compatibility(self):
+ return only_on(
+ ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
+ )
+
+ @property
+ def psycopg2_or_pg8000_compatibility(self):
+ return only_on(
+ ["postgresql+psycopg2", "postgresql+psycopg2cffi",
+ "postgresql+pg8000"]
+ )
+
+ @property
def percent_schema_names(self):
return skip_if(
[
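
The new requirement properties all reduce to ``only_on`` over a list of dialect names, which is what lets a test opt in with one ``@testing.requires.<name>`` decorator; ``psycopg2_native_json`` and ``psycopg2_native_hstore`` are aliases of the same ``psycopg2_compatibility`` rule. A sketch of the general pattern, where the property name is hypothetical::

    from sqlalchemy.testing.exclusions import only_on
    from sqlalchemy.testing.requirements import SuiteRequirements

    class MyRequirements(SuiteRequirements):
        @property
        def my_backend_feature(self):
            # skip unless the test runs on one of these dialects
            return only_on(
                ["postgresql+psycopg2", "postgresql+psycopg2cffi"])
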
diff --git a/test/sql/test_generative.py b/test/sql/test_generative.py
index 12bfdfa9d..9cf1ef612 100644
--- a/test/sql/test_generative.py
+++ b/test/sql/test_generative.py
@@ -454,6 +454,27 @@ class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
str(f1), str(f2)
)
+ def test_labeled_expression_adapt(self):
+ lbl_x = (t3.c.col1 == 1).label('x')
+ t3_alias = t3.alias()
+
+ adapter = sql_util.ColumnAdapter(t3_alias)
+
+ lblx_adapted = adapter.traverse(lbl_x)
+ is_not_(lblx_adapted._element, lbl_x._element)
+
+ lblx_adapted = adapter.traverse(lbl_x)
+ self.assert_compile(
+ select([lblx_adapted.self_group()]),
+ "SELECT (table3_1.col1 = :col1_1) AS x FROM table3 AS table3_1"
+ )
+
+ self.assert_compile(
+ select([lblx_adapted.is_(True)]),
+ "SELECT (table3_1.col1 = :col1_1) IS 1 AS anon_1 "
+ "FROM table3 AS table3_1"
+ )
+
def test_text(self):
clause = text(
"select * from table where foo=:bar",