-rw-r--r--  doc/build/changelog/migration_20.rst         35
-rw-r--r--  doc/build/changelog/unreleased_14/6746.rst    7
-rw-r--r--  doc/build/changelog/unreleased_14/6910.rst    9
-rw-r--r--  doc/build/changelog/unreleased_14/6913.rst    9
-rw-r--r--  doc/build/changelog/unreleased_14/6924.rst    9
-rw-r--r--  doc/build/orm/contextual.rst                  2
-rw-r--r--  doc/build/orm/persistence_techniques.rst    157
-rw-r--r--  doc/build/orm/session_basics.rst             68
-rw-r--r--  doc/build/orm/session_transaction.rst         2
-rw-r--r--  lib/sqlalchemy/dialects/mssql/base.py        20
-rw-r--r--  lib/sqlalchemy/engine/base.py                 7
-rw-r--r--  lib/sqlalchemy/engine/result.py              11
-rw-r--r--  lib/sqlalchemy/engine/url.py                  2
-rw-r--r--  lib/sqlalchemy/ext/asyncio/scoping.py         2
-rw-r--r--  lib/sqlalchemy/ext/asyncio/session.py         2
-rw-r--r--  lib/sqlalchemy/orm/context.py                42
-rw-r--r--  lib/sqlalchemy/orm/scoping.py                20
-rw-r--r--  lib/sqlalchemy/orm/session.py                22
-rw-r--r--  lib/sqlalchemy/sql/ddl.py                     2
-rw-r--r--  lib/sqlalchemy/testing/requirements.py        2
-rw-r--r--  lib/sqlalchemy/util/compat.py                 3
-rw-r--r--  test/engine/test_deprecations.py             11
-rw-r--r--  test/engine/test_execute.py                  51
-rw-r--r--  test/orm/test_attributes.py                   2
-rw-r--r--  test/orm/test_deprecations.py                23
-rw-r--r--  test/orm/test_froms.py                       54
26 files changed, 468 insertions, 106 deletions
diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst
index 71939f44f..e6c9e2952 100644
--- a/doc/build/changelog/migration_20.rst
+++ b/doc/build/changelog/migration_20.rst
@@ -1229,6 +1229,24 @@ following the table, and may include additional notes not summarized here.
* - ::
+ session.query(User).\
+ filter_by(name='some user').first()
+
+
+ - ::
+
+ session.execute(
+ select(User).
+ filter_by(name="some user").
+ limit(1)
+ ).scalars().first()
+
+ - :ref:`migration_20_unify_select`
+
+ :meth:`_engine.Result.first`
+
+ * - ::
+
session.query(User).options(
joinedload(User.addresses)
).all()
@@ -1362,6 +1380,9 @@ Legacy code examples are illustrated below::
user = session.query(User).filter_by(name='some user').one()
# becomes legacy use case
+ user = session.query(User).filter_by(name='some user').first()
+
+ # becomes legacy use case
user = session.query(User).get(5)
# becomes legacy use case
@@ -1422,6 +1443,11 @@ Below are some examples of how to migrate to :func:`_sql.select`::
select(User).filter_by(name="some user")
).scalar_one()
+ # for first(), no LIMIT is applied automatically; add limit(1) if LIMIT
+ # is desired on the query
+ user = session.execute(
+ select(User).filter_by(name="some user").limit(1)
+ ).scalars().first()
# get() moves to the Session directly
user = session.get(User, 5)
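As a quick side-by-side, here is a minimal sketch (assuming a mapped ``User``
class and an existing ``session``, as in the examples above) of the legacy
pattern next to the 2.0 pattern::

    # 1.x style - first() applies LIMIT 1 to the query automatically
    user = session.query(User).filter_by(name="some user").first()

    # 2.0 style - no LIMIT is applied automatically; add limit(1) explicitly
    user = session.execute(
        select(User).filter_by(name="some user").limit(1)
    ).scalars().first()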
@@ -1846,21 +1872,22 @@ As is the case described at :ref:`migration_20_query_from_self`, the
from sqlalchemy.orm import aliased
- subquery = session.query(User).filter(User.id == 5).subquery()
+ subquery = session.query(User).filter(User.name.like("%somename%")).subquery()
ua = aliased(User, subquery)
- user = session.query(ua).first()
+ user = session.query(ua).order_by(ua.id).first()
Using :term:`2.0 style`::
from sqlalchemy.orm import aliased
- subquery = select(User).where(User.id == 5).subquery()
+ subquery = select(User).where(User.name.like("%somename%")).subquery()
ua = aliased(User, subquery)
- user = session.execute(select(ua)).scalars().first()
+ # note that LIMIT 1 is not automatically supplied; add limit(1) if needed
+ user = session.execute(select(ua).order_by(ua.id).limit(1)).scalars().first()
**Discussion**
diff --git a/doc/build/changelog/unreleased_14/6746.rst b/doc/build/changelog/unreleased_14/6746.rst
new file mode 100644
index 000000000..cff1a4ae9
--- /dev/null
+++ b/doc/build/changelog/unreleased_14/6746.rst
@@ -0,0 +1,7 @@
+.. change::
+ :tags: orm
+ :tickets: 6746
+
+ Deprecate usage of :class:`_orm.scoped_session` with asyncio drivers.
+ When using Asyncio the :class:`_asyncio.async_scoped_session` should
+ be used instead.
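A minimal sketch of the suggested replacement (the database URL and the use of
``current_task`` as the scope function are illustrative assumptions)::

    from asyncio import current_task

    from sqlalchemy.ext.asyncio import AsyncSession
    from sqlalchemy.ext.asyncio import async_scoped_session
    from sqlalchemy.ext.asyncio import create_async_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

    # factory producing AsyncSession objects
    async_session_factory = sessionmaker(engine, class_=AsyncSession)

    # task-local registry; replaces scoped_session for asyncio use
    AsyncScopedSession = async_scoped_session(
        async_session_factory, scopefunc=current_task
    )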
diff --git a/doc/build/changelog/unreleased_14/6910.rst b/doc/build/changelog/unreleased_14/6910.rst
new file mode 100644
index 000000000..1e0833ed8
--- /dev/null
+++ b/doc/build/changelog/unreleased_14/6910.rst
@@ -0,0 +1,9 @@
+.. change::
+ :tags: bug, mssql, reflection
+ :tickets: 6910
+
+ Fixed an issue where :meth:`_reflection.has_table` returned
+ ``True`` for local temporary tables that actually belonged to a
+ different SQL Server session (connection). An extra check is now
+ performed to ensure that the temp table detected is in fact owned
+ by the current session.
diff --git a/doc/build/changelog/unreleased_14/6913.rst b/doc/build/changelog/unreleased_14/6913.rst
new file mode 100644
index 000000000..43ce34c21
--- /dev/null
+++ b/doc/build/changelog/unreleased_14/6913.rst
@@ -0,0 +1,9 @@
+.. change::
+ :tags: bug, engine, regression
+ :tickets: 6913
+
+ Fixed issue where the ability of the
+ :meth:`_engine.ConnectionEvents.before_execute` method to alter the SQL
+ statement object passed, returning the new object to be invoked, was
+ inadvertently removed. This behavior has been restored.
+
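For context, a minimal sketch of the restored hook (``engine`` is assumed to be
an existing :class:`_engine.Engine`); when ``retval=True`` is used, returning a
new statement from the listener causes that statement to be the one invoked::

    from sqlalchemy import event, literal_column, select

    @event.listens_for(engine, "before_execute", retval=True)
    def _substitute(conn, clauseelement, multiparams, params, execution_options):
        # replace the statement to be executed with a different one
        return select(literal_column("'there'")), multiparams, params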
diff --git a/doc/build/changelog/unreleased_14/6924.rst b/doc/build/changelog/unreleased_14/6924.rst
new file mode 100644
index 000000000..215dcc425
--- /dev/null
+++ b/doc/build/changelog/unreleased_14/6924.rst
@@ -0,0 +1,9 @@
+.. change::
+ :tags: bug, orm, regression
+ :tickets: 6924
+
+ Fixed issue in recently repaired ``Query.with_entities()`` method where the
+ flag that determines automatic uniquing for legacy ORM ``Query`` objects
+ only would be set to ``True`` inappropriately in cases where the
+ ``with_entities()`` call would be setting the ``Query`` to return
+ column-only rows, which are not uniqued.
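An illustrative sketch of the affected pattern, mirroring the new tests added
in this commit (``User`` and ``Address`` are assumed mapped classes); rows
produced by ``with_entities()`` that consist of columns only are plain tuples
and are not filtered for duplicates::

    q = (
        session.query(User)
        .join(Address)
        .filter(Address.user_id == 8)
        .with_entities(User.id, User.name)
        .order_by(User.id)
    )

    # duplicate (id, name) rows, one per joined Address, are retained
    rows = q.all()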
diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst
index 94f985ae8..2e4dbd93b 100644
--- a/doc/build/orm/contextual.rst
+++ b/doc/build/orm/contextual.rst
@@ -27,7 +27,7 @@ Architecture <https://martinfowler.com/eaaCatalog/registry.html>`_.
management. If you're new to SQLAlchemy, and especially if the
term "thread-local variable" seems strange to you, we recommend that
if possible you familiarize first with an off-the-shelf integration
- system such as `Flask-SQLAlchemy <https://packages.python.org/Flask-SQLAlchemy/>`_
+ system such as `Flask-SQLAlchemy <https://pypi.org/project/Flask-SQLAlchemy/>`_
or `zope.sqlalchemy <https://pypi.org/project/zope.sqlalchemy>`_.
A :class:`.scoped_session` is constructed by calling it, passing it a
diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst
index 29b755cb7..38f289058 100644
--- a/doc/build/orm/persistence_techniques.rst
+++ b/doc/build/orm/persistence_techniques.rst
@@ -498,6 +498,151 @@ The above mapping upon INSERT will look like:
:ref:`metadata_defaults_toplevel`
+.. _orm_dml_returning_objects:
+
+Using INSERT, UPDATE and ON CONFLICT (i.e. upsert) to return ORM Objects
+==========================================================================
+
+.. deepalchemy:: The feature of linking ORM objects to RETURNING is a new and
+ experimental feature.
+
+.. versionadded:: 1.4.0
+
+The :term:`DML` constructs :func:`_dml.insert`, :func:`_dml.update`, and
+:func:`_dml.delete` feature a method :meth:`_dml.UpdateBase.returning` which on
+database backends that support RETURNING (PostgreSQL, SQL Server, some MariaDB
+versions) may be used to return database rows generated or matched by
+the statement as though they were SELECTed. The ORM-enabled UPDATE and DELETE
+statements may be combined with this feature, so that they return rows
+corresponding to all the rows which were matched by the criteria::
+
+ from sqlalchemy import update
+
+ stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
+ returning(User.id)
+
+ for row in session.execute(stmt):
+ print(f"id: {row.id}")
+
+The above example returns the ``User.id`` attribute for each row matched.
+Provided that each row contains at least a primary key value, we may opt to
+receive these rows as ORM objects, allowing ORM objects to be loaded from the
+database corresponding atomically to an UPDATE statement against those rows. To
+achieve this, we may combine the :class:`_dml.Update` construct which returns
+``User`` rows with a :func:`_sql.select` that's adapted to run this UPDATE
+statement in an ORM context using the :meth:`_sql.Select.from_statement`
+method::
+
+ stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
+ returning(User)
+
+ orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True)
+
+ for user in session.execute(orm_stmt).scalars():
+ print("updated user: %s" % user)
+
+Above, we produce an :func:`_dml.update` construct that includes
+:meth:`_dml.Update.returning` given the full ``User`` entity, which will
+produce complete rows from the database table as it UPDATEs them; any arbitrary
+set of columns to load may be specified as long as the full primary key is
+included. Next, these rows are adapted to an ORM load by producing a
+:func:`_sql.select` for the desired entity, then adapting it to the UPDATE
+statement by passing the :class:`_dml.Update` construct to the
+:meth:`_sql.Select.from_statement` method; this special ORM method, introduced
+at :ref:`orm_queryguide_selecting_text`, produces an ORM-specific adapter that
+allows the given statement to act as though it were the SELECT of rows that is
+first described. No SELECT is actually emitted in the database, only the
+UPDATE..RETURNING we've constructed.
+
+Finally, we make use of :ref:`orm_queryguide_populate_existing` on the
+construct so that all the data returned by the UPDATE, including the columns
+we've updated, are populated into the returned objects, replacing any
+values which were there already. This has the same effect as if we had
+used the ``synchronize_session='fetch'`` strategy described previously
+at :ref:`orm_expression_update_delete_sync`.
+
+Using PostgreSQL ON CONFLICT with RETURNING to return upserted ORM objects
+---------------------------------------------------------------------------
+
+The above approach can be used with INSERTs with RETURNING as well. As a more
+advanced example, below illustrates how to use the PostgreSQL
+:ref:`postgresql_insert_on_conflict` construct to INSERT or UPDATE rows in the
+database, while simultaneously producing those objects as ORM instances::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(User).values(
+ [
+ dict(name="sandy", fullname="Sandy Cheeks"),
+ dict(name="squidward", fullname="Squidward Tentacles"),
+ dict(name="spongebob", fullname="Spongebob Squarepants"),
+ ]
+ )
+
+ stmt = stmt.on_conflict_do_update(
+ index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname)
+ ).returning(User)
+
+ orm_stmt = (
+ select(User)
+ .from_statement(stmt)
+ .execution_options(populate_existing=True)
+ )
+ for user in session.execute(
+ orm_stmt,
+ ).scalars():
+ print("inserted or updated: %s" % user)
+
+To start, we make sure we are using the PostgreSQL variant of the
+:func:`_postgresql.insert` construct. Next, we construct a multi-values
+INSERT statement, where a single INSERT statement will provide multiple rows
+to be inserted. On the PostgreSQL database, this syntax provides the most
+efficient means of sending many hundreds of rows at once to be INSERTed.
+
+From there, we could, if we wanted, simply add the ``RETURNING`` clause to
+produce a bulk INSERT. However, to make the example even more interesting, we will
+also add the PostgreSQL specific ``ON CONFLICT..DO UPDATE`` syntax so that
+rows which already exist, based on a unique constraint, will be UPDATEd instead.
+We assume there is a unique index or UNIQUE constraint on the ``name`` column of the
+``user_account`` table above, and then specify an appropriate :meth:`_postgresql.Insert.on_conflict_do_update`
+criteria that will update the ``fullname`` column for rows that already exist.
+
+Finally, we add the :meth:`_dml.Insert.returning` clause as we did in the
+previous example, and select our ``User`` objects using the same
+:meth:`_sql.Select.from_statement` approach as we did earlier. Supposing the
+database has only a row for ``(1, "squidward", NULL)`` present; this row will
+trigger the ON CONFLICT routine in our above statement, in other words it will
+perform the equivalent of an UPDATE statement. The other two rows,
+``(NULL, "sandy", "Sandy Cheeks")`` and
+``(NULL, "spongebob", "Spongebob Squarepants")`` do not yet exist in the
+database, and will be inserted using normal INSERT semantics; the primary key
+column ``id`` uses either ``SERIAL`` or ``IDENTITY`` to auto-generate new
+integer values.
+
+Using the above form, we see SQL emitted on the PostgreSQL database as:
+
+
+.. sourcecode:: pycon+sql
+
+ {opensql}INSERT INTO user_account (name, fullname)
+ VALUES (%(name_m0)s, %(fullname_m0)s), (%(name_m1)s, %(fullname_m1)s), (%(name_m2)s, %(fullname_m2)s)
+ ON CONFLICT (name) DO UPDATE SET fullname = excluded.fullname
+ RETURNING user_account.id, user_account.name, user_account.fullname
+ {'name_m0': 'sandy', 'fullname_m0': 'Sandy Cheeks', 'name_m1': 'squidward', 'fullname_m1': 'Squidward Tentacles', 'name_m2': 'spongebob', 'fullname_m2': 'Spongebob Squarepants'}{stop}
+
+ inserted or updated: User(id=2, name='sandy', fullname='Sandy Cheeks')
+ inserted or updated: User(id=3, name='squidward', fullname='Squidward Tentacles')
+ inserted or updated: User(id=1, name='spongebob', fullname='Spongebob Squarepants')
+
+Above we can also see that the INSERTed ``User`` objects have a
+newly generated primary key value as we would expect with any other ORM
+oriented INSERT statement.
+
+.. seealso::
+
+ :ref:`orm_queryguide_selecting_text` - introduces the
+ :meth:`_sql.Select.from_statement` method.
+
.. _session_partitioning:
Partitioning Strategies (e.g. multiple database backends per Session)
@@ -684,7 +829,17 @@ Bulk Operations
.. note:: Bulk INSERT and UPDATE should not be confused with the
more common feature known as :ref:`orm_expression_update_delete`. This
feature allows a single UPDATE or DELETE statement with arbitrary WHERE
- criteria to be emitted.
+ criteria to be emitted. There is also an option on some backends to
+ use true "upsert" with the ORM, such as on PostgreSQL. See the section
+ :ref:`orm_dml_returning_objects` for examples.
+
+.. seealso::
+
+ :ref:`orm_expression_update_delete` - using straight multi-row UPDATE and DELETE statements
+ in an ORM context.
+
+ :ref:`orm_dml_returning_objects` - use UPDATE, INSERT or upsert operations to
+ return ORM objects
.. versionadded:: 1.0.0
diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst
index fb148f6f2..d53b16074 100644
--- a/doc/build/orm/session_basics.rst
+++ b/doc/build/orm/session_basics.rst
@@ -684,78 +684,12 @@ values for ``synchronize_session`` are supported:
* In order to intercept ORM-enabled UPDATE and DELETE operations with event
handlers, use the :meth:`_orm.SessionEvents.do_orm_execute` event.
-.. _orm_dml_returning_objects:
Selecting ORM Objects Inline with UPDATE.. RETURNING or INSERT..RETURNING
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. deepalchemy:: The feature of linking ORM objects to RETURNING is a new and
- experimental feature.
+This section has moved. See :ref:`orm_dml_returning_objects`.
-.. versionadded:: 1.4.0b3
-
-The :term:`DML` constructs :func:`_dml.insert`, :func:`_dml.update`, and
-:func:`_dml.delete` feature a method :meth:`_dml.UpdateBase.returning` which on
-database backends that support RETURNING (PostgreSQL, SQL Server, some MariaDB
-versions) may be used to return database rows generated or matched by
-the statement as though they were SELECTed. The ORM-enabled UPDATE and DELETE
-statements may be combined with this feature, so that they return rows
-corresponding to all the rows which were matched by the criteria::
-
- from sqlalchemy import update
-
- stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
- returning(User.id)
-
- for row in session.execute(stmt):
- print(f"id: {row.id}")
-
-The above example returns the ``User.id`` attribute for each row matched.
-Provided that each row contains at least a primary key value, we may opt to
-receive these rows as ORM objects, allowing ORM objects to be loaded from the
-database corresponding atomically to an UPDATE statement against those rows. To
-achieve this, we may combine the :class:`_dml.Update` construct which returns
-``User`` rows with a :func:`_sql.select` that's adapted to run this UPDATE
-statement in an ORM context using the :meth:`_sql.Select.from_statement`
-method::
-
- stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
- returning(User)
-
- orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True)
-
- for user in session.execute(orm_stmt).scalars():
- print("updated user: %s" % user)
-
-Above, we produce an :func:`_dml.update` construct that includes
-:meth:`_dml.Update.returning` given the full ``User`` entity, which will
-produce complete rows from the database table as it UPDATEs them; any arbitrary
-set of columns to load may be specified as long as the full primary key is
-included. Next, these rows are adapted to an ORM load by producing a
-:func:`_sql.select` for the desired entity, then adapting it to the UPDATE
-statement by passing the :class:`_dml.Update` construct to the
-:meth:`_sql.Select.from_statement` method; this special ORM method, introduced
-at :ref:`orm_queryguide_selecting_text`, produces an ORM-specific adapter that
-allows the given statement to act as though it were the SELECT of rows that is
-first described. No SELECT is actually emitted in the database, only the
-UPDATE..RETURNING we've constructed.
-
-Finally, we make use of :ref:`orm_queryguide_populate_existing` on the
-construct so that all the data returned by the UPDATE, including the columns
-we've updated, are populated into the returned objects, replacing any
-values which were there already. This has the same effect as if we had
-used the ``synchronize_session='fetch'`` strategy described previously
-at :ref:`orm_expression_update_delete_sync`.
-
-The above approach can be used with INSERTs as well (and technically
-DELETEs too, though this makes less sense as the returned ORM objects
-by definition don't exist in the database anymore), as both of these
-constructs support RETURNING as well.
-
-.. seealso::
-
- :ref:`orm_queryguide_selecting_text` - introduces the
- :meth:`_sql.Select.from_statement` method.
.. _session_autobegin:
diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst
index a71b4e6ed..bc901762b 100644
--- a/doc/build/orm/session_transaction.rst
+++ b/doc/build/orm/session_transaction.rst
@@ -597,7 +597,7 @@ We can for example create our :class:`_orm.Session` from a default
plain_engine = create_engine("postgresql://scott:tiger@localhost/test")
- autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT")
+ autocommit_engine = plain_engine.execution_options(isolation_level="AUTOCOMMIT")
# will normally use plain_engine
Session = sessionmaker(plain_engine)
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index c11166735..8607edeca 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -758,6 +758,7 @@ from ... import Identity
from ... import schema as sa_schema
from ... import Sequence
from ... import sql
+from ... import text
from ... import types as sqltypes
from ... import util
from ...engine import cursor as _cursor
@@ -2823,8 +2824,16 @@ class MSDialect(default.DefaultDialect):
)
)
- result = connection.execute(s.limit(1))
- return result.scalar() is not None
+ table_name = connection.execute(s.limit(1)).scalar()
+ if table_name:
+ # #6910: verify it's not a temp table from another session
+ obj_id = connection.execute(
+ text("SELECT object_id(:table_name)"),
+ {"table_name": "tempdb.dbo.[{}]".format(table_name)},
+ ).scalar()
+ return bool(obj_id)
+ else:
+ return False
else:
tables = ischema.tables
@@ -3009,7 +3018,12 @@ class MSDialect(default.DefaultDialect):
return view_def
def _temp_table_name_like_pattern(self, tablename):
- return tablename + (("___%") if not tablename.startswith("##") else "")
+ # LIKE uses '%' to match zero or more characters and '_' to match any
+ # single character. We want to match literal underscores, so T-SQL
+ # requires that we enclose them in square brackets.
+ return tablename + (
+ ("[_][_][_]%") if not tablename.startswith("##") else ""
+ )
def _get_internal_temp_table_name(self, connection, tablename):
# it's likely that schema is always "dbo", but since we can
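As an illustration of the escaping, here is a standalone sketch of the same
pattern-building logic, using hypothetical table names::

    def temp_table_like_pattern(tablename):
        # "[_]" matches a literal underscore in a T-SQL LIKE expression,
        # while a bare "_" would match any single character
        return tablename + (
            "[_][_][_]%" if not tablename.startswith("##") else ""
        )

    assert temp_table_like_pattern("#some_temp") == "#some_temp[_][_][_]%"
    # global temp tables (##) are looked up by their exact name
    assert temp_table_like_pattern("##some_temp") == "##some_temp"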
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index c26d9a0a7..a316f904f 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -1287,6 +1287,7 @@ class Connection(Connectable):
if self._has_events or self.engine._has_events:
(
+ default,
distilled_params,
event_multiparams,
event_params,
@@ -1335,6 +1336,7 @@ class Connection(Connectable):
if self._has_events or self.engine._has_events:
(
+ ddl,
distilled_params,
event_multiparams,
event_params,
@@ -1399,7 +1401,7 @@ class Connection(Connectable):
else:
distilled_params = []
- return distilled_params, event_multiparams, event_params
+ return elem, distilled_params, event_multiparams, event_params
def _execute_clauseelement(
self, elem, multiparams, params, execution_options
@@ -1415,6 +1417,7 @@ class Connection(Connectable):
has_events = self._has_events or self.engine._has_events
if has_events:
(
+ elem,
distilled_params,
event_multiparams,
event_params,
@@ -1492,6 +1495,7 @@ class Connection(Connectable):
if self._has_events or self.engine._has_events:
(
+ compiled,
distilled_params,
event_multiparams,
event_params,
@@ -1536,6 +1540,7 @@ class Connection(Connectable):
if not future:
if self._has_events or self.engine._has_events:
(
+ statement,
distilled_params,
event_multiparams,
event_params,
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 60474c0ed..3c2e682be 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1054,6 +1054,17 @@ class Result(_WithKeys, ResultInternal):
column of the first row, use the :meth:`.Result.scalar` method,
or combine :meth:`.Result.scalars` and :meth:`.Result.first`.
+ Additionally, in contrast to the behavior of the legacy ORM
+ :meth:`_orm.Query.first` method, **no limit is applied** to the
+ SQL query which was invoked to produce this :class:`_engine.Result`;
+ for a DBAPI driver that buffers results in memory before yielding
+ rows, all rows will be sent to the Python process and all but
+ the first row will be discarded.
+
+ .. seealso::
+
+ :ref:`migration_20_unify_select`
+
:return: a :class:`.Row` object, or None
if no rows remain.
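A brief sketch of the two equivalent spellings mentioned above (``conn`` and a
``users`` table are assumptions for illustration); note that any LIMIT must be
part of the statement itself::

    stmt = select(users.c.id).order_by(users.c.id).limit(1)

    # first column of the first row, or None
    value = conn.execute(stmt).scalar()

    # equivalent, combining scalars() and first()
    value = conn.execute(stmt).scalars().first()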
diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py
index d72654c73..b924f1c73 100644
--- a/lib/sqlalchemy/engine/url.py
+++ b/lib/sqlalchemy/engine/url.py
@@ -655,7 +655,7 @@ class URL(
dialect_cls = entrypoint.get_dialect_cls(self)
return dialect_cls
- def translate_connect_args(self, names=[], **kw):
+ def translate_connect_args(self, names=None, **kw):
r"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
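The signature change avoids the shared-mutable-default pitfall; a generic
sketch of why, with names that are purely illustrative::

    def bad(names=[]):
        # the same list object is reused across calls, so any in-place
        # mutation leaks into later calls
        names.append("x")
        return names

    bad()  # ["x"]
    bad()  # ["x", "x"]

    def good(names=None):
        names = [] if names is None else names
        names.append("x")
        return names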
diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py
index 92d894599..7fa8cba5c 100644
--- a/lib/sqlalchemy/ext/asyncio/scoping.py
+++ b/lib/sqlalchemy/ext/asyncio/scoping.py
@@ -63,6 +63,8 @@ class async_scoped_session(ScopedSessionMixin):
"""
+ _support_async = True
+
def __init__(self, session_factory, scopefunc):
"""Construct a new :class:`_asyncio.async_scoped_session`.
diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py
index 59b6a2b15..a10621eef 100644
--- a/lib/sqlalchemy/ext/asyncio/session.py
+++ b/lib/sqlalchemy/ext/asyncio/session.py
@@ -53,6 +53,8 @@ class AsyncSession(ReversibleProxy):
"""
+ _is_asyncio = True
+
__slots__ = (
"binds",
"bind",
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py
index 83b6586cc..9318bb163 100644
--- a/lib/sqlalchemy/orm/context.py
+++ b/lib/sqlalchemy/orm/context.py
@@ -417,7 +417,10 @@ class ORMFromStatementCompileState(ORMCompileState):
)
_QueryEntity.to_compile_state(
- self, statement_container._raw_columns, self._entities
+ self,
+ statement_container._raw_columns,
+ self._entities,
+ is_current_entities=True,
)
self.current_path = statement_container._compile_options._current_path
@@ -590,6 +593,7 @@ class ORMSelectCompileState(ORMCompileState, SelectState):
self,
memoized_entities._raw_columns,
[],
+ is_current_entities=False,
)
for memoized_entities in (
select_statement._memoized_select_entities
@@ -597,7 +601,10 @@ class ORMSelectCompileState(ORMCompileState, SelectState):
}
_QueryEntity.to_compile_state(
- self, select_statement._raw_columns, self._entities
+ self,
+ select_statement._raw_columns,
+ self._entities,
+ is_current_entities=True,
)
self.current_path = select_statement._compile_options._current_path
@@ -839,7 +846,9 @@ class ORMSelectCompileState(ORMCompileState, SelectState):
# entities will also set up polymorphic adapters for mappers
# that have with_polymorphic configured
- _QueryEntity.to_compile_state(self, query._raw_columns, self._entities)
+ _QueryEntity.to_compile_state(
+ self, query._raw_columns, self._entities, is_current_entities=True
+ )
return self
@classmethod
@@ -2278,13 +2287,18 @@ class _QueryEntity(object):
use_id_for_hash = False
@classmethod
- def to_compile_state(cls, compile_state, entities, entities_collection):
+ def to_compile_state(
+ cls, compile_state, entities, entities_collection, is_current_entities
+ ):
for idx, entity in enumerate(entities):
if entity._is_lambda_element:
if entity._is_sequence:
cls.to_compile_state(
- compile_state, entity._resolved, entities_collection
+ compile_state,
+ entity._resolved,
+ entities_collection,
+ is_current_entities,
)
continue
else:
@@ -2294,7 +2308,10 @@ class _QueryEntity(object):
if entity.is_selectable:
if "parententity" in entity._annotations:
_MapperEntity(
- compile_state, entity, entities_collection
+ compile_state,
+ entity,
+ entities_collection,
+ is_current_entities,
)
else:
_ColumnEntity._for_columns(
@@ -2343,12 +2360,15 @@ class _MapperEntity(_QueryEntity):
"_polymorphic_discriminator",
)
- def __init__(self, compile_state, entity, entities_collection):
+ def __init__(
+ self, compile_state, entity, entities_collection, is_current_entities
+ ):
entities_collection.append(self)
- if compile_state._primary_entity is None:
- compile_state._primary_entity = self
- compile_state._has_mapper_entities = True
- compile_state._has_orm_entities = True
+ if is_current_entities:
+ if compile_state._primary_entity is None:
+ compile_state._primary_entity = self
+ compile_state._has_mapper_entities = True
+ compile_state._has_orm_entities = True
entity = entity._annotations["parententity"]
entity._post_inspect
diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py
index be16bc911..353caa8fe 100644
--- a/lib/sqlalchemy/orm/scoping.py
+++ b/lib/sqlalchemy/orm/scoping.py
@@ -13,6 +13,7 @@ from ..util import create_proxy_methods
from ..util import ScopedRegistry
from ..util import ThreadLocalRegistry
from ..util import warn
+from ..util import warn_deprecated
__all__ = ["scoped_session", "ScopedSessionMixin"]
@@ -42,9 +43,16 @@ class ScopedSessionMixin(object):
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
- return sess
else:
- return self.registry()
+ sess = self.registry()
+ if not self._support_async and sess._is_asyncio:
+ warn_deprecated(
+ "Using `scoped_session` with asyncio is deprecated and "
+ "will raise an error in a future version. "
+ "Please use `async_scoped_session` instead.",
+ "1.4.23",
+ )
+ return sess
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
@@ -116,8 +124,16 @@ class scoped_session(ScopedSessionMixin):
See :ref:`unitofwork_contextual` for a tutorial.
+ .. warning::
+
+ When using :ref:`asyncio_toplevel` the async
+ version :class:`_asyncio.async_scoped_session` should be
+ used instead.
+
"""
+ _support_async = False
+
session_factory = None
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 8302f70d6..af803a1b0 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -948,6 +948,8 @@ class Session(_SessionClassMethods):
"""
+ _is_asyncio = False
+
@util.deprecated_params(
autocommit=(
"2.0",
@@ -1049,17 +1051,13 @@ class Session(_SessionClassMethods):
:param enable_baked_queries: defaults to ``True``. A flag consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
- of this extension. When set to ``False``, all caching is disabled,
- including baked queries defined by the calling application as
- well as those used internally. Setting this flag to ``False``
- can significantly reduce memory use, however will also degrade
- performance for those areas that make use of baked queries
- (such as relationship loaders). Additionally, baked query
- logic in the calling application or potentially within the ORM
- that may be malfunctioning due to cache key collisions or similar
- can be flagged by observing if this flag resolves the issue.
-
- .. versionadded:: 1.2
+ of this extension. When set to ``False``, caching as used by
+ this particular extension is disabled.
+
+ .. versionchanged:: 1.4 The ``sqlalchemy.ext.baked`` extension is
+ legacy and is not used by any of SQLAlchemy's internals. This
+ flag therefore only affects applications that are making explicit
+ use of this extension within their own code.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
@@ -1311,6 +1309,7 @@ class Session(_SessionClassMethods):
"subtransactions are not implemented in future "
"Session objects."
)
+
if self._autobegin():
if not subtransactions and not nested and not _subtrans:
return self._transaction
@@ -1328,6 +1327,7 @@ class Session(_SessionClassMethods):
elif not self.autocommit:
# outermost transaction. must be a not nested and not
# a subtransaction
+
assert not nested and not _subtrans and not subtransactions
trans = SessionTransaction(self)
assert self._transaction is trans
diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py
index 233e79f7c..f8985548e 100644
--- a/lib/sqlalchemy/sql/ddl.py
+++ b/lib/sqlalchemy/sql/ddl.py
@@ -1081,7 +1081,7 @@ class SchemaDropper(DDLBase):
table,
drop_ok=False,
_is_metadata_operation=False,
- _ignore_sequences=[],
+ _ignore_sequences=(),
):
if not drop_ok and not self._can_drop_table(table):
return
diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py
index f660556eb..742a9a1f8 100644
--- a/lib/sqlalchemy/testing/requirements.py
+++ b/lib/sqlalchemy/testing/requirements.py
@@ -857,7 +857,7 @@ class SuiteRequirements(Requirements):
>>> testing.requirements.get_isolation_levels()
{
- "default": "READ_COMMITED",
+ "default": "READ_COMMITTED",
"supported": [
"SERIALIZABLE", "READ UNCOMMITTED",
"READ COMMITTED", "REPEATABLE READ",
diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py
index 5d52f740f..5914e8681 100644
--- a/lib/sqlalchemy/util/compat.py
+++ b/lib/sqlalchemy/util/compat.py
@@ -392,6 +392,9 @@ if py3k:
"""
+ kwonlydefaults = kwonlydefaults or {}
+ annotations = annotations or {}
+
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
diff --git a/test/engine/test_deprecations.py b/test/engine/test_deprecations.py
index 795cc5a4c..39e2bf762 100644
--- a/test/engine/test_deprecations.py
+++ b/test/engine/test_deprecations.py
@@ -1687,6 +1687,17 @@ class EngineEventsTest(fixtures.TestBase):
)
eq_(result.all(), [("15",)])
+ @testing.only_on("sqlite")
+ def test_modify_statement_string(self, connection):
+ @event.listens_for(connection, "before_execute", retval=True)
+ def _modify(
+ conn, clauseelement, multiparams, params, execution_options
+ ):
+ return clauseelement.replace("hi", "there"), multiparams, params
+
+ with _string_deprecation_expect():
+ eq_(connection.scalar("select 'hi'"), "there")
+
def test_retval_flag(self):
canary = []
diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py
index 19ba5f03c..dd4ee32f8 100644
--- a/test/engine/test_execute.py
+++ b/test/engine/test_execute.py
@@ -31,6 +31,7 @@ from sqlalchemy.pool import NullPool
from sqlalchemy.pool import QueuePool
from sqlalchemy.sql import column
from sqlalchemy.sql import literal
+from sqlalchemy.sql.elements import literal_column
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import config
@@ -1771,6 +1772,56 @@ class EngineEventsTest(fixtures.TestBase):
with e1.connect() as conn:
conn.execute(select(literal("1")))
+ @testing.only_on("sqlite")
+ def test_dont_modify_statement_driversql(self, connection):
+ m1 = mock.Mock()
+
+ @event.listens_for(connection, "before_execute", retval=True)
+ def _modify(
+ conn, clauseelement, multiparams, params, execution_options
+ ):
+ m1.run_event()
+ return clauseelement.replace("hi", "there"), multiparams, params
+
+ # the event does not take effect for the "driver SQL" option
+ eq_(connection.exec_driver_sql("select 'hi'").scalar(), "hi")
+
+ # event is not called at all
+ eq_(m1.mock_calls, [])
+
+ @testing.combinations((True,), (False,), argnames="future")
+ @testing.only_on("sqlite")
+ def test_modify_statement_internal_driversql(self, connection, future):
+ m1 = mock.Mock()
+
+ @event.listens_for(connection, "before_execute", retval=True)
+ def _modify(
+ conn, clauseelement, multiparams, params, execution_options
+ ):
+ m1.run_event()
+ return clauseelement.replace("hi", "there"), multiparams, params
+
+ eq_(
+ connection._exec_driver_sql(
+ "select 'hi'", [], {}, {}, future=future
+ ).scalar(),
+ "hi" if future else "there",
+ )
+
+ if future:
+ eq_(m1.mock_calls, [])
+ else:
+ eq_(m1.mock_calls, [call.run_event()])
+
+ def test_modify_statement_clauseelement(self, connection):
+ @event.listens_for(connection, "before_execute", retval=True)
+ def _modify(
+ conn, clauseelement, multiparams, params, execution_options
+ ):
+ return select(literal_column("'there'")), multiparams, params
+
+ eq_(connection.scalar(select(literal_column("'hi'"))), "there")
+
def test_argument_format_execute(self, testing_engine):
def before_execute(
conn, clauseelement, multiparams, params, execution_options
diff --git a/test/orm/test_attributes.py b/test/orm/test_attributes.py
index 0a0a5d12b..3cdc9b784 100644
--- a/test/orm/test_attributes.py
+++ b/test/orm/test_attributes.py
@@ -1110,7 +1110,7 @@ class UtilTest(fixtures.ORMTest):
attributes.del_attribute(f1, "coll")
assert "coll" not in f1.__dict__
- def test_set_commited_value_none_uselist(self):
+ def test_set_committed_value_none_uselist(self):
"""test that set_committed_value->None to a uselist generates an
empty list"""
diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py
index 4f762e4b7..baa1f44cf 100644
--- a/test/orm/test_deprecations.py
+++ b/test/orm/test_deprecations.py
@@ -38,7 +38,9 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relation
from sqlalchemy.orm import relationship
+from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import Session
+from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import synonym
from sqlalchemy.orm import undefer
@@ -54,6 +56,7 @@ from sqlalchemy.testing import assertions
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import eq_ignore_whitespace
+from sqlalchemy.testing import expect_deprecated
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
@@ -5475,3 +5478,23 @@ class GetBindTest(_GetBindTest):
session = self._fixture({self.tables.base_table: base_class_bind})
is_(session.get_bind(self.classes.ConcreteSubClass), testing.db)
+
+
+class DeprecationScopedSessionTest(fixtures.MappedTest):
+ def test_config_errors(self):
+ sm = sessionmaker()
+
+ def go():
+ s = sm()
+ s._is_asyncio = True
+ return s
+
+ Session = scoped_session(go)
+
+ with expect_deprecated(
+ "Using `scoped_session` with asyncio is deprecated and "
+ "will raise an error in a future version. "
+ "Please use `async_scoped_session` instead."
+ ):
+ Session()
+ Session.remove()
diff --git a/test/orm/test_froms.py b/test/orm/test_froms.py
index 0562dcb26..f7ee1a94c 100644
--- a/test/orm/test_froms.py
+++ b/test/orm/test_froms.py
@@ -29,6 +29,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
+from sqlalchemy.orm.context import ORMSelectCompileState
from sqlalchemy.orm.util import join
from sqlalchemy.sql import column
from sqlalchemy.sql import table
@@ -1699,6 +1700,59 @@ class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
],
)
+ @testing.combinations((True,), (False,))
+ def test_no_uniquing_cols_legacy(self, with_entities):
+ """test #6924"""
+ User = self.classes.User
+ Address = self.classes.Address
+
+ sess = fixture_session()
+
+ if with_entities:
+ q = (
+ sess.query(User)
+ .join(Address)
+ .filter(Address.user_id == 8)
+ .with_entities(User.id, User.name)
+ .order_by(User.id)
+ )
+ else:
+ q = (
+ sess.query(User.id, User.name)
+ .join(Address)
+ .filter(Address.user_id == 8)
+ .order_by(User.id)
+ )
+
+ is_(q._compile_state()._primary_entity, None)
+
+ eq_(q.all(), [(8, "ed"), (8, "ed"), (8, "ed")])
+
+ @testing.combinations((True,), (False,))
+ def test_no_uniquing_cols(self, with_entities):
+ """test #6924"""
+ User = self.classes.User
+ Address = self.classes.Address
+
+ if with_entities:
+ stmt = (
+ select(User)
+ .join(Address)
+ .filter(Address.user_id == 8)
+ .with_only_columns(User.id, User.name)
+ .order_by(User.id)
+ )
+ else:
+ stmt = (
+ select(User.id, User.name)
+ .join(Address)
+ .filter(Address.user_id == 8)
+ .order_by(User.id)
+ )
+
+ compile_state = ORMSelectCompileState.create_for_statement(stmt, None)
+ is_(compile_state._primary_entity, None)
+
def test_column_queries_one(self):
User = self.classes.User