summaryrefslogtreecommitdiff
path: root/test/sql
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2020-03-09 17:12:35 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2020-04-01 16:12:23 -0400
commita9b62055bfa61c11e9fe0b2984437e2c3e32bf0e (patch)
tree366027c7069edd56d49e9d540ae6a14fbe9e16fe /test/sql
parente6250123a30e457068878394e49b7ca07ca4d3b0 (diff)
downloadsqlalchemy-a9b62055bfa61c11e9fe0b2984437e2c3e32bf0e.tar.gz
Try to measure new style caching in the ORM, take two
Supersedes: If78fbb557c6f2cae637799c3fec2cbc5ac248aaf Trying to see if by making the cache key memoized, we still can have the older "identity" form of caching which is the cheapest of all, at the same time as the newer "cache key each time" version that is not nearly as cheap; but still much cheaper than no caching at all. Also needed is a per-execution update of _keymap when we invoke from a cached select, so that Column objects that are anonymous or otherwise adapted will match up. this is analogous to the adaption of bound parameters from the cache key. Adds test coverage for the keymap / construct_params() changes related to caching. Also hones performance to a large extent for statement construction and cache key generation. Also includes a new memoized attribute approach that vastly simplifies the previous approach of "group_expirable_memoized_property" and finally integrates cleanly with _clone(), _generate(), etc. no more hardcoding of attributes is needed, as well as that most _reset_memoization() calls are no longer needed as the reset is inherent in a _generate() call; this also has dramatic performance improvements. Change-Id: I95c560ffcbfa30b26644999412fb6a385125f663
Diffstat (limited to 'test/sql')
-rw-r--r--test/sql/test_compare.py46
-rw-r--r--test/sql/test_compiler.py112
-rw-r--r--test/sql/test_external_traversal.py2
-rw-r--r--test/sql/test_functions.py5
-rw-r--r--test/sql/test_resultset.py107
-rw-r--r--test/sql/test_selectable.py8
6 files changed, 263 insertions, 17 deletions
diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py
index ab612053e..2800f8248 100644
--- a/test/sql/test_compare.py
+++ b/test/sql/test_compare.py
@@ -61,6 +61,7 @@ from sqlalchemy.sql.visitors import InternalTraversal
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_false
+from sqlalchemy.testing import is_not_
from sqlalchemy.testing import is_true
from sqlalchemy.testing import ne_
from sqlalchemy.testing.util import random_choices
@@ -174,6 +175,14 @@ class CoreFixtures(object):
table_a.c.a._annotate(
{"orm": True, "parententity": MyEntity("b", select([table_a]))}
),
+ table_a.c.a._annotate(
+ {
+ "orm": True,
+ "parententity": MyEntity(
+ "b", select([table_a]).where(table_a.c.a == 5)
+ ),
+ }
+ ),
),
lambda: (
table_a,
@@ -759,6 +768,43 @@ class CacheKeyTest(CacheKeyFixture, CoreFixtures, fixtures.TestBase):
f1._copy_internals()
f2._copy_internals()
+ def test_generative_cache_key_regen(self):
+ t1 = table("t1", column("a"), column("b"))
+
+ s1 = select([t1])
+
+ ck1 = s1._generate_cache_key()
+
+ s2 = s1.where(t1.c.a == 5)
+
+ ck2 = s2._generate_cache_key()
+
+ ne_(ck1, ck2)
+ is_not_(ck1, None)
+ is_not_(ck2, None)
+
+ def test_generative_cache_key_regen_w_del(self):
+ t1 = table("t1", column("a"), column("b"))
+
+ s1 = select([t1])
+
+ ck1 = s1._generate_cache_key()
+
+ s2 = s1.where(t1.c.a == 5)
+
+ del s1
+
+ # there is now a good chance that id(s3) == id(s1), make sure
+ # cache key is regenerated
+
+ s3 = s2.order_by(t1.c.b)
+
+ ck3 = s3._generate_cache_key()
+
+ ne_(ck1, ck3)
+ is_not_(ck1, None)
+ is_not_(ck3, None)
+
class CompareAndCopyTest(CoreFixtures, fixtures.TestBase):
@classmethod
diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py
index 151ecb1d2..e44deed90 100644
--- a/test/sql/test_compiler.py
+++ b/test/sql/test_compiler.py
@@ -3344,6 +3344,116 @@ class BindParameterTest(AssertsCompiledSQL, fixtures.TestBase):
_group_number=2,
)
+ @testing.combinations(
+ (
+ select([table1]).where(table1.c.myid == 5),
+ select([table1]).where(table1.c.myid == 10),
+ {"myid_1": 5},
+ {"myid_1": 10},
+ None,
+ None,
+ ),
+ (
+ select([table1]).where(
+ table1.c.myid
+ == bindparam(None, unique=True, callable_=lambda: 5)
+ ),
+ select([table1]).where(
+ table1.c.myid
+ == bindparam(None, unique=True, callable_=lambda: 10)
+ ),
+ {"param_1": 5},
+ {"param_1": 10},
+ None,
+ None,
+ ),
+ (
+ table1.update()
+ .where(table1.c.myid == 5)
+ .values(name="n1", description="d1"),
+ table1.update()
+ .where(table1.c.myid == 10)
+ .values(name="n2", description="d2"),
+ {"description": "d1", "myid_1": 5, "name": "n1"},
+ {"description": "d2", "myid_1": 10, "name": "n2"},
+ None,
+ None,
+ ),
+ (
+ table1.update().where(table1.c.myid == 5),
+ table1.update().where(table1.c.myid == 10),
+ {"description": "d1", "myid_1": 5, "name": "n1"},
+ {"description": "d2", "myid_1": 10, "name": "n2"},
+ {"description": "d1", "name": "n1"},
+ {"description": "d2", "name": "n2"},
+ ),
+ (
+ table1.update().where(
+ table1.c.myid
+ == bindparam(None, unique=True, callable_=lambda: 5)
+ ),
+ table1.update().where(
+ table1.c.myid
+ == bindparam(None, unique=True, callable_=lambda: 10)
+ ),
+ {"description": "d1", "param_1": 5, "name": "n1"},
+ {"description": "d2", "param_1": 10, "name": "n2"},
+ {"description": "d1", "name": "n1"},
+ {"description": "d2", "name": "n2"},
+ ),
+ (
+ union(
+ select([table1]).where(table1.c.myid == 5),
+ select([table1]).where(table1.c.myid == 12),
+ ),
+ union(
+ select([table1]).where(table1.c.myid == 5),
+ select([table1]).where(table1.c.myid == 15),
+ ),
+ {"myid_1": 5, "myid_2": 12},
+ {"myid_1": 5, "myid_2": 15},
+ None,
+ None,
+ ),
+ )
+ def test_construct_params_combine_extracted(
+ self, stmt1, stmt2, param1, param2, extparam1, extparam2
+ ):
+
+ if extparam1:
+ keys = list(extparam1)
+ else:
+ keys = []
+
+ s1_cache_key = stmt1._generate_cache_key()
+ s1_compiled = stmt1.compile(cache_key=s1_cache_key, column_keys=keys)
+
+ s2_cache_key = stmt2._generate_cache_key()
+
+ eq_(s1_compiled.construct_params(params=extparam1), param1)
+ eq_(
+ s1_compiled.construct_params(
+ params=extparam1, extracted_parameters=s1_cache_key[1]
+ ),
+ param1,
+ )
+
+ eq_(
+ s1_compiled.construct_params(
+ params=extparam2, extracted_parameters=s2_cache_key[1]
+ ),
+ param2,
+ )
+
+ s1_compiled_no_cache_key = stmt1.compile()
+ assert_raises_message(
+ exc.CompileError,
+ "This compiled object has no original cache key; can't pass "
+ "extracted_parameters to construct_params",
+ s1_compiled_no_cache_key.construct_params,
+ extracted_parameters=s1_cache_key[1],
+ )
+
def test_tuple_expanding_in_no_values(self):
expr = tuple_(table1.c.myid, table1.c.name).in_(
[(1, "foo"), (5, "bar")]
@@ -5021,7 +5131,7 @@ class ResultMapTest(fixtures.TestBase):
stmt = select([t.c.x, t.c.y, l1, t.c.y, l2, t.c.x, l3])
# so the statement has 7 inner columns...
- eq_(len(list(stmt.inner_columns)), 7)
+ eq_(len(list(stmt.selected_columns)), 7)
# 7 are exposed as of 1.4, no more deduping
eq_(len(stmt.subquery().c), 7)
diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py
index ac423931f..37fb752fe 100644
--- a/test/sql/test_external_traversal.py
+++ b/test/sql/test_external_traversal.py
@@ -172,7 +172,7 @@ class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
vis = Vis()
s2 = vis.traverse(s1)
- eq_(list(s2.inner_columns)[0].anon_label, c1.anon_label)
+ eq_(list(s2.selected_columns)[0].anon_label, c1.anon_label)
def test_change_in_place(self):
struct = B(
diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py
index fd3f55780..5a6e6252b 100644
--- a/test/sql/test_functions.py
+++ b/test/sql/test_functions.py
@@ -468,9 +468,12 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
"my_func(:my_func_1, :my_func_2, NULL, :my_func_3)",
)
+ f1 = func.my_func(1, 2, None, 3)
+ f1._generate_cache_key()
+
# test pickling
self.assert_compile(
- util.pickle.loads(util.pickle.dumps(func.my_func(1, 2, None, 3))),
+ util.pickle.loads(util.pickle.dumps(f1)),
"my_func(:my_func_1, :my_func_2, NULL, :my_func_3)",
)
diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py
index f08248440..253ad7b38 100644
--- a/test/sql/test_resultset.py
+++ b/test/sql/test_resultset.py
@@ -29,9 +29,11 @@ from sqlalchemy.engine import default
from sqlalchemy.engine import result as _result
from sqlalchemy.engine import Row
from sqlalchemy.ext.compiler import compiles
+from sqlalchemy.sql import ColumnElement
from sqlalchemy.sql import expression
from sqlalchemy.sql.selectable import TextualSelect
from sqlalchemy.sql.sqltypes import NULLTYPE
+from sqlalchemy.sql.util import ClauseAdapter
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
@@ -1391,16 +1393,20 @@ class KeyTargetingTest(fixtures.TablesTest):
@classmethod
def insert_data(cls):
- cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
- cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
- cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
- cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
- cls.tables.content.insert().execute(type="t1")
-
- if testing.requires.schemas.enabled:
- cls.tables[
- "%s.wschema" % testing.config.test_schema
- ].insert().execute(dict(b="a1", q="c1"))
+ with testing.db.begin() as conn:
+ conn.execute(cls.tables.keyed1.insert(), dict(b="a1", q="c1"))
+ conn.execute(cls.tables.keyed2.insert(), dict(a="a2", b="b2"))
+ conn.execute(cls.tables.keyed3.insert(), dict(a="a3", d="d3"))
+ conn.execute(cls.tables.keyed4.insert(), dict(b="b4", q="q4"))
+ conn.execute(cls.tables.content.insert(), dict(type="t1"))
+
+ if testing.requires.schemas.enabled:
+ conn.execute(
+ cls.tables[
+ "%s.wschema" % testing.config.test_schema
+ ].insert(),
+ dict(b="a1", q="c1"),
+ )
@testing.requires.schemas
def test_keyed_accessor_wschema(self):
@@ -1712,6 +1718,87 @@ class KeyTargetingTest(fixtures.TablesTest):
in_(stmt.selected_columns.keyed2_a, row._mapping)
in_(stmt.selected_columns.keyed2_b, row._mapping)
+ def _adapt_result_columns_fixture_one(self):
+ keyed1 = self.tables.keyed1
+ stmt = (
+ select([keyed1.c.b, keyed1.c.q.label("foo")])
+ .apply_labels()
+ .subquery()
+ )
+
+ return select([stmt.c.keyed1_b, stmt.c.foo])
+
+ def _adapt_result_columns_fixture_two(self):
+ return text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
+ keyed2_a=CHAR, keyed2_b=CHAR
+ )
+
+ def _adapt_result_columns_fixture_three(self):
+ keyed1 = self.tables.keyed1
+ stmt = select([keyed1.c.b, keyed1.c.q.label("foo")]).subquery()
+
+ return select([stmt.c.b, stmt.c.foo])
+
+ def _adapt_result_columns_fixture_four(self):
+ keyed1 = self.tables.keyed1
+
+ stmt1 = select([keyed1]).apply_labels()
+
+ a1 = keyed1.alias()
+ stmt2 = ClauseAdapter(a1).traverse(stmt1)
+
+ return stmt2
+
+ @testing.combinations(
+ _adapt_result_columns_fixture_one,
+ _adapt_result_columns_fixture_two,
+ _adapt_result_columns_fixture_three,
+ _adapt_result_columns_fixture_four,
+ argnames="stmt_fn",
+ )
+ def test_adapt_result_columns(self, connection, stmt_fn):
+ """test adaptation of a CursorResultMetadata to another one.
+
+
+ This copies the _keymap from one to the other in terms of the
+ selected columns of a target selectable.
+
+ This is used by the statement caching process to re-use the
+ CursorResultMetadata from the cached statement against the same
+ statement sent separately.
+
+ """
+
+ stmt1 = stmt_fn(self)
+ stmt2 = stmt_fn(self)
+
+ eq_(stmt1._generate_cache_key(), stmt2._generate_cache_key())
+
+ column_linkage = dict(
+ zip(stmt1.selected_columns, stmt2.selected_columns)
+ )
+
+ result = connection.execute(stmt1)
+
+ mock_context = Mock(
+ compiled=result.context.compiled, invoked_statement=stmt2
+ )
+ existing_metadata = result._metadata
+ adapted_metadata = existing_metadata._adapt_to_context(mock_context)
+
+ eq_(existing_metadata.keys, adapted_metadata.keys)
+
+ for k in existing_metadata._keymap:
+ if isinstance(k, ColumnElement) and k in column_linkage:
+ other_k = column_linkage[k]
+ else:
+ other_k = k
+
+ is_(
+ existing_metadata._keymap[k], adapted_metadata._keymap[other_k]
+ )
+ return stmt1, existing_metadata, stmt2, adapted_metadata
+
class PositionalTextTest(fixtures.TablesTest):
run_inserts = "once"
diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py
index 58e7ee6a1..8fb1f2951 100644
--- a/test/sql/test_selectable.py
+++ b/test/sql/test_selectable.py
@@ -1973,10 +1973,10 @@ class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
)
s1 = select([t1, t2])
s2 = s1.reduce_columns(only_synonyms=False)
- eq_(set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.q]))
+ eq_(set(s2.selected_columns), set([t1.c.x, t1.c.y, t2.c.q]))
s2 = s1.reduce_columns()
- eq_(set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.z, t2.c.q]))
+ eq_(set(s2.selected_columns), set([t1.c.x, t1.c.y, t2.c.z, t2.c.q]))
def test_reduce_only_synonym_fk(self):
m = MetaData()
@@ -2019,7 +2019,7 @@ class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
s1 = select([t1]).subquery()
s2 = select([t1, s1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
- set(s2.reduce_columns().inner_columns),
+ set(s2.reduce_columns().selected_columns),
set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
)
@@ -2027,7 +2027,7 @@ class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
s1 = select([t1]).subquery()
s2 = select([s1, t1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
- set(s2.reduce_columns().inner_columns),
+ set(s2.reduce_columns().selected_columns),
set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
)