summaryrefslogtreecommitdiff
path: root/examples
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2020-03-09 17:12:35 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2020-04-01 16:12:23 -0400
commita9b62055bfa61c11e9fe0b2984437e2c3e32bf0e (patch)
tree366027c7069edd56d49e9d540ae6a14fbe9e16fe /examples
parente6250123a30e457068878394e49b7ca07ca4d3b0 (diff)
downloadsqlalchemy-a9b62055bfa61c11e9fe0b2984437e2c3e32bf0e.tar.gz
Try to measure new style caching in the ORM, take two
Supersedes: If78fbb557c6f2cae637799c3fec2cbc5ac248aaf Trying to see if by making the cache key memoized, we can still have the older "identity" form of caching which is the cheapest of all, at the same time as the newer "cache key each time" version that is not nearly as cheap; but still much cheaper than no caching at all. Also needed is a per-execution update of _keymap when we invoke from a cached select, so that Column objects that are anonymous or otherwise adapted will match up. this is analogous to the adaptation of bound parameters from the cache key. Adds test coverage for the keymap / construct_params() changes related to caching. Also hones performance to a large extent for statement construction and cache key generation. Also includes a new memoized attribute approach that vastly simplifies the previous approach of "group_expirable_memoized_property" and finally integrates cleanly with _clone(), _generate(), etc. no more hardcoding of attributes is needed, as well as that most _reset_memoization() calls are no longer needed as the reset is inherent in a _generate() call; this also has dramatic performance improvements. Change-Id: I95c560ffcbfa30b26644999412fb6a385125f663
Diffstat (limited to 'examples')
-rw-r--r--examples/performance/short_selects.py27
1 files changed, 26 insertions, 1 deletions
diff --git a/examples/performance/short_selects.py b/examples/performance/short_selects.py
index 376f18f02..db8ab8789 100644
--- a/examples/performance/short_selects.py
+++ b/examples/performance/short_selects.py
@@ -13,6 +13,7 @@ from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext import baked
from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.future import select as future_select
from sqlalchemy.orm import deferred
from sqlalchemy.orm import Session
from . import Profiler
@@ -117,6 +118,20 @@ def test_core_new_stmt_each_time(n):
@Profiler.profile
+def test_core_new_stmt_each_time_compiled_cache(n):
+ """test core, creating a new statement each time, but using the cache."""
+
+ compiled_cache = {}
+ with engine.connect().execution_options(
+ compiled_cache=compiled_cache
+ ) as conn:
+ for id_ in random.sample(ids, n):
+ stmt = select([Customer.__table__]).where(Customer.id == id_)
+ row = conn.execute(stmt).first()
+ tuple(row)
+
+
+@Profiler.profile
def test_core_reuse_stmt(n):
"""test core, reusing the same statement (but recompiling each time)."""
@@ -132,8 +147,8 @@ def test_core_reuse_stmt(n):
def test_core_reuse_stmt_compiled_cache(n):
"""test core, reusing the same statement + compiled cache."""
- compiled_cache = {}
stmt = select([Customer.__table__]).where(Customer.id == bindparam("id"))
+ compiled_cache = {}
with engine.connect().execution_options(
compiled_cache=compiled_cache
) as conn:
@@ -142,5 +157,15 @@ def test_core_reuse_stmt_compiled_cache(n):
tuple(row)
+@Profiler.profile
+def test_core_just_statement_construct_plus_cache_key(n):
+ for i in range(n):
+ stmt = future_select(Customer.__table__).where(
+ Customer.id == bindparam("id")
+ )
+
+ stmt._generate_cache_key()
+
+
if __name__ == "__main__":
Profiler.main()