summaryrefslogtreecommitdiff
path: root/test/aaa_profiling
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2017-04-12 11:37:19 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2017-04-12 12:53:40 -0400
commitcef4e5ff38dc7d2200800837c110ab6beec10d8a (patch)
tree24eac58c1fb6e3105c9c4c7046d1aa2b5259a67d /test/aaa_profiling
parent1b463058e3282c73d0fb361f78e96ecaa23ce9f4 (diff)
downloadsqlalchemy-cef4e5ff38dc7d2200800837c110ab6beec10d8a.tar.gz
Warn on _compiled_cache growth
Added warnings to the LRU "compiled cache" used by the :class:`.Mapper` (and ultimately will be for other ORM-based LRU caches) such that when the cache starts hitting its size limits, the application will emit a warning that this is a performance-degrading situation that may require attention. The LRU caches can reach their size limits primarily if an application is making use of an unbounded number of :class:`.Engine` objects, which is an antipattern. Otherwise, this may suggest an issue that should be brought to the SQLAlchemy developers' attention. Additionally, adjusted the test_memusage algorithm again, as the previous one could still allow a growing memory size to be missed.

Change-Id: I020d1ceafb7a08f6addfa990a1e7acd09f933240
Diffstat (limited to 'test/aaa_profiling')
-rw-r--r--test/aaa_profiling/test_memusage.py54
1 file changed, 25 insertions(+), 29 deletions(-)
diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py
index 53f118e15..5e0baabef 100644
--- a/test/aaa_profiling/test_memusage.py
+++ b/test/aaa_profiling/test_memusage.py
@@ -34,7 +34,7 @@ class ASub(A):
pass
-def profile_memory(maxtimes=50,
+def profile_memory(maxtimes=250,
assert_no_sessions=True, get_num_objects=None):
def decorate(func):
# run the test N times. if length of gc.get_objects()
@@ -55,9 +55,10 @@ def profile_memory(maxtimes=50,
def profile(*args):
gc_collect()
samples = []
-
+ max_ = 0
+ max_grew_for = 0
success = False
- for y in range(100 // 5):
+ for y in range(maxtimes // 5):
for x in range(5):
func(*args)
gc_collect()
@@ -71,35 +72,28 @@ def profile_memory(maxtimes=50,
if assert_no_sessions:
assert len(_sessions) == 0
- # check for "flatline" - size is constant for
- # 5 iterations
- for x in samples[-4:]:
- if x != samples[-5]:
- break
+ latest_max = max(samples[-5:])
+ if latest_max > max_:
+ print(
+ "Max grew from %s to %s, max has "
+ "grown for %s samples" % (
+ max_, latest_max, max_grew_for
+ )
+ )
+ max_ = latest_max
+ max_grew_for += 1
+ continue
else:
- success = True
-
- if not success:
- # object count is bigger than when it started
- if samples[-1] > samples[0]:
- for x in samples[1:-2]:
- # see if a spike bigger than the endpoint exists
- if x > samples[-1]:
- success = True
- break
- else:
+ print("Max remained at %s, %s more attempts left" %
+ (max_, max_grew_for))
+ max_grew_for -= 1
+ if max_grew_for == 0:
success = True
-
- # if we saw count go down or flatline,
- # we're done
- if success:
- break
-
- # else keep trying until maxtimes
-
+ break
else:
assert False, repr(samples)
+ assert success
return profile
return decorate
@@ -204,6 +198,7 @@ class MemUsageTest(EnsureZeroed):
del sessmaker
go()
+ @testing.emits_warning("Compiled statement cache for.*")
@testing.crashes('sqlite', ':memory: connection not suitable here')
def test_orm_many_engines(self):
metadata = MetaData(self.engine)
@@ -224,10 +219,10 @@ class MemUsageTest(EnsureZeroed):
m1 = mapper(A, table1, properties={
"bs": relationship(B, cascade="all, delete",
order_by=table2.c.col1)},
- _compiled_cache_size=10
+ _compiled_cache_size=50
)
m2 = mapper(B, table2,
- _compiled_cache_size=10
+ _compiled_cache_size=50
)
m3 = mapper(A, table1, non_primary=True)
@@ -301,6 +296,7 @@ class MemUsageTest(EnsureZeroed):
assert not eng.dialect._type_memos
+ @testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData(self.engine)