path: root/numpy/fft/tests/test_helper.py
author    Lion Krischer <krischer@geophysik.uni-muenchen.de>  2016-05-27 17:06:56 +0200
committer Lion Krischer <krischer@geophysik.uni-muenchen.de>  2016-06-06 19:51:12 +0200
commit    2de9651b99da9d3308de5df510cfd28c1a3a0f19 (patch)
tree      cfe67667ac9620a2668d1d06928f5b5008868ad3 /numpy/fft/tests/test_helper.py
parent    f65b233dc5689ecbb79a4ed8a383ef0251bc39c6 (diff)
download  numpy-2de9651b99da9d3308de5df510cfd28c1a3a0f19.tar.gz
ENH: Changing FFT cache to a bounded LRU cache
Replaces the simple dictionary caches for the twiddle factors of numpy.fft with bounded LRU (least recently used) caches, so the caches can no longer grow without bound. See #7686.
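For orientation, here is a minimal sketch of the kind of bounded LRU cache this commit introduces. The class name _BoundedLRUSketch is hypothetical, and the 1.5x size headroom is an assumption inferred from the tests below (an entry larger than the limit is still kept); this is not the actual numpy.fft.helper._FFTCache implementation:

# A minimal sketch only -- NOT the actual numpy.fft.helper._FFTCache.
# Assumptions: values are lists of numpy arrays (as in the tests below),
# and the byte budget stretches to 1.5x the largest single entry so a
# single oversized item can still be cached.
import collections

class _BoundedLRUSketch(object):
    def __init__(self, max_size_in_mb, max_item_count):
        self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
        self._max_item_count = max_item_count
        self._dict = collections.OrderedDict()

    def __setitem__(self, key, value):
        # Insert (or overwrite) and mark as most recently used.
        self._dict.pop(key, None)
        self._dict[key] = value
        self._prune()

    def __getitem__(self, key):
        # A hit refreshes recency; a miss raises KeyError as usual.
        value = self._dict.pop(key)
        self._dict[key] = value
        return value

    def setdefault(self, key, value):
        # Insert only if the key is absent, mirroring dict.setdefault().
        if key not in self._dict:
            self[key] = value
        return self[key]

    def _prune(self):
        # Evict least recently used entries until both bounds hold.
        # At least one entry (the newest) is always kept.
        while len(self._dict) > 1 and (
                len(self._dict) > self._max_item_count or
                self._over_size_budget()):
            self._dict.popitem(last=False)  # drop the oldest entry

    def _over_size_budget(self):
        sizes = [sum(arr.nbytes for arr in value)
                 for value in self._dict.values()]
        budget = max(self._max_size_in_bytes, 1.5 * max(sizes))
        return sum(sizes) > budget

With max_size_in_mb=0.01 the base budget is 10485.76 bytes, so under this sketch a single 12000 byte entry still fits (ceiling 1.5 * 12000 = 18000 bytes) but is evicted once enough additional data arrives, matching the behaviour exercised by test_automatic_pruning below.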
Diffstat (limited to 'numpy/fft/tests/test_helper.py')
-rw-r--r--  numpy/fft/tests/test_helper.py  |  87
1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 1a51f8e3a..9fd0e496d 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -10,6 +10,7 @@ import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
+from numpy.fft.helper import _FFTCache
class TestFFTShift(TestCase):
@@ -74,5 +75,91 @@ class TestIRFFTN(TestCase):
fft.irfftn(a, axes=axes)
+class TestFFTCache(TestCase):
+
+ def test_basic_behaviour(self):
+ c = _FFTCache(max_size_in_mb=1, max_item_count=4)
+ # Setting
+ c[1] = [np.ones(2, dtype=np.float32)]
+ c[2] = [np.zeros(2, dtype=np.float32)]
+ # Getting
+ assert_array_almost_equal(c[1][0], np.ones(2, dtype=np.float32))
+ assert_array_almost_equal(c[2][0], np.zeros(2, dtype=np.float32))
+ # Setdefault
+ c.setdefault(1, [np.array([1, 2], dtype=np.float32)])
+ assert_array_almost_equal(c[1][0], np.ones(2, dtype=np.float32))
+ c.setdefault(3, [np.array([1, 2], dtype=np.float32)])
+ assert_array_almost_equal(c[3][0], np.array([1, 2], dtype=np.float32))
+
+ self.assertEqual(len(c._dict), 3)
+
+ def test_automatic_pruning(self):
+ # 0.01 MB is 10485.76 bytes, i.e. around 2600 single precision samples.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=4)
+ c[1] = [np.ones(200, dtype=np.float32)]
+ c[2] = [np.ones(200, dtype=np.float32)]
+
+ # Accessing these items should not raise errors.
+ c[1], c[2], c[1], c[2]
+
+ # At 12000 bytes this is larger than the limit but should still be kept.
+ c[3] = [np.ones(3000, dtype=np.float32)]
+ # Should exist.
+ c[1], c[2], c[3]
+ # Add one more.
+ c[4] = [np.ones(3000, dtype=np.float32)]
+
+ # The other three should no longer exist.
+ with self.assertRaises(KeyError):
+ c[1]
+ with self.assertRaises(KeyError):
+ c[2]
+ with self.assertRaises(KeyError):
+ c[3]
+
+ # Now test the max item count pruning.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
+ c[1] = [np.empty(2)]
+ c[2] = [np.empty(2)]
+ # Can still be accessed.
+ c[2], c[1]
+
+ c[3] = [np.empty(2)]
+
+ # 1 and 3 can still be accessed - c[2] has been touched least recently
+ # and is thus evicted.
+ c[1], c[3]
+
+ with self.assertRaises(KeyError):
+ c[2]
+
+ c[1], c[3]
+
+ # One last test. We will add a single large item that is slightly
+ # bigger than the cache size. Some small items can still be added.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=5)
+ c[1] = [np.ones(3000, dtype=np.float32)]
+ c[1]
+ c[2] = [np.ones(2, dtype=np.float32)]
+ c[3] = [np.ones(2, dtype=np.float32)]
+ c[4] = [np.ones(2, dtype=np.float32)]
+ c[1], c[2], c[3], c[4]
+
+ # One more big item.
+ c[5] = [np.ones(3000, dtype=np.float32)]
+
+ # c[1] is no longer in the cache; the rest still are.
+ c[2], c[3], c[4], c[5]
+ with self.assertRaises(KeyError):
+ c[1]
+
+ # Another big item - should now be the only item in the cache.
+ c[6] = [np.ones(4000, dtype=np.float32)]
+ for _i in range(1, 6):
+ with self.assertRaises(KeyError):
+ c[_i]
+ c[6]
+
+
if __name__ == "__main__":
run_module_suite()