| author | sumau <soumaya.mauthoor@gmail.com> | 2019-10-28 15:22:08 -0400 |
|---|---|---|
| committer | Mike Bayer <mike_mp@zzzcomputing.com> | 2019-10-30 10:50:44 -0400 |
| commit | d36b1f7f03841b9b346a6fd3395dd29333dce588 | |
| tree | 507f1cca6605dcb767074f0adfdf883fa2ace164 /lib/sqlalchemy/engine/result.py | |
| parent | 12623517def001caa275c2af172405e336c731ab | |
| download | sqlalchemy-d36b1f7f03841b9b346a6fd3395dd29333dce588.tar.gz | |
Use simple growth scale with any max size for BufferedRowResultProxy
The maximum buffer size for the :class:`.BufferedRowResultProxy`, which
is used by dialects such as PostgreSQL when ``stream_results=True``, can
now be set to a number greater than 1000, and the buffer will grow to
that size. Previously, the buffer would not grow beyond 1000 rows even
if the value was set larger. The buffer also now grows by a simple
multiplying factor, currently set to 5, in place of the previous fixed
growth chart. Pull request courtesy Soumaya Mauthoor.
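As a quick illustration of the behavior described above, the following
sketch streams a result with a buffer cap above the old 1000-row limit.
The engine URL and table name are placeholders, not taken from the patch:

```python
from sqlalchemy import create_engine

# Placeholder psycopg2 URL; any PostgreSQL engine behaves the same way.
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

with engine.connect() as conn:
    # stream_results=True selects BufferedRowResultProxy on psycopg2;
    # a max_row_buffer above 1000 is now honored rather than capped.
    result = conn.execution_options(
        stream_results=True, max_row_buffer=5000
    ).execute("select * from big_table")
    for row in result:
        pass  # rows arrive in progressively larger fetchmany() batches
```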
Fixes: #4914
Closes: #4930
Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/4930
Pull-request-sha: 66841f56e967c784f7078a787cec5129462006c8
Change-Id: I6286220bd9d488027fadc444039421a410e19a19
Diffstat (limited to 'lib/sqlalchemy/engine/result.py')
| -rw-r--r-- | lib/sqlalchemy/engine/result.py | 33 |
1 file changed, 9 insertions(+), 24 deletions(-)
```diff
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 733bd6f6a..004a84da5 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1486,10 +1486,8 @@ class BufferedRowResultProxy(ResultProxy):
 
     The pre-fetching behavior fetches only one row initially, and then
     grows its buffer size by a fixed amount with each successive need
-    for additional rows up to a size of 1000.
-
-    The size argument is configurable using the ``max_row_buffer``
-    execution option::
+    for additional rows up to the ``max_row_buffer`` size, which defaults
+    to 1000::
 
         with psycopg2_engine.connect() as conn:
 
@@ -1497,7 +1495,7 @@ class BufferedRowResultProxy(ResultProxy):
                 stream_results=True, max_row_buffer=50
             ).execute("select * from table")
 
-    .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
+    .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
 
     .. seealso::
 
@@ -1506,34 +1504,21 @@ class BufferedRowResultProxy(ResultProxy):
 
     def _init_metadata(self):
         self._max_row_buffer = self.context.execution_options.get(
-            "max_row_buffer", None
+            "max_row_buffer", 1000
         )
+        self._growth_factor = 5
         self.__buffer_rows()
         super(BufferedRowResultProxy, self)._init_metadata()
 
-    # this is a "growth chart" for the buffering of rows.
-    # each successive __buffer_rows call will use the next
-    # value in the list for the buffer size until the max
-    # is reached
-    size_growth = {
-        1: 5,
-        5: 10,
-        10: 20,
-        20: 50,
-        50: 100,
-        100: 250,
-        250: 500,
-        500: 1000,
-    }
-
    def __buffer_rows(self):
         if self.cursor is None:
             return
         size = getattr(self, "_bufsize", 1)
         self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
-        self._bufsize = self.size_growth.get(size, size)
-        if self._max_row_buffer is not None:
-            self._bufsize = min(self._max_row_buffer, self._bufsize)
+        if size < self._max_row_buffer:
+            self._bufsize = min(
+                self._max_row_buffer, size * self._growth_factor
+            )
 
     def _soft_close(self, **kw):
         self.__rowbuffer.clear()
```
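To make the new scheme concrete, here is a standalone sketch (not part
of the patch) of the buffer sizes that successive ``__buffer_rows()``
calls will request under the logic above:

```python
# Standalone model of the new growth logic: start at 1 row and multiply
# by the growth factor, clamping at max_row_buffer, mirroring how
# __buffer_rows() updates self._bufsize in the diff above.
def growth_sequence(max_row_buffer=1000, growth_factor=5):
    size, sizes = 1, [1]
    while size < max_row_buffer:
        size = min(max_row_buffer, size * growth_factor)
        sizes.append(size)
    return sizes

print(growth_sequence())      # [1, 5, 25, 125, 625, 1000]
print(growth_sequence(5000))  # [1, 5, 25, 125, 625, 3125, 5000]
```

Compared with the removed ``size_growth`` chart, the multiplying factor
reaches any configured maximum in a handful of fetches, whereas the old
chart topped out at its hard-coded 1000 entry.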
