summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy/engine
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2015-06-14 16:43:16 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2015-06-14 16:43:16 -0400
commit9ccdea3a0fe57931e779b44eb2c278b78eea3d95 (patch)
tree0e77479d782446f0b35e367bc38e97d215800f4c /lib/sqlalchemy/engine
parente15d58695d6eff9a1d53e31e5ae3666434a4a1af (diff)
downloadsqlalchemy-9ccdea3a0fe57931e779b44eb2c278b78eea3d95.tar.gz
- add test cases for pull request github:182, which adds a new
"max_row_buffer" execution option for BufferedRowResultProxy; also add documentation, changelog, and version notes; rework the max_row_buffer argument to be interpreted from the execution options upfront, when the BufferedRowResultProxy is first initialized.
Diffstat (limited to 'lib/sqlalchemy/engine')
-rw-r--r--lib/sqlalchemy/engine/result.py21
1 file changed, 19 insertions, 2 deletions
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 41b30c983..b2b78dee8 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1068,9 +1068,26 @@ class BufferedRowResultProxy(ResultProxy):
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
for additional rows up to a size of 1000.
+
+ The size argument is configurable using the ``max_row_buffer``
+ execution option::
+
+ with psycopg2_engine.connect() as conn:
+
+ result = conn.execution_options(
+ stream_results=True, max_row_buffer=50
+ ).execute("select * from table")
+
+ .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
+
+ .. seealso::
+
+ :ref:`psycopg2_execution_options`
"""
def _init_metadata(self):
+ self._max_row_buffer = self.context.execution_options.get(
+ 'max_row_buffer', None)
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
@@ -1095,8 +1112,8 @@ class BufferedRowResultProxy(ResultProxy):
size = getattr(self, '_bufsize', 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
- if self.context.execution_options.get('max_row_buffer') is not None:
- self._bufsize = min(self.context.execution_options['max_row_buffer'], self._bufsize)
+ if self._max_row_buffer is not None:
+ self._bufsize = min(self._max_row_buffer, self._bufsize)
def _soft_close(self, **kw):
self.__rowbuffer.clear()