summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy
diff options
context:
space:
mode:
authorDiana Clarke <diana.joan.clarke@gmail.com>2012-11-19 21:28:56 -0500
committerDiana Clarke <diana.joan.clarke@gmail.com>2012-11-19 21:28:56 -0500
commitfb18595f426a9026c946840e2fb2f0e0dbd29bef (patch)
tree7cb58071c6e9d0d49db7e19ee347cfefac2864bd /lib/sqlalchemy
parent8ed1835d8e4844c72d6a0e1727c83a163ccde163 (diff)
downloadsqlalchemy-fb18595f426a9026c946840e2fb2f0e0dbd29bef.tar.gz
just a pep8 pass of lib/sqlalchemy/dialects/sqlite
Diffstat (limited to 'lib/sqlalchemy')
-rw-r--r--lib/sqlalchemy/dialects/sqlite/__init__.py7
-rw-r--r--lib/sqlalchemy/dialects/sqlite/base.py178
-rw-r--r--lib/sqlalchemy/dialects/sqlite/pysqlite.py104
3 files changed, 161 insertions, 128 deletions
diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py
index 0958c813c..e11923b44 100644
--- a/lib/sqlalchemy/dialects/sqlite/__init__.py
+++ b/lib/sqlalchemy/dialects/sqlite/__init__.py
@@ -15,6 +15,7 @@ from sqlalchemy.dialects.sqlite.base import \
NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect
__all__ = (
- 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'FLOAT', 'INTEGER',
- 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', 'VARCHAR', 'dialect', 'REAL'
-) \ No newline at end of file
+ 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'FLOAT',
+ 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', 'VARCHAR',
+ 'REAL', 'dialect'
+)
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index c38baa738..6d2d0e89d 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -12,14 +12,15 @@
Date and Time Types
-------------------
-SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
-out of the box functionality for translating values between Python `datetime` objects
-and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
-and related types provide date formatting and parsing functionality when SQlite is used.
-The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
-These types represent dates and times as ISO formatted strings, which also nicely
-support ordering. There's no reliance on typical "libc" internals for these functions
-so historical dates are fully supported.
+SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite
+does not provide out of the box functionality for translating values between
+Python `datetime` objects and a SQLite-supported format. SQLAlchemy's own
+:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
+and parsing functionality when SQLite is used. The implementation classes are
+:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
+These types represent dates and times as ISO formatted strings, which also
+nicely support ordering. There's no reliance on typical "libc" internals
+for these functions so historical dates are fully supported.
Auto Incrementing Behavior
--------------------------
@@ -46,44 +47,47 @@ to the Table construct::
Transaction Isolation Level
---------------------------
-:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
-the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
-connection. Valid values for this parameter are ``SERIALIZABLE`` and
-``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
+:func:`.create_engine` accepts an ``isolation_level`` parameter which
+results in the command ``PRAGMA read_uncommitted <level>`` being invoked for
+every new connection. Valid values for this parameter are ``SERIALIZABLE``
+and ``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
Database Locking Behavior / Concurrency
---------------------------------------
-Note that SQLite is not designed for a high level of concurrency. The database
-itself, being a file, is locked completely during write operations and within
-transactions, meaning exactly one connection has exclusive access to the database
-during this period - all other connections will be blocked during this time.
+Note that SQLite is not designed for a high level of concurrency. The
+database itself, being a file, is locked completely during write operations
+and within transactions, meaning exactly one connection has exclusive access
+to the database during this period - all other connections will be blocked
+during this time.
The Python DBAPI specification also calls for a connection model that is always
-in a transaction; there is no BEGIN method, only commit and rollback. This implies
-that a SQLite DBAPI driver would technically allow only serialized access to a
-particular database file at all times. The pysqlite driver attempts to ameliorate this by
-deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
-DELETE) is received within a transaction. While this breaks serializable isolation,
-it at least delays the exclusive locking inherent in SQLite's design.
+in a transaction; there is no BEGIN method, only commit and rollback. This
+implies that a SQLite DBAPI driver would technically allow only serialized
+access to a particular database file at all times. The pysqlite driver
+attempts to ameliorate this by deferring the actual BEGIN statement until
+the first DML (INSERT, UPDATE, or DELETE) is received within a
+transaction. While this breaks serializable isolation, it at least delays
+the exclusive locking inherent in SQLite's design.
SQLAlchemy's default mode of usage with the ORM is known
-as "autocommit=False", which means the moment the :class:`.Session` begins to be
-used, a transaction is begun. As the :class:`.Session` is used, the autoflush
-feature, also on by default, will flush out pending changes to the database
-before each query. The effect of this is that a :class:`.Session` used in its
-default mode will often emit DML early on, long before the transaction is actually
-committed. This again will have the effect of serializing access to the SQLite
-database. If highly concurrent reads are desired against the SQLite database,
-it is advised that the autoflush feature be disabled, and potentially even
-that autocommit be re-enabled, which has the effect of each SQL statement and
-flush committing changes immediately.
+as "autocommit=False", which means the moment the :class:`.Session` begins to
+be used, a transaction is begun. As the :class:`.Session` is used, the
+autoflush feature, also on by default, will flush out pending changes to the
+database before each query. The effect of this is that a :class:`.Session`
+used in its default mode will often emit DML early on, long before the
+transaction is actually committed. This again will have the effect of
+serializing access to the SQLite database. If highly concurrent reads are
+desired against the SQLite database, it is advised that the autoflush feature
+be disabled, and potentially even that autocommit be re-enabled, which has
+the effect of each SQL statement and flush committing changes immediately.
For more information on SQLite's lack of concurrency by design, please
-see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_
-near the bottom of the page.
+see `Situations Where Another RDBMS May Work Better - High
+Concurrency <http://www.sqlite.org/whentouse.html>`_ near the bottom of
+the page.
.. _sqlite_foreign_keys:
@@ -123,7 +127,8 @@ for new connections through the usage of events::
"""
-import datetime, re
+import datetime
+import re
from sqlalchemy import sql, exc
from sqlalchemy.engine import default, base, reflection
@@ -135,6 +140,7 @@ from sqlalchemy import processors
from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\
FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
+
class _DateTimeMixin(object):
_reg = None
_storage_format = None
@@ -146,6 +152,7 @@ class _DateTimeMixin(object):
if storage_format is not None:
self._storage_format = storage_format
+
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
@@ -164,9 +171,9 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
- storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d",
- regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)")
- )
+ storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d",
+ regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)")
+ )
:param storage_format: format string which will be applied to the
dict with keys year, month, day, hour, minute, second, and microsecond.
@@ -201,6 +208,7 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format = self._storage_format
+
def process(value):
if value is None:
return None
@@ -236,6 +244,7 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
else:
return processors.str_to_datetime
+
class DATE(_DateTimeMixin, sqltypes.Date):
"""Represent a Python date object in SQLite using a string.
@@ -274,6 +283,7 @@ class DATE(_DateTimeMixin, sqltypes.Date):
def bind_processor(self, dialect):
datetime_date = datetime.date
format = self._storage_format
+
def process(value):
if value is None:
return None
@@ -295,6 +305,7 @@ class DATE(_DateTimeMixin, sqltypes.Date):
else:
return processors.str_to_date
+
class TIME(_DateTimeMixin, sqltypes.Time):
"""Represent a Python time object in SQLite using a string.
@@ -313,9 +324,9 @@ class TIME(_DateTimeMixin, sqltypes.Time):
from sqlalchemy.dialects.sqlite import TIME
t = TIME(
- storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
- regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
- )
+ storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
+ regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
+ )
:param storage_format: format string which will be applied to the
dict with keys hour, minute, second, and microsecond.
@@ -343,6 +354,7 @@ class TIME(_DateTimeMixin, sqltypes.Time):
def bind_processor(self, dialect):
datetime_time = datetime.time
format = self._storage_format
+
def process(value):
if value is None:
return None
@@ -394,7 +406,6 @@ ischema_names = {
}
-
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
@@ -435,7 +446,9 @@ class SQLiteCompiler(compiler.SQLCompiler):
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
- self.extract_map[extract.field], self.process(extract.expr, **kw))
+ self.extract_map[extract.field],
+ self.process(extract.expr, **kw)
+ )
except KeyError:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field)
@@ -443,7 +456,7 @@ class SQLiteCompiler(compiler.SQLCompiler):
def limit_clause(self, select):
text = ""
if select._limit is not None:
- text += "\n LIMIT " + self.process(sql.literal(select._limit))
+ text += "\n LIMIT " + self.process(sql.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += "\n LIMIT " + self.process(sql.literal(-1))
@@ -460,7 +473,8 @@ class SQLiteCompiler(compiler.SQLCompiler):
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
- colspec = self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)
+ coltype = self.dialect.type_compiler.process(column.type)
+ colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
@@ -468,12 +482,12 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
if not column.nullable:
colspec += " NOT NULL"
- if column.primary_key and \
- column.table.kwargs.get('sqlite_autoincrement', False) and \
- len(column.table.primary_key.columns) == 1 and \
- issubclass(column.type._type_affinity, sqltypes.Integer) and \
- not column.foreign_keys:
- colspec += " PRIMARY KEY AUTOINCREMENT"
+ if (column.primary_key and
+ column.table.kwargs.get('sqlite_autoincrement', False) and
+ len(column.table.primary_key.columns) == 1 and
+ issubclass(column.type._type_affinity, sqltypes.Integer) and
+ not column.foreign_keys):
+ colspec += " PRIMARY KEY AUTOINCREMENT"
return colspec
@@ -521,10 +535,12 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
for c in index.columns))
return text
+
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
+
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([
'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
@@ -536,14 +552,15 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
- 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is',
- 'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural',
- 'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer',
- 'plan', 'pragma', 'primary', 'query', 'raise', 'references',
- 'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback',
- 'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to',
- 'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using',
- 'vacuum', 'values', 'view', 'virtual', 'when', 'where',
+ 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
+ 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
+ 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on',
+ 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
+ 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict',
+ 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp',
+ 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
+ 'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
+ 'when', 'where',
])
def format_index(self, index, use_schema=True, name=None):
@@ -552,10 +569,14 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
if name is None:
name = index.name
result = self.quote(name, index.quote)
- if not self.omit_schema and use_schema and getattr(index.table, "schema", None):
- result = self.quote_schema(index.table.schema, index.table.quote_schema) + "." + result
+ if (not self.omit_schema and
+ use_schema and
+ getattr(index.table, "schema", None)):
+ result = self.quote_schema(
+ index.table.schema, index.table.quote_schema) + "." + result
return result
+
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
@@ -611,9 +632,10 @@ class SQLiteDialect(default.DefaultDialect):
self.dbapi.sqlite_version_info >= (3, 2, 3)
_isolation_lookup = {
- 'READ UNCOMMITTED':1,
- 'SERIALIZABLE':0
+ 'READ UNCOMMITTED': 1,
+ 'SERIALIZABLE': 0
}
+
def set_isolation_level(self, connection, level):
try:
isolation_level = self._isolation_lookup[level.replace('_', ' ')]
@@ -686,7 +708,8 @@ class SQLiteDialect(default.DefaultDialect):
else:
pragma = "PRAGMA "
qtable = quote(table_name)
- cursor = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
+ statement = "%stable_info(%s)" % (pragma, qtable)
+ cursor = _pragma_cursor(connection.execute(statement))
row = cursor.fetchone()
# consume remaining rows, to work around
@@ -752,9 +775,8 @@ class SQLiteDialect(default.DefaultDialect):
else:
pragma = "PRAGMA "
qtable = quote(table_name)
- c = _pragma_cursor(
- connection.execute("%stable_info(%s)" %
- (pragma, qtable)))
+ statement = "%stable_info(%s)" % (pragma, qtable)
+ c = _pragma_cursor(connection.execute(statement))
rows = c.fetchall()
columns = []
@@ -806,7 +828,7 @@ class SQLiteDialect(default.DefaultDialect):
for col in cols:
if col['primary_key']:
pkeys.append(col['name'])
- return {'constrained_columns':pkeys, 'name':None}
+ return {'constrained_columns': pkeys, 'name': None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
@@ -816,7 +838,8 @@ class SQLiteDialect(default.DefaultDialect):
else:
pragma = "PRAGMA "
qtable = quote(table_name)
- c = _pragma_cursor(connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
+ statement = "%sforeign_key_list(%s)" % (pragma, qtable)
+ c = _pragma_cursor(connection.execute(statement))
fkeys = []
fks = {}
while True:
@@ -839,10 +862,10 @@ class SQLiteDialect(default.DefaultDialect):
except KeyError:
fk = {
'name': None,
- 'constrained_columns' : [],
- 'referred_schema' : None,
- 'referred_table' : rtbl,
- 'referred_columns' : []
+ 'constrained_columns': [],
+ 'referred_schema': None,
+ 'referred_table': rtbl,
+ 'referred_columns': []
}
fkeys.append(fk)
fks[numerical_id] = fk
@@ -864,7 +887,8 @@ class SQLiteDialect(default.DefaultDialect):
pragma = "PRAGMA "
include_auto_indexes = kw.pop('include_auto_indexes', False)
qtable = quote(table_name)
- c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable)))
+ statement = "%sindex_list(%s)" % (pragma, qtable)
+ c = _pragma_cursor(connection.execute(statement))
indexes = []
while True:
row = c.fetchone()
@@ -872,13 +896,15 @@ class SQLiteDialect(default.DefaultDialect):
break
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
- elif not include_auto_indexes and row[1].startswith('sqlite_autoindex'):
+ elif (not include_auto_indexes and
+ row[1].startswith('sqlite_autoindex')):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in indexes:
- c = connection.execute("%sindex_info(%s)" % (pragma, quote(idx['name'])))
+ statement = "%sindex_info(%s)" % (pragma, quote(idx['name']))
+ c = connection.execute(statement)
cols = idx['column_names']
while True:
row = c.fetchone()
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
index 558f1016b..bb77b27b6 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -35,31 +35,32 @@ this explicitly::
Connect Strings
---------------
-The file specification for the SQLite database is taken as the "database" portion of
-the URL. Note that the format of a SQLAlchemy url is::
+The file specification for the SQLite database is taken as the "database"
+portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
-This means that the actual filename to be used starts with the characters to the
-**right** of the third slash. So connecting to a relative filepath looks like::
+This means that the actual filename to be used starts with the characters to
+the **right** of the third slash. So connecting to a relative filepath
+looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
-An absolute path, which is denoted by starting with a slash, means you need **four**
-slashes::
+An absolute path, which is denoted by starting with a slash, means you
+need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
-To use a Windows path, regular drive specifications and backslashes can be used.
-Double backslashes are probably needed::
+To use a Windows path, regular drive specifications and backslashes can be
+used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
-The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
-``sqlite://`` and nothing else::
+The sqlite ``:memory:`` identifier is the default if no filepath is
+present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
@@ -86,13 +87,13 @@ nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
- connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
- native_datetime=True
- )
+ connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
+ native_datetime=True
+ )
-With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
-or TIME types...confused yet ?) will not perform any bind parameter or result
-processing. Execution of "func.current_date()" will return a string.
+With this flag enabled, the DATE and TIMESTAMP types (but note - not the
+DATETIME or TIME types...confused yet ?) will not perform any bind parameter
+or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
@@ -100,8 +101,8 @@ Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
-in more than one thread. This is originally intended to work with older versions
-of SQLite that did not support multithreaded operation under
+in more than one thread. This is originally intended to work with older
+versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
@@ -117,17 +118,17 @@ thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
-* When a ``:memory:`` SQLite database is specified, the dialect by default will use
- :class:`.SingletonThreadPool`. This pool maintains a single connection per
- thread, so that all access to the engine within the current thread use the
- same ``:memory:`` database - other threads would access a different
- ``:memory:`` database.
-* When a file-based database is specified, the dialect will use :class:`.NullPool`
- as the source of connections. This pool closes and discards connections
- which are returned to the pool immediately. SQLite file-based connections
- have extremely low overhead, so pooling is not necessary. The scheme also
- prevents a connection from being used again in a different thread and works
- best with SQLite's coarse-grained file locking.
+* When a ``:memory:`` SQLite database is specified, the dialect by default
+ will use :class:`.SingletonThreadPool`. This pool maintains a single
+ connection per thread, so that all access to the engine within the current
+ thread use the same ``:memory:`` database - other threads would access a
+ different ``:memory:`` database.
+* When a file-based database is specified, the dialect will use
+ :class:`.NullPool` as the source of connections. This pool closes and
+ discards connections which are returned to the pool immediately. SQLite
+ file-based connections have extremely low overhead, so pooling is not
+ necessary. The scheme also prevents a connection from being used again in
+ a different thread and works best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
@@ -140,9 +141,10 @@ Using a Memory Database in Multiple Threads
To use a ``:memory:`` database in a multithreaded scenario, the same connection
object must be shared among threads, since the database exists
-only within the scope of that connection. The :class:`.StaticPool` implementation
-will maintain a single connection globally, and the ``check_same_thread`` flag
-can be passed to Pysqlite as ``False``::
+only within the scope of that connection. The
+:class:`.StaticPool` implementation will maintain a single connection
+globally, and the ``check_same_thread`` flag can be passed to Pysqlite
+as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
@@ -155,13 +157,14 @@ version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
-in a file-based SQLite database across multiple checkouts from the connection pool, such
-as when using an ORM :class:`.Session` where the temporary table should continue to remain
-after :meth:`.commit` or :meth:`.rollback` is called,
-a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
-if the scope is only needed within the current thread, or :class:`.StaticPool` is scope is
-needed within multiple threads for this case::
+Due to the way SQLite deals with temporary tables, if you wish to use a
+temporary table in a file-based SQLite database across multiple checkouts
+from the connection pool, such as when using an ORM :class:`.Session` where
+the temporary table should continue to remain after :meth:`.commit` or
+:meth:`.rollback` is called, a pool which maintains a single connection must
+be used. Use :class:`.SingletonThreadPool` if the scope is only needed
+within the current thread, or :class:`.StaticPool` if scope is needed within
+multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
@@ -174,17 +177,17 @@ needed within multiple threads for this case::
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
-Note that :class:`.SingletonThreadPool` should be configured for the number of threads
-that are to be used; beyond that number, connections will be closed out in a non deterministic
-way.
+Note that :class:`.SingletonThreadPool` should be configured for the number
+of threads that are to be used; beyond that number, connections will be
+closed out in a non deterministic way.
Unicode
-------
-The pysqlite driver only returns Python ``unicode`` objects in result sets, never
-plain strings, and accommodates ``unicode`` objects within bound parameter
-values in all cases. Regardless of the SQLAlchemy string type in use,
-string-based result values will by Python ``unicode`` in Python 2.
+The pysqlite driver only returns Python ``unicode`` objects in result sets,
+never plain strings, and accommodates ``unicode`` objects within bound
+parameter values in all cases. Regardless of the SQLAlchemy string type in
+use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
@@ -224,6 +227,7 @@ from sqlalchemy import util
import os
+
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
@@ -237,6 +241,7 @@ class _SQLite_pysqliteTimeStamp(DATETIME):
else:
return DATETIME.result_processor(self, dialect, coltype)
+
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
@@ -250,14 +255,15 @@ class _SQLite_pysqliteDate(DATE):
else:
return DATE.result_processor(self, dialect, coltype)
+
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
- sqltypes.Date:_SQLite_pysqliteDate,
- sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
+ sqltypes.Date: _SQLite_pysqliteDate,
+ sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
}
)
@@ -284,7 +290,7 @@ class SQLiteDialect_pysqlite(SQLiteDialect):
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
- from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
+ from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
except ImportError:
raise e
return sqlite