author     Brian Jarrett <celttechie@gmail.com>   2014-07-20 12:44:40 -0400
committer  Mike Bayer <mike_mp@zzzcomputing.com>  2014-07-20 12:44:40 -0400
commit     cca03097f47f22783d42d1853faac6cf84607c5a (patch)
tree       4fe1a63d03a2d88d1cf37e1167759dfaf84f4ce7 /lib/sqlalchemy/dialects
parent     827329a0cca5351094a1a86b6b2be2b9182f0ae2 (diff)
download   sqlalchemy-cca03097f47f22783d42d1853faac6cf84607c5a.tar.gz
- apply pep8 formatting to sqlalchemy/sql, sqlalchemy/util, sqlalchemy/dialects,
sqlalchemy/orm, sqlalchemy/event, sqlalchemy/testing
Diffstat (limited to 'lib/sqlalchemy/dialects')
-rw-r--r--  lib/sqlalchemy/dialects/__init__.py                      3
-rw-r--r--  lib/sqlalchemy/dialects/firebird/base.py               132
-rw-r--r--  lib/sqlalchemy/dialects/firebird/fdb.py                 20
-rw-r--r--  lib/sqlalchemy/dialects/firebird/kinterbasdb.py         26
-rw-r--r--  lib/sqlalchemy/dialects/mssql/__init__.py                2
-rw-r--r--  lib/sqlalchemy/dialects/mssql/adodbapi.py                4
-rw-r--r--  lib/sqlalchemy/dialects/mssql/base.py                  415
-rw-r--r--  lib/sqlalchemy/dialects/mssql/information_schema.py    141
-rw-r--r--  lib/sqlalchemy/dialects/mssql/mxodbc.py                  8
-rw-r--r--  lib/sqlalchemy/dialects/mssql/pymssql.py                 5
-rw-r--r--  lib/sqlalchemy/dialects/mssql/pyodbc.py                 62
-rw-r--r--  lib/sqlalchemy/dialects/mssql/zxjdbc.py                 13
-rw-r--r--  lib/sqlalchemy/dialects/mysql/__init__.py               14
-rw-r--r--  lib/sqlalchemy/dialects/mysql/base.py                  485
-rw-r--r--  lib/sqlalchemy/dialects/mysql/cymysql.py                 6
-rw-r--r--  lib/sqlalchemy/dialects/mysql/gaerdbms.py                9
-rw-r--r--  lib/sqlalchemy/dialects/mysql/mysqlconnector.py         14
-rw-r--r--  lib/sqlalchemy/dialects/mysql/mysqldb.py                34
-rw-r--r--  lib/sqlalchemy/dialects/mysql/oursql.py                 22
-rw-r--r--  lib/sqlalchemy/dialects/mysql/pymysql.py                 9
-rw-r--r--  lib/sqlalchemy/dialects/mysql/pyodbc.py                  3
-rw-r--r--  lib/sqlalchemy/dialects/mysql/zxjdbc.py                  6
-rw-r--r--  lib/sqlalchemy/dialects/oracle/__init__.py               8
-rw-r--r--  lib/sqlalchemy/dialects/oracle/base.py                 415
-rw-r--r--  lib/sqlalchemy/dialects/oracle/cx_oracle.py            308
-rw-r--r--  lib/sqlalchemy/dialects/oracle/zxjdbc.py                47
-rw-r--r--  lib/sqlalchemy/dialects/postgres.py                      7
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/base.py             543
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/constraints.py       15
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/hstore.py            21
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/json.py              66
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/pg8000.py             2
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/psycopg2.py         127
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/pypostgresql.py       3
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/ranges.py             7
-rw-r--r--  lib/sqlalchemy/dialects/sqlite/base.py                 114
-rw-r--r--  lib/sqlalchemy/dialects/sqlite/pysqlite.py              35
-rw-r--r--  lib/sqlalchemy/dialects/sybase/__init__.py              10
-rw-r--r--  lib/sqlalchemy/dialects/sybase/base.py                  98
-rw-r--r--  lib/sqlalchemy/dialects/sybase/pyodbc.py                 7
-rw-r--r--  lib/sqlalchemy/dialects/sybase/pysybase.py              11
41 files changed, 1794 insertions(+), 1483 deletions(-)
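
Most hunks below re-indent continuation lines rather than change behavior. A
minimal sketch of the recurring pattern, mirroring the ``ischema_names`` hunk
in firebird/base.py (values reduced to strings so the fragment stands alone)::

    # Before: items aligned under the opening brace, closer indented too.
    ischema_names = {
                'SHORT': 'SMALLINT',
                'LONG': 'INTEGER',
                }

    # After: four-space hanging indent, closer back at statement level
    # (the pycodestyle E12x continuation-line checks).
    ischema_names = {
        'SHORT': 'SMALLINT',
        'LONG': 'INTEGER',
    }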
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
index 31afe1568..74c48820d 100644
--- a/lib/sqlalchemy/dialects/__init__.py
+++ b/lib/sqlalchemy/dialects/__init__.py
@@ -13,10 +13,11 @@ __all__ = (
'postgresql',
'sqlite',
'sybase',
- )
+)
from .. import util
+
def _auto_fn(name):
"""default dialect importer.
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index c8f081b2d..36229a105 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -119,7 +119,7 @@ RESERVED_WORDS = set([
"union", "unique", "update", "upper", "user", "using", "value",
"values", "varchar", "variable", "varying", "view", "wait", "when",
"where", "while", "with", "work", "write", "year",
- ])
+])
class _StringType(sqltypes.String):
@@ -160,20 +160,20 @@ colspecs = {
}
ischema_names = {
- 'SHORT': SMALLINT,
- 'LONG': INTEGER,
- 'QUAD': FLOAT,
- 'FLOAT': FLOAT,
- 'DATE': DATE,
- 'TIME': TIME,
- 'TEXT': TEXT,
- 'INT64': BIGINT,
- 'DOUBLE': FLOAT,
- 'TIMESTAMP': TIMESTAMP,
+ 'SHORT': SMALLINT,
+ 'LONG': INTEGER,
+ 'QUAD': FLOAT,
+ 'FLOAT': FLOAT,
+ 'DATE': DATE,
+ 'TIME': TIME,
+ 'TEXT': TEXT,
+ 'INT64': BIGINT,
+ 'DOUBLE': FLOAT,
+ 'TIMESTAMP': TIMESTAMP,
'VARYING': VARCHAR,
'CSTRING': CHAR,
- 'BLOB': BLOB,
- }
+ 'BLOB': BLOB,
+}
# TODO: date conversion types (should be implemented as _FBDateTime,
@@ -193,7 +193,7 @@ class FBTypeCompiler(compiler.GenericTypeCompiler):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
- charset = getattr(type_, 'charset', None)
+ charset = getattr(type_, 'charset', None)
if charset is None:
return basic
else:
@@ -206,8 +206,8 @@ class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARCHAR(self, type_):
if not type_.length:
raise exc.CompileError(
- "VARCHAR requires a length on dialect %s" %
- self.dialect.name)
+ "VARCHAR requires a length on dialect %s" %
+ self.dialect.name)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_)
return self._extend_string(type_, basic)
@@ -217,46 +217,46 @@ class FBCompiler(sql.compiler.SQLCompiler):
ansi_bind_rules = True
- #def visit_contains_op_binary(self, binary, operator, **kw):
- # cant use CONTAINING b.c. it's case insensitive.
+ # def visit_contains_op_binary(self, binary, operator, **kw):
+ # cant use CONTAINING b.c. it's case insensitive.
- #def visit_notcontains_op_binary(self, binary, operator, **kw):
- # cant use NOT CONTAINING b.c. it's case insensitive.
+ # def visit_notcontains_op_binary(self, binary, operator, **kw):
+ # cant use NOT CONTAINING b.c. it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return '%s STARTING WITH %s' % (
- binary.left._compiler_dispatch(self, **kw),
- binary.right._compiler_dispatch(self, **kw))
+ binary.left._compiler_dispatch(self, **kw),
+ binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return '%s NOT STARTING WITH %s' % (
- binary.left._compiler_dispatch(self, **kw),
- binary.right._compiler_dispatch(self, **kw))
+ binary.left._compiler_dispatch(self, **kw),
+ binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).\
- visit_alias(alias, asfrom=asfrom, **kwargs)
+ visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name,
- expression._truncated_label) and \
- self._truncated_identifier("alias",
- alias.name) or alias.name
+ expression._truncated_label) and \
+ self._truncated_identifier("alias",
+ alias.name) or alias.name
return self.process(
- alias.original, asfrom=asfrom, **kwargs) + \
- " " + \
- self.preparer.format_alias(alias, alias_name)
+ alias.original, asfrom=asfrom, **kwargs) + \
+ " " + \
+ self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
@@ -315,9 +315,9 @@ class FBCompiler(sql.compiler.SQLCompiler):
def returning_clause(self, stmt, returning_cols):
columns = [
- self._label_select_column(None, c, True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
+ self._label_select_column(None, c, True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
return 'RETURNING ' + ', '.join(columns)
@@ -332,34 +332,35 @@ class FBDDLCompiler(sql.compiler.DDLCompiler):
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplemented(
- "Firebird SEQUENCE doesn't support START WITH")
+ "Firebird SEQUENCE doesn't support START WITH")
if create.element.increment is not None:
raise NotImplemented(
- "Firebird SEQUENCE doesn't support INCREMENT BY")
+ "Firebird SEQUENCE doesn't support INCREMENT BY")
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % \
- self.preparer.format_sequence(create.element)
+ self.preparer.format_sequence(create.element)
else:
return "CREATE GENERATOR %s" % \
- self.preparer.format_sequence(create.element)
+ self.preparer.format_sequence(create.element)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % \
- self.preparer.format_sequence(drop.element)
+ self.preparer.format_sequence(drop.element)
else:
return "DROP GENERATOR %s" % \
- self.preparer.format_sequence(drop.element)
+ self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
- illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(['_'])
+ illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
+ ['_'])
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
@@ -370,10 +371,10 @@ class FBExecutionContext(default.DefaultExecutionContext):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
- "SELECT gen_id(%s, 1) FROM rdb$database" %
- self.dialect.identifier_preparer.format_sequence(seq),
- type_
- )
+ "SELECT gen_id(%s, 1) FROM rdb$database" %
+ self.dialect.identifier_preparer.format_sequence(seq),
+ type_
+ )
class FBDialect(default.DefaultDialect):
@@ -411,12 +412,12 @@ class FBDialect(default.DefaultDialect):
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
- self._version_two = ('firebird' in self.server_version_info and \
- self.server_version_info >= (2, )
- ) or \
- ('interbase' in self.server_version_info and \
+ self._version_two = ('firebird' in self.server_version_info and
+ self.server_version_info >= (2, )
+ ) or \
+ ('interbase' in self.server_version_info and
self.server_version_info >= (6, )
- )
+ )
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
@@ -426,8 +427,8 @@ class FBDialect(default.DefaultDialect):
sqltypes.DateTime: sqltypes.DATE
}
- self.implicit_returning = self._version_two and \
- self.__dict__.get('implicit_returning', True)
+ self.implicit_returning = self._version_two and \
+ self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
@@ -436,7 +437,7 @@ class FBDialect(default.DefaultDialect):
if name is None:
return None
elif name.upper() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
+ not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
@@ -445,7 +446,7 @@ class FBDialect(default.DefaultDialect):
if name is None:
return None
elif name.lower() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
+ not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
@@ -539,8 +540,8 @@ class FBDialect(default.DefaultDialect):
@reflection.cache
def get_column_sequence(self, connection,
- table_name, column_name,
- schema=None, **kw):
+ table_name, column_name,
+ schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic-query to determine the generator associated to a PK field
@@ -613,8 +614,8 @@ class FBDialect(default.DefaultDialect):
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row['fprec'] != 0:
coltype = NUMERIC(
- precision=row['fprec'],
- scale=row['fscale'] * -1)
+ precision=row['fprec'],
+ scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
elif colspec == 'TEXT':
@@ -636,8 +637,8 @@ class FBDialect(default.DefaultDialect):
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip().upper() == \
- 'DEFAULT', "Unrecognized default value: %s" % \
- defexpr
+ 'DEFAULT', "Unrecognized default value: %s" % \
+ defexpr
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
# Redundant
@@ -700,9 +701,9 @@ class FBDialect(default.DefaultDialect):
fk['name'] = cname
fk['referred_table'] = self.normalize_name(row['targetrname'])
fk['constrained_columns'].append(
- self.normalize_name(row['fname']))
+ self.normalize_name(row['fname']))
fk['referred_columns'].append(
- self.normalize_name(row['targetfname']))
+ self.normalize_name(row['targetfname']))
return list(fks.values())
@reflection.cache
@@ -732,7 +733,6 @@ class FBDialect(default.DefaultDialect):
indexrec['unique'] = bool(row['unique_flag'])
indexrec['column_names'].append(
- self.normalize_name(row['field_name']))
+ self.normalize_name(row['field_name']))
return list(indexes.values())
-
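
For context, the FBDDLCompiler hunks above only re-wrap the DDL strings; the
version switch they preserve can be sketched standalone (a toy stand-in, not
the dialect's actual class)::

    class SequenceDDL(object):
        """Toy stand-in for FBDDLCompiler.visit_create_sequence."""

        def __init__(self, version_two):
            self._version_two = version_two

        def create(self, name):
            # Firebird 2.x understands standard SEQUENCE DDL; 1.5 only
            # knows the legacy GENERATOR spelling.
            if self._version_two:
                return "CREATE SEQUENCE %s" % name
            return "CREATE GENERATOR %s" % name

    assert SequenceDDL(True).create("gen_seq") == "CREATE SEQUENCE gen_seq"
    assert SequenceDDL(False).create("gen_seq") == "CREATE GENERATOR gen_seq"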
diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py
index a691adb53..ddffc80f5 100644
--- a/lib/sqlalchemy/dialects/firebird/fdb.py
+++ b/lib/sqlalchemy/dialects/firebird/fdb.py
@@ -9,7 +9,8 @@
.. dialect:: firebird+fdb
:name: fdb
:dbapi: pyodbc
- :connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
+ :connectstring: firebird+fdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb compatible DBAPI for Firebird.
@@ -23,8 +24,9 @@
Arguments
----------
-The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb`
-dialect, however does not accept every argument that Kinterbasdb does.
+The ``fdb`` dialect is based on the
+:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not
+accept every argument that Kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
@@ -61,8 +63,8 @@ dialect, however does not accept every argument that Kinterbasdb does.
.. seealso::
- http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information
- on the "retaining" flag.
+ http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
+ - information on the "retaining" flag.
"""
@@ -73,14 +75,14 @@ from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(self, enable_rowcount=True,
- retaining=False, **kwargs):
+ retaining=False, **kwargs):
super(FBDialect_fdb, self).__init__(
- enable_rowcount=enable_rowcount,
- retaining=retaining, **kwargs)
+ enable_rowcount=enable_rowcount,
+ retaining=retaining, **kwargs)
@classmethod
def dbapi(cls):
- return __import__('fdb')
+ return __import__('fdb')
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
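
As a usage note, the connect string documented in this file plugs directly
into ``create_engine``; a sketch with placeholder host, credentials, and
database path (the keyword arguments are the two documented dialect
options)::

    from sqlalchemy import create_engine

    # Placeholder URL; enable_rowcount and retaining are forwarded to
    # FBDialect_fdb.__init__ shown above.
    engine = create_engine(
        "firebird+fdb://user:password@localhost:3050/path/to/db",
        enable_rowcount=True,
        retaining=False,
    )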
diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
index cdd1f7e7b..6bd7887f7 100644
--- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
@@ -9,15 +9,16 @@
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
- :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
+ :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
-arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect. In addition, it
-also accepts the following:
+arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
+In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
@@ -52,9 +53,11 @@ class _kinterbasdb_numeric(object):
return value
return process
+
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
+
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
@@ -63,7 +66,7 @@ class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
- self.dialect.enable_rowcount):
+ self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
return -1
@@ -87,8 +90,8 @@ class FBDialect_kinterbasdb(FBDialect):
)
def __init__(self, type_conv=200, concurrency_level=1,
- enable_rowcount=True,
- retaining=False, **kwargs):
+ enable_rowcount=True,
+ retaining=False, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
@@ -123,7 +126,7 @@ class FBDialect_kinterbasdb(FBDialect):
type_conv = opts.pop('type_conv', self.type_conv)
concurrency_level = opts.pop('concurrency_level',
- self.concurrency_level)
+ self.concurrency_level)
if self.dbapi is not None:
initialized = getattr(self.dbapi, 'initialized', None)
@@ -134,7 +137,7 @@ class FBDialect_kinterbasdb(FBDialect):
initialized = getattr(self.dbapi, '_initialized', False)
if not initialized:
self.dbapi.init(type_conv=type_conv,
- concurrency_level=concurrency_level)
+ concurrency_level=concurrency_level)
return ([], opts)
def _get_server_version_info(self, connection):
@@ -156,10 +159,11 @@ class FBDialect_kinterbasdb(FBDialect):
return self._parse_version_info(version)
def _parse_version_info(self, version):
- m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
+ m = match(
+ '\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
- "Could not determine version from string '%s'" % version)
+ "Could not determine version from string '%s'" % version)
if m.group(5) != None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
@@ -168,7 +172,7 @@ class FBDialect_kinterbasdb(FBDialect):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
- self.dbapi.ProgrammingError)):
+ self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
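
The version regex touched in ``_parse_version_info`` above can be exercised
on its own; a quick sketch with a made-up server banner of the expected
shape::

    from re import match

    # Same pattern as FBDialect_kinterbasdb._parse_version_info above.
    pattern = r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?'

    # Hypothetical banner string; real servers report this shape.
    m = match(pattern, 'LI-V6.3.1.25012 Firebird 2.1')
    assert m is not None
    if m.group(5) is not None:
        # Trailing "Firebird x.y" present: version is (x, y, build, tag).
        print(tuple(int(x) for x in m.group(6, 7, 4)) + ('firebird',))
        # -> (2, 1, 25012, 'firebird')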
diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py
index 4c059ae2f..d0047765e 100644
--- a/lib/sqlalchemy/dialects/mssql/__init__.py
+++ b/lib/sqlalchemy/dialects/mssql/__init__.py
@@ -6,7 +6,7 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
- pymssql, zxjdbc, mxodbc
+ pymssql, zxjdbc, mxodbc
base.dialect = pyodbc.dialect
diff --git a/lib/sqlalchemy/dialects/mssql/adodbapi.py b/lib/sqlalchemy/dialects/mssql/adodbapi.py
index d94a4517d..e9927f8ed 100644
--- a/lib/sqlalchemy/dialects/mssql/adodbapi.py
+++ b/lib/sqlalchemy/dialects/mssql/adodbapi.py
@@ -61,7 +61,7 @@ class MSDialect_adodbapi(MSDialect):
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append("Data Source=%s, %s" %
- (keys.get("host"), keys.get("port")))
+ (keys.get("host"), keys.get("port")))
else:
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
@@ -75,6 +75,6 @@ class MSDialect_adodbapi(MSDialect):
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
- "'connection failure'" in str(e)
+ "'connection failure'" in str(e)
dialect = MSDialect_adodbapi
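
Re-running the ``create_connect_args`` fragment above by hand shows the OLE DB
keyword string it assembles (host, port, and database are placeholders; the
final ";" join is assumed from the surrounding dialect code)::

    keys = {'host': 'dbserver', 'port': 1433, 'database': 'mydb'}

    connectors = ["Provider=SQLOLEDB"]
    if 'port' in keys:
        # Same "host, port" Data Source form as the dialect code above.
        connectors.append("Data Source=%s, %s" % (keys.get("host"),
                                                  keys.get("port")))
    else:
        connectors.append("Data Source=%s" % keys.get("host"))
    connectors.append("Initial Catalog=%s" % keys.get("database"))

    print(";".join(connectors))
    # Provider=SQLOLEDB;Data Source=dbserver, 1433;Initial Catalog=mydb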
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 473f7df06..f4264b3d0 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -13,9 +13,9 @@
Auto Increment Behavior
-----------------------
-SQL Server provides so-called "auto incrementing" behavior using the ``IDENTITY``
-construct, which can be placed on an integer primary key. SQLAlchemy
-considers ``IDENTITY`` within its default "autoincrement" behavior,
+SQL Server provides so-called "auto incrementing" behavior using the
+``IDENTITY`` construct, which can be placed on an integer primary key.
+SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::
@@ -52,24 +52,25 @@ specify ``autoincrement=False`` on all integer primary key columns::
An INSERT statement which refers to an explicit value for such
a column is prohibited by SQL Server, however SQLAlchemy will detect this
and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
- time. As this is not a high performing process, care should be taken to set
- the ``autoincrement`` flag appropriately for columns that will not actually
- require IDENTITY behavior.
+ time. As this is not a high performing process, care should be taken to
+ set the ``autoincrement`` flag appropriately for columns that will not
+ actually require IDENTITY behavior.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the parameters of the ``IDENTITY`` value is supported
-using the :class:`.schema.Sequence` object. While this object normally represents
-an explicit "sequence" for supporting backends, on SQL Server it is re-purposed
-to specify behavior regarding the identity column, including support
-of the "start" and "increment" values::
+using the :class:`.schema.Sequence` object. While this object normally
+represents an explicit "sequence" for supporting backends, on SQL Server it is
+re-purposed to specify behavior regarding the identity column, including
+support of the "start" and "increment" values::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
- Sequence('blah', start=100, increment=10), primary_key=True),
+ Sequence('blah', start=100, increment=10),
+ primary_key=True),
Column('name', String(20))
).create(some_engine)
@@ -88,10 +89,10 @@ optional and will default to 1,1.
INSERT behavior
^^^^^^^^^^^^^^^^
-Handling of the ``IDENTITY`` column at INSERT time involves two key techniques.
-The most common is being able to fetch the "last inserted value" for a given
-``IDENTITY`` column, a process which SQLAlchemy performs implicitly in many
-cases, most importantly within the ORM.
+Handling of the ``IDENTITY`` column at INSERT time involves two key
+techniques. The most common is being able to fetch the "last inserted value"
+for a given ``IDENTITY`` column, a process which SQLAlchemy performs
+implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
@@ -106,9 +107,9 @@ The process for fetching this value has several variants:
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
- * when using PyODBC, the phrase ``; select scope_identity()`` will be appended
- to the end of the INSERT statement; a second result set will be fetched
- in order to receive the value. Given a table as::
+ * when using PyODBC, the phrase ``; select scope_identity()`` will be
+ appended to the end of the INSERT statement; a second result set will be
+ fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
@@ -121,17 +122,18 @@ The process for fetching this value has several variants:
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
- ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT statement.
- If the flag ``use_scope_identity=False`` is passed to :func:`.create_engine`,
- the statement ``SELECT @@identity AS lastrowid`` is used instead.
+ ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
+ statement. If the flag ``use_scope_identity=False`` is passed to
+ :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
+ is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
-in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert statement
-proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the execution.
-Given this example::
+in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
+statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
+execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
@@ -250,7 +252,8 @@ To generate a clustered primary key use::
which will render the table, for example, as::
- CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, PRIMARY KEY CLUSTERED (x, y))
+ CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
+ PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
@@ -272,7 +275,8 @@ for :class:`.Index`.
INCLUDE
^^^^^^^
-The ``mssql_include`` option renders INCLUDE(colname) for the given string names::
+The ``mssql_include`` option renders INCLUDE(colname) for the given string
+names::
Index("my_index", table.c.x, mssql_include=['y'])
@@ -364,13 +368,13 @@ import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, \
- util as sql_util, cast
+ util as sql_util, cast
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
- FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
- VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
+ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
+ VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
@@ -409,7 +413,7 @@ RESERVED_WORDS = set(
'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
'writetext',
- ])
+ ])
class REAL(sqltypes.REAL):
@@ -431,6 +435,7 @@ class TINYINT(sqltypes.Integer):
# not sure about other dialects).
class _MSDate(sqltypes.Date):
+
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
@@ -447,15 +452,16 @@ class _MSDate(sqltypes.Date):
return value.date()
elif isinstance(value, util.string_types):
return datetime.date(*[
- int(x or 0)
- for x in self._reg.match(value).groups()
- ])
+ int(x or 0)
+ for x in self._reg.match(value).groups()
+ ])
else:
return value
return process
class TIME(sqltypes.TIME):
+
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
@@ -466,7 +472,7 @@ class TIME(sqltypes.TIME):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
- self.__zero_date, value.time())
+ self.__zero_date, value.time())
elif isinstance(value, datetime.time):
value = datetime.datetime.combine(self.__zero_date, value)
return value
@@ -480,8 +486,8 @@ class TIME(sqltypes.TIME):
return value.time()
elif isinstance(value, util.string_types):
return datetime.time(*[
- int(x or 0)
- for x in self._reg.match(value).groups()])
+ int(x or 0)
+ for x in self._reg.match(value).groups()])
else:
return value
return process
@@ -489,6 +495,7 @@ _MSTime = TIME
class _DateTimeBase(object):
+
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
@@ -523,22 +530,21 @@ class DATETIMEOFFSET(sqltypes.TypeEngine):
class _StringType(object):
+
"""Base for MSSQL string types."""
def __init__(self, collation=None):
super(_StringType, self).__init__(collation=collation)
-
-
class NTEXT(sqltypes.UnicodeText):
+
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = 'NTEXT'
-
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
@@ -620,6 +626,7 @@ ischema_names = {
class MSTypeCompiler(compiler.GenericTypeCompiler):
+
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
@@ -638,7 +645,7 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
spec = spec + "(%s)" % length
return ' '.join([c for c in (spec, collation)
- if c is not None])
+ if c is not None])
def visit_FLOAT(self, type_):
precision = getattr(type_, 'precision', None)
@@ -717,9 +724,9 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARBINARY(self, type_):
return self._extend(
- "VARBINARY",
- type_,
- length=type_.length or 'max')
+ "VARBINARY",
+ type_,
+ length=type_.length or 'max')
def visit_boolean(self, type_):
return self.visit_BIT(type_)
@@ -762,20 +769,23 @@ class MSExecutionContext(default.DefaultExecutionContext):
if insert_has_sequence:
self._enable_identity_insert = \
- seq_column.key in self.compiled_parameters[0]
+ seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
self._select_lastrowid = insert_has_sequence and \
- not self.compiled.returning and \
- not self._enable_identity_insert and \
- not self.executemany
+ not self.compiled.returning and \
+ not self._enable_identity_insert and \
+ not self.executemany
if self._enable_identity_insert:
- self.root_connection._cursor_execute(self.cursor,
- self._opt_encode("SET IDENTITY_INSERT %s ON" %
- self.dialect.identifier_preparer.format_table(tbl)),
- (), self)
+ self.root_connection._cursor_execute(
+ self.cursor,
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s ON" %
+ self.dialect.identifier_preparer.format_table(tbl)),
+ (),
+ self)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
@@ -783,11 +793,14 @@ class MSExecutionContext(default.DefaultExecutionContext):
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
- conn._cursor_execute(self.cursor,
+ conn._cursor_execute(
+ self.cursor,
"SELECT scope_identity() AS lastrowid", (), self)
else:
conn._cursor_execute(self.cursor,
- "SELECT @@identity AS lastrowid", (), self)
+ "SELECT @@identity AS lastrowid",
+ (),
+ self)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
@@ -797,11 +810,14 @@ class MSExecutionContext(default.DefaultExecutionContext):
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
- conn._cursor_execute(self.cursor,
- self._opt_encode("SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.
- format_table(self.compiled.statement.table)),
- (), self)
+ conn._cursor_execute(
+ self.cursor,
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s OFF" %
+ self.dialect.identifier_preparer. format_table(
+ self.compiled.statement.table)),
+ (),
+ self)
def get_lastrowid(self):
return self._lastrowid
@@ -810,10 +826,10 @@ class MSExecutionContext(default.DefaultExecutionContext):
if self._enable_identity_insert:
try:
self.cursor.execute(
- self._opt_encode("SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.\
- format_table(self.compiled.statement.table))
- )
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s OFF" %
+ self.dialect.identifier_preparer. format_table(
+ self.compiled.statement.table)))
except:
pass
@@ -830,11 +846,11 @@ class MSSQLCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
- 'doy': 'dayofyear',
- 'dow': 'weekday',
- 'milliseconds': 'millisecond',
- 'microseconds': 'microsecond'
- })
+ 'doy': 'dayofyear',
+ 'dow': 'weekday',
+ 'milliseconds': 'millisecond',
+ 'microseconds': 'microsecond'
+ })
def __init__(self, *args, **kwargs):
self.tablealiases = {}
@@ -854,8 +870,8 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw))
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
def visit_true(self, expr, **kw):
return '1'
@@ -865,8 +881,8 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
def get_select_precolumns(self, select):
""" MS-SQL puts TOP, it's version of LIMIT here """
@@ -902,20 +918,20 @@ class MSSQLCompiler(compiler.SQLCompiler):
"""
if (
- (
- not select._simple_int_limit and
- select._limit_clause is not None
- ) or (
- select._offset_clause is not None and
- not select._simple_int_offset or select._offset
- )
- ) and not getattr(select, '_mssql_visit', None):
+ (
+ not select._simple_int_limit and
+ select._limit_clause is not None
+ ) or (
+ select._offset_clause is not None and
+ not select._simple_int_offset or select._offset
+ )
+ ) and not getattr(select, '_mssql_visit', None):
# to use ROW_NUMBER(), an ORDER BY is required.
if not select._order_by_clause.clauses:
raise exc.CompileError('MSSQL requires an order_by when '
- 'using an OFFSET or a non-simple '
- 'LIMIT clause')
+ 'using an OFFSET or a non-simple '
+ 'LIMIT clause')
_order_by_clauses = select._order_by_clause.clauses
limit_clause = select._limit_clause
@@ -923,20 +939,20 @@ class MSSQLCompiler(compiler.SQLCompiler):
select = select._generate()
select._mssql_visit = True
select = select.column(
- sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
- .label("mssql_rn")).order_by(None).alias()
+ sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
+ .label("mssql_rn")).order_by(None).alias()
mssql_rn = sql.column('mssql_rn')
limitselect = sql.select([c for c in select.c if
- c.key != 'mssql_rn'])
+ c.key != 'mssql_rn'])
if offset_clause is not None:
limitselect.append_whereclause(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect.append_whereclause(
- mssql_rn <= (limit_clause + offset_clause))
+ mssql_rn <= (limit_clause + offset_clause))
else:
limitselect.append_whereclause(
- mssql_rn <= (limit_clause))
+ mssql_rn <= (limit_clause))
return self.process(limitselect, iswrapper=True, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@@ -968,10 +984,11 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % \
- (field, self.process(extract.expr, **kw))
+ (field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
- return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)
+ return "SAVE TRANSACTION %s" % \
+ self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
@@ -979,25 +996,26 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_column(self, column, add_to_result_map=None, **kwargs):
if column.table is not None and \
- (not self.isupdate and not self.isdelete) or self.is_subquery():
+ (not self.isupdate and not self.isdelete) or \
+ self.is_subquery():
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
- t, column)
+ t, column)
if add_to_result_map is not None:
add_to_result_map(
- column.name,
- column.name,
- (column, column.name, column.key),
- column.type
+ column.name,
+ column.name,
+ (column, column.name, column.key),
+ column.type
)
return super(MSSQLCompiler, self).\
- visit_column(converted, **kwargs)
+ visit_column(converted, **kwargs)
return super(MSSQLCompiler, self).visit_column(
- column, add_to_result_map=add_to_result_map, **kwargs)
+ column, add_to_result_map=add_to_result_map, **kwargs)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
@@ -1008,12 +1026,12 @@ class MSSQLCompiler(compiler.SQLCompiler):
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
- ):
+ ):
return self.process(
- expression.BinaryExpression(binary.right,
- binary.left,
- binary.operator),
- **kwargs)
+ expression.BinaryExpression(binary.right,
+ binary.left,
+ binary.operator),
+ **kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
@@ -1026,10 +1044,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
adapter = sql_util.ClauseAdapter(target)
columns = [
- self._label_select_column(None, adapter.traverse(c),
- True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
+ self._label_select_column(None, adapter.traverse(c),
+ True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
return 'OUTPUT ' + ', '.join(columns)
@@ -1045,7 +1063,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
return column.label(None)
else:
return super(MSSQLCompiler, self).\
- label_select_column(select, column, asfrom)
+ label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
@@ -1062,9 +1080,9 @@ class MSSQLCompiler(compiler.SQLCompiler):
return ""
def update_from_clause(self, update_stmt,
- from_table, extra_froms,
- from_hints,
- **kw):
+ from_table, extra_froms,
+ from_hints,
+ **kw):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
@@ -1073,12 +1091,13 @@ class MSSQLCompiler(compiler.SQLCompiler):
"""
return "FROM " + ', '.join(
- t._compiler_dispatch(self, asfrom=True,
- fromhints=from_hints, **kw)
- for t in [from_table] + extra_froms)
+ t._compiler_dispatch(self, asfrom=True,
+ fromhints=from_hints, **kw)
+ for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
+
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
@@ -1091,16 +1110,16 @@ class MSSQLStrictCompiler(MSSQLCompiler):
def visit_in_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
def visit_notin_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
def render_literal_value(self, value, type_):
"""
@@ -1119,10 +1138,11 @@ class MSSQLStrictCompiler(MSSQLCompiler):
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).\
- render_literal_value(value, type_)
+ render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
+
def get_column_specification(self, column, **kwargs):
colspec = (self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(column.type))
@@ -1136,17 +1156,19 @@ class MSDDLCompiler(compiler.DDLCompiler):
if column.table is None:
raise exc.CompileError(
- "mssql requires Table-bound columns "
- "in order to generate DDL")
+ "mssql requires Table-bound columns "
+ "in order to generate DDL")
- # install an IDENTITY Sequence if we either a sequence or an implicit IDENTITY column
+ # install an IDENTITY Sequence if we either a sequence or an implicit
+ # IDENTITY column
if isinstance(column.default, sa_schema.Sequence):
if column.default.start == 0:
start = 0
else:
start = column.default.start or 1
- colspec += " IDENTITY(%s,%s)" % (start, column.default.increment or 1)
+ colspec += " IDENTITY(%s,%s)" % (start,
+ column.default.increment or 1)
elif column is column.table._autoincrement_column:
colspec += " IDENTITY(1,1)"
else:
@@ -1169,21 +1191,24 @@ class MSDDLCompiler(compiler.DDLCompiler):
text += "CLUSTERED "
text += "INDEX %s ON %s (%s)" \
- % (
- self._prepared_index_name(index,
- include_schema=include_schema),
- preparer.format_table(index.table),
- ', '.join(
- self.sql_compiler.process(expr,
- include_table=False, literal_binds=True) for
- expr in index.expressions)
- )
+ % (
+ self._prepared_index_name(index,
+ include_schema=include_schema),
+ preparer.format_table(index.table),
+ ', '.join(
+ self.sql_compiler.process(expr,
+ include_table=False,
+ literal_binds=True) for
+ expr in index.expressions)
+ )
# handle other included columns
if index.dialect_options['mssql']['include']:
inclusions = [index.table.c[col]
- if isinstance(col, util.string_types) else col
- for col in index.dialect_options['mssql']['include']]
+ if isinstance(col, util.string_types) else col
+ for col in
+ index.dialect_options['mssql']['include']
+ ]
text += " INCLUDE (%s)" \
% ', '.join([preparer.quote(c.name)
@@ -1195,7 +1220,7 @@ class MSDDLCompiler(compiler.DDLCompiler):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table)
- )
+ )
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
@@ -1231,6 +1256,7 @@ class MSDDLCompiler(compiler.DDLCompiler):
text += self.define_constraint_deferrability(constraint)
return text
+
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
@@ -1251,7 +1277,7 @@ def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
- dbname, owner, schema, **kw)
+ dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
@@ -1259,7 +1285,7 @@ def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
- tablename, dbname, owner, schema, **kw)
+ tablename, dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
@@ -1334,7 +1360,7 @@ class MSDialect(default.DefaultDialect):
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
- self.max_identifier_length
+ self.max_identifier_length
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
@@ -1359,7 +1385,7 @@ class MSDialect(default.DefaultDialect):
"is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info))
if self.server_version_info >= MS_2005_VERSION and \
- 'implicit_returning' not in self.__dict__:
+ 'implicit_returning' not in self.__dict__:
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
@@ -1395,8 +1421,8 @@ class MSDialect(default.DefaultDialect):
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select([ischema.schemata.c.schema_name],
- order_by=[ischema.schemata.c.schema_name]
- )
+ order_by=[ischema.schemata.c.schema_name]
+ )
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@@ -1405,10 +1431,10 @@ class MSDialect(default.DefaultDialect):
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
- sql.and_(
- tables.c.table_schema == owner,
- tables.c.table_type == 'BASE TABLE'
- ),
+ sql.and_(
+ tables.c.table_schema == owner,
+ tables.c.table_type == 'BASE TABLE'
+ ),
order_by=[tables.c.table_name]
)
table_names = [r[0] for r in connection.execute(s)]
@@ -1419,10 +1445,10 @@ class MSDialect(default.DefaultDialect):
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
- sql.and_(
- tables.c.table_schema == owner,
- tables.c.table_type == 'VIEW'
- ),
+ sql.and_(
+ tables.c.table_schema == owner,
+ tables.c.table_type == 'VIEW'
+ ),
order_by=[tables.c.table_name]
)
view_names = [r[0] for r in connection.execute(s)]
@@ -1438,22 +1464,22 @@ class MSDialect(default.DefaultDialect):
rp = connection.execute(
sql.text("select ind.index_id, ind.is_unique, ind.name "
- "from sys.indexes as ind join sys.tables as tab on "
- "ind.object_id=tab.object_id "
- "join sys.schemas as sch on sch.schema_id=tab.schema_id "
- "where tab.name = :tabname "
- "and sch.name=:schname "
- "and ind.is_primary_key=0",
- bindparams=[
- sql.bindparam('tabname', tablename,
- sqltypes.String(convert_unicode=True)),
- sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
- ],
- typemap={
- 'name': sqltypes.Unicode()
- }
- )
+ "from sys.indexes as ind join sys.tables as tab on "
+ "ind.object_id=tab.object_id "
+ "join sys.schemas as sch on sch.schema_id=tab.schema_id "
+ "where tab.name = :tabname "
+ "and sch.name=:schname "
+ "and ind.is_primary_key=0",
+ bindparams=[
+ sql.bindparam('tabname', tablename,
+ sqltypes.String(convert_unicode=True)),
+ sql.bindparam('schname', owner,
+ sqltypes.String(convert_unicode=True))
+ ],
+ typemap={
+ 'name': sqltypes.Unicode()
+ }
+ )
)
indexes = {}
for row in rp:
@@ -1473,15 +1499,15 @@ class MSDialect(default.DefaultDialect):
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname",
- bindparams=[
- sql.bindparam('tabname', tablename,
- sqltypes.String(convert_unicode=True)),
- sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
- ],
- typemap={'name': sqltypes.Unicode()}
- ),
- )
+ bindparams=[
+ sql.bindparam('tabname', tablename,
+ sqltypes.String(convert_unicode=True)),
+ sql.bindparam('schname', owner,
+ sqltypes.String(convert_unicode=True))
+ ],
+ typemap={'name': sqltypes.Unicode()}
+ ),
+ )
for row in rp:
if row['index_id'] in indexes:
indexes[row['index_id']]['column_names'].append(row['name'])
@@ -1490,7 +1516,8 @@ class MSDialect(default.DefaultDialect):
@reflection.cache
@_db_plus_owner
- def get_view_definition(self, connection, viewname, dbname, owner, schema, **kw):
+ def get_view_definition(self, connection, viewname,
+ dbname, owner, schema, **kw):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
@@ -1502,9 +1529,9 @@ class MSDialect(default.DefaultDialect):
"views.name=:viewname and sch.name=:schname",
bindparams=[
sql.bindparam('viewname', viewname,
- sqltypes.String(convert_unicode=True)),
+ sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
+ sqltypes.String(convert_unicode=True))
]
)
)
@@ -1524,7 +1551,7 @@ class MSDialect(default.DefaultDialect):
else:
whereclause = columns.c.table_name == tablename
s = sql.select([columns], whereclause,
- order_by=[columns.c.ordinal_position])
+ order_by=[columns.c.ordinal_position])
c = connection.execute(s)
cols = []
@@ -1594,7 +1621,7 @@ class MSDialect(default.DefaultDialect):
ic = col_name
colmap[col_name]['autoincrement'] = True
colmap[col_name]['sequence'] = dict(
- name='%s_identity' % col_name)
+ name='%s_identity' % col_name)
break
cursor.close()
@@ -1603,7 +1630,7 @@ class MSDialect(default.DefaultDialect):
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
- )
+ )
row = cursor.first()
if row is not None and row[0] is not None:
@@ -1615,18 +1642,21 @@ class MSDialect(default.DefaultDialect):
@reflection.cache
@_db_plus_owner
- def get_pk_constraint(self, connection, tablename, dbname, owner, schema, **kw):
+ def get_pk_constraint(self, connection, tablename,
+ dbname, owner, schema, **kw):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias('C')
# Primary key constraints
- s = sql.select([C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
- sql.and_(TC.c.constraint_name == C.c.constraint_name,
- TC.c.table_schema == C.c.table_schema,
- C.c.table_name == tablename,
- C.c.table_schema == owner)
- )
+ s = sql.select([C.c.column_name,
+ TC.c.constraint_type,
+ C.c.constraint_name],
+ sql.and_(TC.c.constraint_name == C.c.constraint_name,
+ TC.c.table_schema == C.c.table_schema,
+ C.c.table_name == tablename,
+ C.c.table_schema == owner)
+ )
c = connection.execute(s)
constraint_name = None
for row in c:
@@ -1638,7 +1668,8 @@ class MSDialect(default.DefaultDialect):
@reflection.cache
@_db_plus_owner
- def get_foreign_keys(self, connection, tablename, dbname, owner, schema, **kw):
+ def get_foreign_keys(self, connection, tablename,
+ dbname, owner, schema, **kw):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias('C')
R = ischema.key_constraints.alias('R')
@@ -1653,11 +1684,11 @@ class MSDialect(default.DefaultDialect):
C.c.table_schema == owner,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
- RR.c.unique_constraint_name,
+ RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by=[RR.c.constraint_name, R.c.ordinal_position]
- )
+ )
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
@@ -1687,8 +1718,8 @@ class MSDialect(default.DefaultDialect):
rec['referred_schema'] = rschema
local_cols, remote_cols = \
- rec['constrained_columns'],\
- rec['referred_columns']
+ rec['constrained_columns'],\
+ rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
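
The IDENTITY_INSERT sequence described in this module's docstring (and
implemented in MSExecutionContext above) reduces to a bracketing pattern; a
minimal sketch where the cursor, table name, and statement are placeholders::

    def insert_with_identity(cursor, table, insert_sql, params):
        # Mirrors the documented behavior: IDENTITY_INSERT is enabled
        # only for the duration of the INSERT that names the identity
        # column, and always switched back off afterwards.
        cursor.execute("SET IDENTITY_INSERT %s ON" % table)
        try:
            cursor.execute(insert_sql, params)
        finally:
            cursor.execute("SET IDENTITY_INSERT %s OFF" % table)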
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py
index 77251e61a..371a1edcc 100644
--- a/lib/sqlalchemy/dialects/mssql/information_schema.py
+++ b/lib/sqlalchemy/dialects/mssql/information_schema.py
@@ -5,7 +5,8 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-# TODO: should be using the sys. catalog with SQL Server, not information schema
+# TODO: should be using the sys. catalog with SQL Server, not information
+# schema
from ... import Table, MetaData, Column
from ...types import String, Unicode, UnicodeText, Integer, TypeDecorator
@@ -16,6 +17,7 @@ from ...ext.compiler import compiles
ischema = MetaData()
+
class CoerceUnicode(TypeDecorator):
impl = Unicode
@@ -27,10 +29,12 @@ class CoerceUnicode(TypeDecorator):
def bind_expression(self, bindvalue):
return _cast_on_2005(bindvalue)
+
class _cast_on_2005(expression.ColumnElement):
def __init__(self, bindvalue):
self.bindvalue = bindvalue
+
@compiles(_cast_on_2005)
def _compile(element, compiler, **kw):
from . import base
@@ -40,76 +44,93 @@ def _compile(element, compiler, **kw):
return compiler.process(cast(element.bindvalue, Unicode), **kw)
schemata = Table("SCHEMATA", ischema,
- Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
- Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
- Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
- schema="INFORMATION_SCHEMA")
+ Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
+ Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
+ Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
+ schema="INFORMATION_SCHEMA")
tables = Table("TABLES", ischema,
- Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column(
+ "TABLE_TYPE", String(convert_unicode=True),
+ key="table_type"),
+ schema="INFORMATION_SCHEMA")
columns = Table("COLUMNS", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("IS_NULLABLE", Integer, key="is_nullable"),
- Column("DATA_TYPE", String, key="data_type"),
- Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
- Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"),
- Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
- Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
- Column("COLUMN_DEFAULT", Integer, key="column_default"),
- Column("COLLATION_NAME", String, key="collation_name"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
+ Column("IS_NULLABLE", Integer, key="is_nullable"),
+ Column("DATA_TYPE", String, key="data_type"),
+ Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
+ Column("CHARACTER_MAXIMUM_LENGTH", Integer,
+ key="character_maximum_length"),
+ Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
+ Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
+ Column("COLUMN_DEFAULT", Integer, key="column_default"),
+ Column("COLLATION_NAME", String, key="collation_name"),
+ schema="INFORMATION_SCHEMA")
constraints = Table("TABLE_CONSTRAINTS", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ Column("CONSTRAINT_TYPE", String(
+ convert_unicode=True), key="constraint_type"),
+ schema="INFORMATION_SCHEMA")
column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_SCHEMA", CoerceUnicode,
+ key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode,
+ key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode,
+ key="column_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ schema="INFORMATION_SCHEMA")
key_constraints = Table("KEY_COLUMN_USAGE", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_SCHEMA", CoerceUnicode,
+ key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode,
+ key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode,
+ key="column_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ Column("ORDINAL_POSITION", Integer,
+ key="ordinal_position"),
+ schema="INFORMATION_SCHEMA")
ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
- Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
- Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- # TODO: is CATLOG misspelled ?
- Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
- key="unique_constraint_catalog"),
-
- Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode,
- key="unique_constraint_schema"),
- Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode,
- key="unique_constraint_name"),
- Column("MATCH_OPTION", String, key="match_option"),
- Column("UPDATE_RULE", String, key="update_rule"),
- Column("DELETE_RULE", String, key="delete_rule"),
- schema="INFORMATION_SCHEMA")
+ Column("CONSTRAINT_CATALOG", CoerceUnicode,
+ key="constraint_catalog"),
+ Column("CONSTRAINT_SCHEMA", CoerceUnicode,
+ key="constraint_schema"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ # TODO: is CATLOG misspelled ?
+ Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
+ key="unique_constraint_catalog"),
+
+ Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode,
+ key="unique_constraint_schema"),
+ Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode,
+ key="unique_constraint_name"),
+ Column("MATCH_OPTION", String, key="match_option"),
+ Column("UPDATE_RULE", String, key="update_rule"),
+ Column("DELETE_RULE", String, key="delete_rule"),
+ schema="INFORMATION_SCHEMA")
views = Table("VIEWS", ischema,
- Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
- Column("CHECK_OPTION", String, key="check_option"),
- Column("IS_UPDATABLE", String, key="is_updatable"),
- schema="INFORMATION_SCHEMA")
+ Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
+ Column("CHECK_OPTION", String, key="check_option"),
+ Column("IS_UPDATABLE", String, key="is_updatable"),
+ schema="INFORMATION_SCHEMA")
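These ``Table`` constructs are ordinary selectables; the MSSQL dialect's
reflection routines query them like any other table. A minimal sketch against
the ``views`` table defined above, using the 0.9-era ``select()`` calling
style (the schema name is illustrative)::

    from sqlalchemy import select

    # roughly what view reflection does internally: pull the name and
    # definition of each view in a given schema
    stmt = select([views.c.table_name, views.c.view_definition]).\
        where(views.c.table_schema == "dbo")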
diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py
index ad9e9c2ba..ffe38d8dd 100644
--- a/lib/sqlalchemy/dialects/mssql/mxodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py
@@ -47,8 +47,8 @@ from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
from .base import (MSDialect,
- MSSQLStrictCompiler,
- _MSDateTime, _MSDate, _MSTime)
+ MSSQLStrictCompiler,
+ _MSDateTime, _MSDate, _MSTime)
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
@@ -82,7 +82,7 @@ class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
SELECT SCOPE_IDENTITY in cases where OUTPUT clause
does not work (tables with insert triggers).
"""
- #todo - investigate whether the pyodbc execution context
+ # todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
@@ -91,7 +91,7 @@ class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# this is only needed if "native ODBC" mode is used,
# which is now disabled by default.
- #statement_compiler = MSSQLStrictCompiler
+ # statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index 4b7be1ac4..8f76336ae 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -9,7 +9,8 @@
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
- :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?charset=utf8
+ :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?\
+charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
@@ -52,7 +53,7 @@ class MSDialect_pymssql(MSDialect):
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
- "the 1.0 series of the pymssql DBAPI.")
+ "the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
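The connect string documented above plugs directly into ``create_engine()``;
a minimal sketch, with placeholder credentials and a hypothetical FreeTDS DSN
name::

    from sqlalchemy import create_engine

    # <freetds_name> refers to an entry in freetds.conf; all values here
    # are illustrative
    engine = create_engine(
        "mssql+pymssql://scott:tiger@my_freetds_dsn?charset=utf8")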
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
index 31c55f502..1c75fe1ff 100644
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -55,9 +55,9 @@ Examples of pyodbc connection string URLs:
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
-* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection
- string that includes a custom
- ODBC driver name. This will create the following connection string::
+* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a
+ connection string that includes a custom ODBC driver name. This will create
+ the following connection string::
DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
@@ -85,14 +85,14 @@ Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
-EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
-versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
-alter how strings are received. The PyODBC dialect attempts to use all the information
-it knows to determine whether or not a Python unicode literal can be
-passed directly to the PyODBC driver or not; while SQLAlchemy can encode
-these to bytestrings first, some users have reported that PyODBC mis-handles
-bytestrings for certain encodings and requires a Python unicode object,
-while the author has observed widespread cases where a Python unicode
+EasySoft is poor regarding unicode; different OS platforms and versions of
+UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
+dramatically alter how strings are received. The PyODBC dialect attempts to
+use all the information it knows to determine whether or not a Python unicode
+literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
+can encode these to bytestrings first, some users have reported that PyODBC
+mis-handles bytestrings for certain encodings and requires a Python unicode
+object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
@@ -117,6 +117,7 @@ from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
import decimal
+
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
@@ -129,7 +130,7 @@ class _ms_numeric_pyodbc(object):
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).\
- bind_processor(dialect)
+ bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
@@ -155,38 +156,41 @@ class _ms_numeric_pyodbc(object):
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
- (value < 0 and '-' or ''),
- '0' * (abs(value.adjusted()) - 1),
- "".join([str(nint) for nint in value.as_tuple()[1]]))
+ (value < 0 and '-' or ''),
+ '0' * (abs(value.adjusted()) - 1),
+ "".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
- (value < 0 and '-' or ''),
- "".join([str(s) for s in _int]),
- "0" * (value.adjusted() - (len(_int) - 1)))
+ (value < 0 and '-' or ''),
+ "".join([str(s) for s in _int]),
+ "0" * (value.adjusted() - (len(_int) - 1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
- (value < 0 and '-' or ''),
- "".join(
- [str(s) for s in _int][0:value.adjusted() + 1]),
- "".join(
- [str(s) for s in _int][value.adjusted() + 1:]))
+ (value < 0 and '-' or ''),
+ "".join(
+ [str(s) for s in _int][0:value.adjusted() + 1]),
+ "".join(
+ [str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
- (value < 0 and '-' or ''),
- "".join(
- [str(s) for s in _int][0:value.adjusted() + 1]))
+ (value < 0 and '-' or ''),
+ "".join(
+ [str(s) for s in _int][0:value.adjusted() + 1]))
return result
+
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
pass
+
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
+
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
@@ -254,9 +258,9 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
self.description_encoding = params.pop('description_encoding')
super(MSDialect_pyodbc, self).__init__(**params)
self.use_scope_identity = self.use_scope_identity and \
- self.dbapi and \
- hasattr(self.dbapi.Cursor, 'nextset')
+ self.dbapi and \
+ hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
- self._dbapi_version() < (2, 1, 8)
+ self._dbapi_version() < (2, 1, 8)
dialect = MSDialect_pyodbc
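The numeric workaround above keys off ``Decimal.adjusted()``, the position of
a value's most significant digit; only values whose adjusted exponent falls
outside the 0..7 window are converted to strings. A quick illustration::

    from decimal import Decimal

    Decimal("0.00001").adjusted()    # -5 -> _small_dec_to_string()
    Decimal("123456789").adjusted()  # 8  -> _large_dec_to_string()
    Decimal("1234.5").adjusted()     # 3  -> passed to pyodbc unchanged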
diff --git a/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
index 5377be1ce..b23a010e7 100644
--- a/lib/sqlalchemy/dialects/mssql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -9,7 +9,8 @@
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
- :connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
+ :connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
+[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
@@ -42,12 +43,12 @@ class MSExecutionContext_zxjdbc(MSExecutionContext):
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
- self.compiled.returning:
+ self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
- self.compiled.statement.table)
+ self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
@@ -59,8 +60,8 @@ class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
def _get_server_version_info(self, connection):
return tuple(
- int(x)
- for x in connection.connection.dbversion.split('.')
- )
+ int(x)
+ for x in connection.connection.dbversion.split('.')
+ )
dialect = MSDialect_zxjdbc
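The reflowed ``_get_server_version_info()`` is just a generator expression
over the driver's version string; for a hypothetical jTDS ``dbversion``
value::

    dbversion = "11.0.2100.60"                   # illustrative only
    tuple(int(x) for x in dbversion.split("."))  # -> (11, 0, 2100, 60)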
diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py
index a9dbd819e..498603cf7 100644
--- a/lib/sqlalchemy/dialects/mysql/__init__.py
+++ b/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -6,8 +6,8 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
- pyodbc, zxjdbc, mysqlconnector, pymysql,\
- gaerdbms, cymysql
+ pyodbc, zxjdbc, mysqlconnector, pymysql,\
+ gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
@@ -22,8 +22,10 @@ from .base import \
VARBINARY, VARCHAR, YEAR, dialect
__all__ = (
-'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE',
-'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT',
-'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
-'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect'
+ 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
+ 'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
+ 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
+ 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+ 'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
+ 'YEAR', 'dialect'
)
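The reflowed ``__all__`` above is what drives
``from sqlalchemy.dialects.mysql import *``; application code normally
imports the MySQL-specific types by name, e.g. (argument values
illustrative)::

    from sqlalchemy.dialects.mysql import INTEGER, VARCHAR

    id_type = INTEGER(display_width=11, unsigned=True)
    name_type = VARCHAR(255, charset="utf8")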
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index ee5747e39..0c00cf530 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -26,9 +26,9 @@ supported in any given server release.
Connection Timeouts
-------------------
-MySQL features an automatic connection close behavior, for connections that have
-been idle for eight hours or more. To circumvent having this issue, use the
-``pool_recycle`` option which controls the maximum age of any connection::
+MySQL features an automatic connection close behavior for connections that
+have been idle for eight hours or more. To work around this issue, use
+the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
@@ -38,10 +38,12 @@ CREATE TABLE arguments including Storage Engines
------------------------------------------------
MySQL's CREATE TABLE syntax includes a wide array of special options,
-including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``, ``INSERT_METHOD``, and many more.
+including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
+``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
-``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE`` of ``1024``::
+``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
+of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
@@ -50,26 +52,28 @@ To accommodate the rendering of these arguments, specify the form
mysql_key_block_size="1024"
)
-The MySQL dialect will normally transfer any keyword specified as ``mysql_keyword_name``
-to be rendered as ``KEYWORD_NAME`` in the ``CREATE TABLE`` statement. A handful
-of these names will render with a space instead of an underscore; to support this,
-the MySQL dialect has awareness of these particular names, which include
-``DATA DIRECTORY`` (e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
-``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g. ``mysql_index_directory``).
-
-The most common argument is ``mysql_engine``, which refers to the storage engine
-for the table. Historically, MySQL server installations would default
+The MySQL dialect will normally transfer any keyword specified as
+``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
+``CREATE TABLE`` statement. A handful of these names will render with a space
+instead of an underscore; to support this, the MySQL dialect has awareness of
+these particular names, which include ``DATA DIRECTORY``
+(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
+``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
+``mysql_index_directory``).
+
+The most common argument is ``mysql_engine``, which refers to the storage
+engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`.Table` that is created in a MySQL database with a storage engine
-of ``MyISAM`` will be essentially non-transactional, meaning any INSERT/UPDATE/DELETE
-statement referring to this table will be invoked as autocommit. It also will have no
-support for foreign key constraints; while the ``CREATE TABLE`` statement
-accepts foreign key options, when using the ``MyISAM`` storage engine these
-arguments are discarded. Reflecting such a table will also produce no
-foreign key constraint information.
+of ``MyISAM`` will be essentially non-transactional, meaning any
+INSERT/UPDATE/DELETE statement referring to this table will be invoked as
+autocommit. It also will have no support for foreign key constraints; while
+the ``CREATE TABLE`` statement accepts foreign key options, when using the
+``MyISAM`` storage engine these arguments are discarded. Reflecting such a
+table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
@@ -118,7 +122,8 @@ AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
-the first :class:`.Integer` primary key column which is not marked as a foreign key::
+the first :class:`.Integer` primary key column which is not marked as a
+foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
@@ -129,10 +134,10 @@ the first :class:`.Integer` primary key column which is not marked as a foreign
PRIMARY KEY (id)
)
-You can disable this behavior by passing ``False`` to the :paramref:`~.Column.autoincrement`
-argument of :class:`.Column`. This flag can also be used to enable
-auto-increment on a secondary column in a multi-column key for some storage
-engines::
+You can disable this behavior by passing ``False`` to the
+:paramref:`~.Column.autoincrement` argument of :class:`.Column`. This flag
+can also be used to enable auto-increment on a secondary column in a
+multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
@@ -184,8 +189,8 @@ usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
-is overridden using DBAPI-specific options
+or whatever is equivalent for the DBAPI in use, on connect, unless the flag
+value is overridden using DBAPI-specific options
(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
OurSQL driver).
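Concretely, with FOUND_ROWS in effect an UPDATE that matches a row but
changes nothing still counts it. A sketch, with an illustrative URL and
table::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer

    engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test")
    t = Table("t", MetaData(), Column("id", Integer, primary_key=True),
              Column("x", Integer))

    # rowcount reports rows *matched* by the WHERE clause, even where
    # x was already 5 and nothing was physically modified
    result = engine.execute(t.update().where(t.c.x == 5).values(x=5))
    result.rowcount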
@@ -197,14 +202,14 @@ See also:
CAST Support
------------
-MySQL documents the CAST operator as available in version 4.0.2. When using the
-SQLAlchemy :func:`.cast` function, SQLAlchemy
-will not render the CAST token on MySQL before this version, based on server version
-detection, instead rendering the internal expression directly.
+MySQL documents the CAST operator as available in version 4.0.2. When using
+the SQLAlchemy :func:`.cast` function, SQLAlchemy
+will not render the CAST token on MySQL before this version, based on server
+version detection, instead rendering the internal expression directly.
-CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't
-add all datatype support until 4.1.1. If your application falls into this
-narrow area, the behavior of CAST can be controlled using the
+CAST may still not be desirable on an early MySQL version post-4.0.2, as it
+didn't add all datatype support until 4.1.1. If your application falls into
+this narrow area, the behavior of CAST can be controlled using the
:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
from sqlalchemy.sql.expression import Cast
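The recipe continues past this hunk's context; in full it takes roughly the
following shape (a sketch, not the verbatim source)::

    from sqlalchemy.sql.expression import Cast
    from sqlalchemy.ext.compiler import compiles

    @compiles(Cast, 'mysql')
    def _check_mysql_version(element, compiler, **kw):
        if compiler.dialect.server_version_info < (4, 1, 0):
            # render only the inner expression, skipping the CAST
            return compiler.process(element.clause, **kw)
        else:
            return compiler.visit_cast(element, **kw)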
@@ -241,7 +246,8 @@ become part of the index. SQLAlchemy provides this feature via the
Index('my_index', my_table.c.data, mysql_length=10)
- Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4, 'b': 9})
+ Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
+ 'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
@@ -289,10 +295,10 @@ Foreign Key Arguments to Avoid
MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
-:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of these keywords being
-rendered in a DDL expression, which will then raise an error on MySQL.
-In order to use these keywords on a foreign key while having them ignored
-on a MySQL backend, use a custom compile rule::
+:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
+these keywords being rendered in a DDL expression, which will then raise an
+error on MySQL. In order to use these keywords on a foreign key while having
+them ignored on a MySQL backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@@ -303,19 +309,20 @@ on a MySQL backend, use a custom compile rule::
return compiler.visit_foreign_key_constraint(element, **kw)
.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
- the ``deferrable`` or ``initially`` keyword arguments of :class:`.ForeignKeyConstraint`
- and :class:`.ForeignKey`.
+ the ``deferrable`` or ``initially`` keyword arguments of
+ :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
-by SQLAlchemy in conjunction with the MySQL backend. This argument is silently
-ignored by MySQL, but in addition has the effect of ON UPDATE and ON DELETE options
-also being ignored by the backend. Therefore MATCH should never be used with the
-MySQL backend; as is the case with DEFERRABLE and INITIALLY, custom compilation
-rules can be used to correct a MySQL ForeignKeyConstraint at DDL definition time.
+by SQLAlchemy in conjunction with the MySQL backend. This argument is
+silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
+DELETE options also being ignored by the backend. Therefore MATCH should
+never be used with the MySQL backend; as is the case with DEFERRABLE and
+INITIALLY, custom compilation rules can be used to correct a MySQL
+ForeignKeyConstraint at DDL definition time.
-.. versionadded:: 0.9.0 - the MySQL backend will raise a :class:`.CompileError`
- when the ``match`` keyword is used with :class:`.ForeignKeyConstraint`
- or :class:`.ForeignKey`.
+.. versionadded:: 0.9.0 - the MySQL backend will raise a
+ :class:`.CompileError` when the ``match`` keyword is used with
+ :class:`.ForeignKeyConstraint` or :class:`.ForeignKey`.
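Pieced together, the custom compile rule excerpted above simply strips the
unsupported keywords before the constraint is rendered; a sketch::

    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.schema import ForeignKeyConstraint

    @compiles(ForeignKeyConstraint, "mysql")
    def process(element, compiler, **kw):
        element.deferrable = element.initially = None
        return compiler.visit_foreign_key_constraint(element, **kw)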
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -350,7 +357,7 @@ from ...engine import default
from ... import types as sqltypes
from ...util import topological
from ...types import DATE, BOOLEAN, \
- BLOB, BINARY, VARBINARY
+ BLOB, BINARY, VARBINARY
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
@@ -364,15 +371,15 @@ RESERVED_WORDS = set(
'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
- 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
- 'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
- 'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
- 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
- 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
- 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
- 'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
- 'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
- 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
+ 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group',
+ 'having', 'high_priority', 'hour_microsecond', 'hour_minute',
+ 'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout',
+ 'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8',
+ 'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys',
+ 'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines',
+ 'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob',
+ 'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert',
+ 'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
@@ -397,9 +404,9 @@ RESERVED_WORDS = set(
'read_only', 'read_write', # 5.1
'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue',
- 'resignal', 'signal', 'slow', # 5.5
+ 'resignal', 'signal', 'slow', # 5.5
- 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
+ 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6
])
@@ -427,7 +434,8 @@ class _NumericType(object):
def __repr__(self):
return util.generic_repr(self,
- to_inspect=[_NumericType, sqltypes.Numeric])
+ to_inspect=[_NumericType, sqltypes.Numeric])
+
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
@@ -435,16 +443,19 @@ class _FloatType(_NumericType, sqltypes.Float):
(
(precision is None and scale is not None) or
(precision is not None and scale is None)
- ):
+ ):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
- super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
+ super(_FloatType, self).__init__(
+ precision=precision, asdecimal=asdecimal, **kw)
self.scale = scale
def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_FloatType, _NumericType, sqltypes.Float])
+ return util.generic_repr(self, to_inspect=[_FloatType,
+ _NumericType,
+ sqltypes.Float])
+
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
@@ -452,8 +463,10 @@ class _IntegerType(_NumericType, sqltypes.Integer):
super(_IntegerType, self).__init__(**kw)
def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_IntegerType, _NumericType, sqltypes.Integer])
+ return util.generic_repr(self, to_inspect=[_IntegerType,
+ _NumericType,
+ sqltypes.Integer])
+
class _StringType(sqltypes.String):
"""Base for MySQL string types."""
@@ -474,7 +487,8 @@ class _StringType(sqltypes.String):
def __repr__(self):
return util.generic_repr(self,
- to_inspect=[_StringType, sqltypes.String])
+ to_inspect=[_StringType, sqltypes.String])
+
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
@@ -498,7 +512,7 @@ class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""
super(NUMERIC, self).__init__(precision=precision,
- scale=scale, asdecimal=asdecimal, **kw)
+ scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
@@ -537,10 +551,10 @@ class DOUBLE(_FloatType):
.. note::
The :class:`.DOUBLE` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits. Specify
- either ``scale=n`` or ``decimal_return_scale=n`` in order to change
- this scale, or ``asdecimal=False`` to return values directly as
- Python floating points.
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
@@ -570,10 +584,10 @@ class REAL(_FloatType, sqltypes.REAL):
.. note::
The :class:`.REAL` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits. Specify
- either ``scale=n`` or ``decimal_return_scale=n`` in order to change
- this scale, or ``asdecimal=False`` to return values directly as
- Python floating points.
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
@@ -728,9 +742,9 @@ class SMALLINT(_IntegerType, sqltypes.SMALLINT):
class BIT(sqltypes.TypeEngine):
"""MySQL BIT type.
- This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for
- MyISAM, MEMORY, InnoDB and BDB. For older versions, use a MSTinyInteger()
- type.
+    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
+    for MEMORY, InnoDB and BDB.  For older versions, use a
+    MSTinyInteger() type.
"""
@@ -1075,11 +1089,12 @@ class CHAR(_StringType, sqltypes.CHAR):
ascii=type_.ascii,
binary=type_.binary,
unicode=type_.unicode,
- national=False # not supported in CAST
+ national=False # not supported in CAST
)
else:
return CHAR(length=type_.length)
+
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MySQL NVARCHAR type.
@@ -1149,6 +1164,7 @@ class LONGBLOB(sqltypes._Binary):
__visit_name__ = 'LONGBLOB'
+
class _EnumeratedValues(_StringType):
def _init_values(self, values, kw):
self.quoting = kw.pop('quoting', 'auto')
@@ -1191,6 +1207,7 @@ class _EnumeratedValues(_StringType):
strip_values.append(a)
return strip_values
+
class ENUM(sqltypes.Enum, _EnumeratedValues):
"""MySQL ENUM type."""
@@ -1257,8 +1274,8 @@ class ENUM(sqltypes.Enum, _EnumeratedValues):
sqltypes.Enum.__init__(self, *values)
def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[ENUM, _StringType, sqltypes.Enum])
+ return util.generic_repr(
+ self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
def bind_processor(self, dialect):
super_convert = super(ENUM, self).bind_processor(dialect)
@@ -1266,7 +1283,7 @@ class ENUM(sqltypes.Enum, _EnumeratedValues):
def process(value):
if self.strict and value is not None and value not in self.enums:
raise exc.InvalidRequestError('"%s" not a valid value for '
- 'this enum' % value)
+ 'this enum' % value)
if super_convert:
return super_convert(value)
else:
@@ -1360,7 +1377,8 @@ class SET(_EnumeratedValues):
super_convert = super(SET, self).bind_processor(dialect)
def process(value):
- if value is None or isinstance(value, util.int_types + util.string_types):
+ if value is None or isinstance(
+ value, util.int_types + util.string_types):
pass
else:
if None in value:
@@ -1480,11 +1498,11 @@ class MySQLCompiler(compiler.SQLCompiler):
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (self.process(binary.left),
- self.process(binary.right))
+ self.process(binary.right))
def visit_match_op_binary(self, binary, operator, **kw):
return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \
- (self.process(binary.left), self.process(binary.right))
+ (self.process(binary.left), self.process(binary.right))
def get_from_hint_text(self, table, text):
return text
@@ -1499,16 +1517,17 @@ class MySQLCompiler(compiler.SQLCompiler):
elif isinstance(type_, sqltypes.TIMESTAMP):
return 'DATETIME'
elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
- sqltypes.Date, sqltypes.Time)):
+ sqltypes.Date, sqltypes.Time)):
return self.dialect.type_compiler.process(type_)
- elif isinstance(type_, sqltypes.String) and not isinstance(type_, (ENUM, SET)):
+ elif isinstance(type_, sqltypes.String) \
+ and not isinstance(type_, (ENUM, SET)):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(
- type_).replace('NUMERIC', 'DECIMAL')
+ type_).replace('NUMERIC', 'DECIMAL')
else:
return None
@@ -1569,7 +1588,8 @@ class MySQLCompiler(compiler.SQLCompiler):
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
- limit_clause, offset_clause = select._limit_clause, select._offset_clause
+ limit_clause, offset_clause = select._limit_clause, \
+ select._offset_clause
if limit_clause is None and offset_clause is None:
return ''
@@ -1585,12 +1605,12 @@ class MySQLCompiler(compiler.SQLCompiler):
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
- self.process(offset_clause),
- "18446744073709551615")
+ self.process(offset_clause),
+ "18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
- self.process(offset_clause),
- self.process(limit_clause))
+ self.process(offset_clause),
+ self.process(limit_clause))
else:
# No offset provided, so just use the limit
return ' \n LIMIT %s' % (self.process(limit_clause),)
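The magic number in the offset-only branch is ``2**64 - 1``, the largest
BIGINT UNSIGNED value, which the MySQL manual itself suggests as the row
count placeholder since MySQL has no standalone OFFSET clause::

    2 ** 64 - 1        # 18446744073709551615

    # e.g. a query with .offset(10) and no .limit() renders as:
    #   SELECT ... LIMIT 10, 18446744073709551615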
@@ -1602,12 +1622,13 @@ class MySQLCompiler(compiler.SQLCompiler):
else:
return None
- def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
+ def update_tables_clause(self, update_stmt, from_table,
+ extra_froms, **kw):
return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
- for t in [from_table] + list(extra_froms))
+ for t in [from_table] + list(extra_froms))
def update_from_clause(self, update_stmt, from_table,
- extra_froms, from_hints, **kw):
+ extra_froms, from_hints, **kw):
return None
@@ -1620,11 +1641,12 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
def create_table_constraints(self, table):
"""Get table constraints."""
constraint_string = super(
- MySQLDDLCompiler, self).create_table_constraints(table)
+ MySQLDDLCompiler, self).create_table_constraints(table)
# why self.dialect.name and not 'mysql'? because of drizzle
is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
- table.dialect_options[self.dialect.name]['engine'].lower() == 'innodb'
+ table.dialect_options[self.dialect.name][
+ 'engine'].lower() == 'innodb'
auto_inc_column = table._autoincrement_column
@@ -1634,11 +1656,11 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
if constraint_string:
constraint_string += ", \n\t"
constraint_string += "KEY %s (%s)" % (
- self.preparer.quote(
- "idx_autoinc_%s" % auto_inc_column.name
- ),
- self.preparer.format_column(auto_inc_column)
- )
+ self.preparer.quote(
+ "idx_autoinc_%s" % auto_inc_column.name
+ ),
+ self.preparer.format_column(auto_inc_column)
+ )
return constraint_string
@@ -1646,7 +1668,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
"""Builds column DDL."""
colspec = [self.preparer.format_column(column),
- self.dialect.type_compiler.process(column.type)
+ self.dialect.type_compiler.process(column.type)
]
default = self.get_column_default_string(column)
@@ -1661,7 +1683,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
colspec.append('NULL')
if column is column.table._autoincrement_column and \
- column.server_default is None:
+ column.server_default is None:
colspec.append('AUTO_INCREMENT')
return ' '.join(colspec)
@@ -1697,7 +1719,8 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
- 'CHARACTER SET', 'COLLATE', 'PARTITION BY', 'PARTITIONS'):
+ 'CHARACTER SET', 'COLLATE',
+ 'PARTITION BY', 'PARTITIONS'):
joiner = ' '
table_opts.append(joiner.join((opt, arg)))
@@ -1709,8 +1732,8 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [self.sql_compiler.process(expr, include_table=False,
- literal_binds=True)
- for expr in index.expressions]
+ literal_binds=True)
+ for expr in index.expressions]
name = self._prepared_index_name(index)
@@ -1723,8 +1746,9 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
if length is not None:
if isinstance(length, dict):
- # length value can be a (column_name --> integer value) mapping
- # specifying the prefix length for each column of the index
+ # length value can be a (column_name --> integer value)
+ # mapping specifying the prefix length for each column of the
+ # index
columns = ', '.join(
'%s(%d)' % (expr, length[col.name]) if col.name in length
else
@@ -1763,9 +1787,9 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
index = drop.element
return "\nDROP INDEX %s ON %s" % (
- self._prepared_index_name(index,
- include_schema=False),
- self.preparer.format_table(index.table))
+ self._prepared_index_name(index,
+ include_schema=False),
+ self.preparer.format_table(index.table))
def visit_drop_constraint(self, drop):
constraint = drop.element
@@ -1782,16 +1806,17 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % \
- (self.preparer.format_table(constraint.table),
- qual, const)
+ (self.preparer.format_table(constraint.table),
+ qual, const)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
- "MySQL ignores the 'MATCH' keyword while at the same time "
- "causes ON UPDATE/ON DELETE clauses to be ignored.")
+ "MySQL ignores the 'MATCH' keyword while at the same time "
+ "causes ON UPDATE/ON DELETE clauses to be ignored.")
return ""
+
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
@@ -1845,78 +1870,78 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(type_,
- "NUMERIC(%(precision)s)" %
- {'precision': type_.precision})
+ "NUMERIC(%(precision)s)" %
+ {'precision': type_.precision})
else:
return self._extend_numeric(type_,
- "NUMERIC(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
+ "NUMERIC(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(type_,
- "DECIMAL(%(precision)s)" %
- {'precision': type_.precision})
+ "DECIMAL(%(precision)s)" %
+ {'precision': type_.precision})
else:
return self._extend_numeric(type_,
- "DECIMAL(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
+ "DECIMAL(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
def visit_DOUBLE(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
- "DOUBLE(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
+ "DOUBLE(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
else:
return self._extend_numeric(type_, 'DOUBLE')
def visit_REAL(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
- "REAL(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
+ "REAL(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
else:
return self._extend_numeric(type_, 'REAL')
def visit_FLOAT(self, type_):
if self._mysql_type(type_) and \
- type_.scale is not None and \
- type_.precision is not None:
- return self._extend_numeric(type_,
- "FLOAT(%s, %s)" % (type_.precision, type_.scale))
+ type_.scale is not None and \
+ type_.precision is not None:
+ return self._extend_numeric(
+ type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_,
- "FLOAT(%s)" % (type_.precision,))
+ "FLOAT(%s)" % (type_.precision,))
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "INTEGER(%(display_width)s)" %
- {'display_width': type_.display_width})
+ return self._extend_numeric(
+ type_, "INTEGER(%(display_width)s)" %
+ {'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "BIGINT(%(display_width)s)" %
- {'display_width': type_.display_width})
+ return self._extend_numeric(
+ type_, "BIGINT(%(display_width)s)" %
+ {'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "MEDIUMINT(%(display_width)s)" %
- {'display_width': type_.display_width})
+ return self._extend_numeric(
+ type_, "MEDIUMINT(%(display_width)s)" %
+ {'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
@@ -1930,9 +1955,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
- "SMALLINT(%(display_width)s)" %
- {'display_width': type_.display_width}
- )
+ "SMALLINT(%(display_width)s)" %
+ {'display_width': type_.display_width}
+ )
else:
return self._extend_numeric(type_, "SMALLINT")
@@ -1986,16 +2011,17 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARCHAR(self, type_):
if type_.length:
- return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
+ return self._extend_string(
+ type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
- "VARCHAR requires a length on dialect %s" %
- self.dialect.name)
+ "VARCHAR requires a length on dialect %s" %
+ self.dialect.name)
def visit_CHAR(self, type_):
if type_.length:
return self._extend_string(type_, {}, "CHAR(%(length)s)" %
- {'length': type_.length})
+ {'length': type_.length})
else:
return self._extend_string(type_, {}, "CHAR")
@@ -2003,19 +2029,21 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
- return self._extend_string(type_, {'national': True},
- "VARCHAR(%(length)s)" % {'length': type_.length})
+ return self._extend_string(
+ type_, {'national': True},
+ "VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.CompileError(
- "NVARCHAR requires a length on dialect %s" %
- self.dialect.name)
+ "NVARCHAR requires a length on dialect %s" %
+ self.dialect.name)
def visit_NCHAR(self, type_):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
- return self._extend_string(type_, {'national': True},
- "CHAR(%(length)s)" % {'length': type_.length})
+ return self._extend_string(
+ type_, {'national': True},
+ "CHAR(%(length)s)" % {'length': type_.length})
else:
return self._extend_string(type_, {'national': True}, "CHAR")
@@ -2051,16 +2079,16 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(type_, {}, "%s(%s)" % (
- name, ",".join(quoted_enums))
- )
+ name, ",".join(quoted_enums))
+ )
def visit_ENUM(self, type_):
return self._visit_enumerated_values("ENUM", type_,
- type_._enumerated_values)
+ type_._enumerated_values)
def visit_SET(self, type_):
return self._visit_enumerated_values("SET", type_,
- type_._enumerated_values)
+ type_._enumerated_values)
def visit_BOOLEAN(self, type):
return "BOOL"
@@ -2077,9 +2105,9 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
- dialect,
- initial_quote=quote,
- escape_quote=quote)
+ dialect,
+ initial_quote=quote,
+ escape_quote=quote)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
@@ -2089,7 +2117,9 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
@log.class_logger
class MySQLDialect(default.DefaultDialect):
- """Details of the MySQL dialect. Not used directly in application code."""
+ """Details of the MySQL dialect.
+ Not used directly in application code.
+ """
name = 'mysql'
supports_alter = True
@@ -2148,8 +2178,8 @@ class MySQLDialect(default.DefaultDialect):
else:
return None
- _isolation_lookup = set(['SERIALIZABLE',
- 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
@@ -2158,7 +2188,7 @@ class MySQLDialect(default.DefaultDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
@@ -2177,10 +2207,11 @@ class MySQLDialect(default.DefaultDialect):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
- # Yes, we have at least one user who has to talk to these old versions!
+ # Yes, we have at least one user who has to talk to these old
+ # versions!
#
- # Ignore commit/rollback if support isn't present, otherwise even basic
- # operations via autocommit fail.
+ # Ignore commit/rollback if support isn't present, otherwise even
+ # basic operations via autocommit fail.
try:
dbapi_connection.commit()
except:
@@ -2226,9 +2257,10 @@ class MySQLDialect(default.DefaultDialect):
return [row['data'][0:row['gtrid_length']] for row in resultset]
def is_disconnect(self, e, connection, cursor):
- if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)):
+ if isinstance(e, (self.dbapi.OperationalError,
+ self.dbapi.ProgrammingError)):
return self._extract_error_code(e) in \
- (2006, 2013, 2014, 2045, 2055)
+ (2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
@@ -2296,15 +2328,15 @@ class MySQLDialect(default.DefaultDialect):
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
- self.identifier_preparer = self.preparer(self,
- server_ansiquotes=self._server_ansiquotes)
+ self.identifier_preparer = self.preparer(
+ self, server_ansiquotes=self._server_ansiquotes)
default.DefaultDialect.initialize(self, connection)
@property
def _supports_cast(self):
return self.server_version_info is None or \
- self.server_version_info >= (4, 0, 2)
+ self.server_version_info >= (4, 0, 2)
@reflection.cache
def get_schema_names(self, connection, **kw):
@@ -2321,17 +2353,19 @@ class MySQLDialect(default.DefaultDialect):
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
- rp = connection.execute("SHOW TABLES FROM %s" %
+ rp = connection.execute(
+ "SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0] for
- row in self._compat_fetchall(rp, charset=charset)]
+ row in self._compat_fetchall(rp, charset=charset)]
else:
- rp = connection.execute("SHOW FULL TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(current_schema))
+ rp = connection.execute(
+ "SHOW FULL TABLES FROM %s" %
+ self.identifier_preparer.quote_identifier(current_schema))
return [row[0]
- for row in self._compat_fetchall(rp, charset=charset)
- if row[1] == 'BASE TABLE']
+ for row in self._compat_fetchall(rp, charset=charset)
+ if row[1] == 'BASE TABLE']
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
@@ -2342,29 +2376,30 @@ class MySQLDialect(default.DefaultDialect):
if self.server_version_info < (5, 0, 2):
return self.get_table_names(connection, schema)
charset = self._connection_charset
- rp = connection.execute("SHOW FULL TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(schema))
+ rp = connection.execute(
+ "SHOW FULL TABLES FROM %s" %
+ self.identifier_preparer.quote_identifier(schema))
return [row[0]
- for row in self._compat_fetchall(rp, charset=charset)
- if row[1] in ('VIEW', 'SYSTEM VIEW')]
+ for row in self._compat_fetchall(rp, charset=charset)
+ if row[1] in ('VIEW', 'SYSTEM VIEW')]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
+ connection, table_name, schema, **kw)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
+ connection, table_name, schema, **kw)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
+ connection, table_name, schema, **kw)
for key in parsed_state.keys:
if key['type'] == 'PRIMARY':
# There can be only one.
@@ -2376,7 +2411,7 @@ class MySQLDialect(default.DefaultDialect):
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
+ connection, table_name, schema, **kw)
default_schema = None
fkeys = []
@@ -2384,7 +2419,8 @@ class MySQLDialect(default.DefaultDialect):
for spec in parsed_state.constraints:
# only FOREIGN KEYs
ref_name = spec['table'][-1]
- ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema
+ ref_schema = len(spec['table']) > 1 and \
+ spec['table'][-2] or schema
if not ref_schema:
if default_schema is None:
@@ -2416,7 +2452,7 @@ class MySQLDialect(default.DefaultDialect):
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
+ connection, table_name, schema, **kw)
indexes = []
for spec in parsed_state.keys:
@@ -2466,13 +2502,13 @@ class MySQLDialect(default.DefaultDialect):
return sql
def _parsed_state_or_create(self, connection, table_name,
- schema=None, **kw):
+ schema=None, **kw):
return self._setup_parser(
- connection,
- table_name,
- schema,
- info_cache=kw.get('info_cache', None)
- )
+ connection,
+ table_name,
+ schema,
+ info_cache=kw.get('info_cache', None)
+ )
@util.memoized_property
def _tabledef_parser(self):
@@ -2519,7 +2555,7 @@ class MySQLDialect(default.DefaultDialect):
charset = self._connection_charset
row = self._compat_first(connection.execute(
"SHOW VARIABLES LIKE 'lower_case_table_names'"),
- charset=charset)
+ charset=charset)
if not row:
cs = 0
else:
@@ -2554,7 +2590,7 @@ class MySQLDialect(default.DefaultDialect):
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
- charset=self._connection_charset)
+ charset=self._connection_charset)
if not row:
mode = ''
@@ -2570,7 +2606,6 @@ class MySQLDialect(default.DefaultDialect):
# as of MySQL 5.0.1
self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
-
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
@@ -2595,7 +2630,7 @@ class MySQLDialect(default.DefaultDialect):
return sql
def _describe_table(self, connection, table, charset=None,
- full_name=None):
+ full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
@@ -2687,7 +2722,7 @@ class MySQLTableDefinitionParser(object):
if m:
spec = m.groupdict()
spec['table'] = \
- self.preparer.unformat_identifiers(spec['table'])
+ self.preparer.unformat_identifiers(spec['table'])
spec['local'] = [c[0]
for c in self._parse_keyexprs(spec['local'])]
spec['foreign'] = [c[0]
@@ -2768,7 +2803,7 @@ class MySQLTableDefinitionParser(object):
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args, notnull = \
- spec['name'], spec['coltype'], spec['arg'], spec['notnull']
+ spec['name'], spec['coltype'], spec['arg'], spec['notnull']
try:
col_type = self.dialect.ischema_names[type_]
@@ -2838,7 +2873,7 @@ class MySQLTableDefinitionParser(object):
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = \
- [row[i] for i in (0, 1, 2, 4, 5)]
+ [row[i] for i in (0, 1, 2, 4, 5)]
line = [' ']
line.append(self.preparer.quote_identifier(name))
@@ -2917,15 +2952,15 @@ class MySQLTableDefinitionParser(object):
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
- r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
+ r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
r'(?: +COLLATE +(?P<collate>[\w_]+))?'
r'(?: +(?P<notnull>NOT NULL))?'
r'(?: +DEFAULT +(?P<default>'
- r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
- r'(?: +ON UPDATE \w+)?)'
+ r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
+ r'(?: +ON UPDATE \w+)?)'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
@@ -2934,7 +2969,7 @@ class MySQLTableDefinitionParser(object):
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
- )
+ )
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
@@ -2944,7 +2979,7 @@ class MySQLTableDefinitionParser(object):
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>NOT NULL)?'
% quotes
- )
+ )
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
@@ -2960,7 +2995,7 @@ class MySQLTableDefinitionParser(object):
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r',?$'
% quotes
- )
+ )
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
@@ -2976,13 +3011,14 @@ class MySQLTableDefinitionParser(object):
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
- r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+ r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+ r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
- )
+ )
# PARTITION
#
@@ -3005,8 +3041,9 @@ class MySQLTableDefinitionParser(object):
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
- self._add_option_regex('RAID_TYPE',
- r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
+ self._add_option_regex(
+ 'RAID_TYPE',
+ r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'
@@ -3014,8 +3051,9 @@ class MySQLTableDefinitionParser(object):
regex = (r'(?P<directive>%s)%s'
r"'(?P<val>(?:[^']|'')*?)'(?!')" %
(re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(regex, lambda v:
- v.replace("\\\\", "\\").replace("''", "'")))
+ self._pr_options.append(_pr_compile(
+ regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+ ))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s)%s'
@@ -3033,7 +3071,6 @@ _options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
'PASSWORD', 'CONNECTION')
-
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py
index c9f82a0bd..51b63044e 100644
--- a/lib/sqlalchemy/dialects/mysql/cymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/cymysql.py
@@ -10,7 +10,8 @@
.. dialect:: mysql+cymysql
:name: CyMySQL
:dbapi: cymysql
- :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
+ :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
:url: https://github.com/nakagami/CyMySQL
"""
@@ -20,6 +21,7 @@ from .mysqldb import MySQLDialect_mysqldb
from .base import (BIT, MySQLDialect)
from ... import util
+
class _cymysqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
@@ -74,7 +76,7 @@ class MySQLDialect_cymysql(MySQLDialect_mysqldb):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in \
- (2006, 2013, 2014, 2045, 2055)
+ (2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
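Elsewhere in this file, ``_cymysqlBIT.result_processor()`` folds MySQL's
variable-length BIT byte string into an integer, big-endian style; the core
of the conversion is equivalent to::

    def bits_to_int(value):
        v = 0
        for i in value:       # Python 3 semantics: bytes yield ints;
            v = v << 8 | i    # the dialect uses util.iterbytes() for Py2
        return v

    bits_to_int(b"\x01\x00")  # -> 256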
diff --git a/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
index 6f231198d..0059f5a65 100644
--- a/lib/sqlalchemy/dialects/mysql/gaerdbms.py
+++ b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -9,10 +9,11 @@
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
- :url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
+ :url: https://developers.google.com/appengine/docs/python/cloud-sql/\
+developers-guide
- This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal
- changes.
+ This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
+ minimal changes.
.. versionadded:: 0.7.8
@@ -45,7 +46,7 @@ class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
- # python/django/backend/base.py#118
+ # python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
index 91223e270..e51e80005 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
@@ -9,15 +9,16 @@
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
- :connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
+ :connectstring: mysql+mysqlconnector://<user>:<password>@\
+<host>[:<port>]/<dbname>
:url: http://dev.mysql.com/downloads/connector/python/
"""
-from .base import (MySQLDialect,
- MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer,
- BIT)
+from .base import (MySQLDialect, MySQLExecutionContext,
+ MySQLCompiler, MySQLIdentifierPreparer,
+ BIT)
from ... import util
@@ -31,7 +32,7 @@ class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
class MySQLCompiler_mysqlconnector(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
@@ -98,7 +99,8 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
if self.dbapi is not None:
try:
from mysql.connector.constants import ClientFlag
- client_flags = opts.get('client_flags', ClientFlag.get_default())
+ client_flags = opts.get(
+ 'client_flags', ClientFlag.get_default())
client_flags |= ClientFlag.FOUND_ROWS
opts['client_flags'] = client_flags
except:
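The FOUND_ROWS flag OR'ed in above makes ``cursor.rowcount`` after an
UPDATE report rows *matched* rather than rows actually changed, which
SQLAlchemy's rowcount checks rely upon. A sketch of the same flag math,
assuming mysql-connector-python is installed::

    from mysql.connector.constants import ClientFlag

    client_flags = ClientFlag.get_default() | ClientFlag.FOUND_ROWS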
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
index 8ee367a07..73210d67a 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -42,7 +42,7 @@ It is strongly advised to use the latest version of MySQL-Python.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
- MySQLCompiler, MySQLIdentifierPreparer)
+ MySQLCompiler, MySQLIdentifierPreparer)
from .base import TEXT
from ... import sql
from ... import util
@@ -58,14 +58,16 @@ class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
else:
return self.cursor.rowcount
+
class MySQLCompiler_mysqldb(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
+
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
def _escape_identifier(self, value):
@@ -86,7 +88,6 @@ class MySQLDialect_mysqldb(MySQLDialect):
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer_mysqldb
-
@classmethod
def dbapi(cls):
return __import__('MySQLdb')
@@ -102,23 +103,22 @@ class MySQLDialect_mysqldb(MySQLDialect):
# specific issue w/ the utf8_bin collation and unicode returns
has_utf8_bin = connection.scalar(
- "show collation where %s = 'utf8' and %s = 'utf8_bin'"
- % (
- self.identifier_preparer.quote("Charset"),
- self.identifier_preparer.quote("Collation")
- ))
+ "show collation where %s = 'utf8' and %s = 'utf8_bin'"
+ % (
+ self.identifier_preparer.quote("Charset"),
+ self.identifier_preparer.quote("Collation")
+ ))
if has_utf8_bin:
additional_tests = [
sql.collate(sql.cast(
- sql.literal_column(
+ sql.literal_column(
"'test collated returns'"),
- TEXT(charset='utf8')), "utf8_bin")
+ TEXT(charset='utf8')), "utf8_bin")
]
else:
additional_tests = []
return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
- connection, additional_tests)
-
+ connection, additional_tests)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
@@ -130,9 +130,9 @@ class MySQLDialect_mysqldb(MySQLDialect):
util.coerce_kw_type(opts, 'read_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
- # Note: using either of the below will cause all strings to be returned
- # as Unicode, both in raw SQL operations and with column types like
- # String and MSString.
+ # Note: using either of the below will cause all strings to be
+ # returned as Unicode, both in raw SQL operations and with column
+ # types like String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
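These coercions matter because options arriving via the URL query string
are always strings; a hedged example of a URL that exercises them
(credentials and database name illustrative)::

    from sqlalchemy import create_engine

    # "1" and "0" are coerced to bool/int before reaching MySQLdb.connect()
    engine = create_engine(
        "mysql+mysqldb://scott:tiger@localhost/test"
        "?charset=utf8&use_unicode=1&local_infile=0")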
@@ -155,8 +155,8 @@ class MySQLDialect_mysqldb(MySQLDialect):
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
- self.dbapi.__name__ + '.constants.CLIENT'
- ).constants.CLIENT
+ self.dbapi.__name__ + '.constants.CLIENT'
+ ).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py
index 12136514c..fa127f3b0 100644
--- a/lib/sqlalchemy/dialects/mysql/oursql.py
+++ b/lib/sqlalchemy/dialects/mysql/oursql.py
@@ -80,7 +80,8 @@ class MySQLDialect_oursql(MySQLDialect):
return __import__('oursql')
def do_execute(self, cursor, statement, parameters, context=None):
- """Provide an implementation of *cursor.execute(statement, parameters)*."""
+ """Provide an implementation of
+ *cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
@@ -95,9 +96,11 @@ class MySQLDialect_oursql(MySQLDialect):
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
- arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
+ arg = connection.connection._escape_string(
+ xid.encode(charset)).decode(charset)
arg = "'%s'" % arg
- connection.execution_options(_oursql_plain_query=True).execute(query % arg)
+ connection.execution_options(
+ _oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
@@ -127,10 +130,10 @@ class MySQLDialect_oursql(MySQLDialect):
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- table_name,
- schema
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ table_name,
+ schema
)
def get_table_options(self, connection, table_name, schema=None, **kw):
@@ -190,7 +193,8 @@ class MySQLDialect_oursql(MySQLDialect):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
- return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
+ return e.errno is None and 'cursor' not in e.args[1] \
+ and e.args[1].endswith('closed')
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
@@ -218,7 +222,7 @@ class MySQLDialect_oursql(MySQLDialect):
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
- 'ssl_capath', 'ssl_cipher']:
+ 'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
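The loop above gathers ``ssl_*`` URL options into the nested ``ssl``
dictionary that oursql expects; a rough standalone rendering of the same
transformation (paths illustrative)::

    opts = {'ssl_ca': '/path/ca.pem', 'ssl_cert': '/path/cert.pem'}

    ssl = {}
    for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath',
                'ssl_cipher']:
        if key in opts:
            ssl[key[4:]] = str(opts[key])   # strip the "ssl_" prefix

    assert ssl == {'ca': '/path/ca.pem', 'cert': '/path/cert.pem'}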
diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py
index b05c22295..31226cea0 100644
--- a/lib/sqlalchemy/dialects/mysql/pymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/pymysql.py
@@ -10,21 +10,23 @@
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
- :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
+ :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
:url: http://code.google.com/p/pymysql/
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
-and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
-the pymysql driver as well.
+and targets 100% compatibility. Most behavioral notes for MySQL-python apply
+to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import py3k
+
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
@@ -32,7 +34,6 @@ class MySQLDialect_pymysql(MySQLDialect_mysqldb):
if py3k:
supports_unicode_statements = True
-
@classmethod
def dbapi(cls):
return __import__('pymysql')
diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py
index 8b6821643..58e8b30fe 100644
--- a/lib/sqlalchemy/dialects/mysql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py
@@ -67,7 +67,8 @@ class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
if opts.get(key, None):
return opts[key]
- util.warn("Could not detect the connection character set. Assuming latin1.")
+ util.warn("Could not detect the connection character set. "
+ "Assuming latin1.")
return 'latin1'
def _extract_error_code(self, exception):
diff --git a/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
index 17e062770..0cf92cd13 100644
--- a/lib/sqlalchemy/dialects/mysql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
@@ -10,7 +10,8 @@
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
- :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
+ :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
+<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
Character Sets
@@ -83,7 +84,8 @@ class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
if opts.get(key, None):
return opts[key]
- util.warn("Could not detect the connection character set. Assuming latin1.")
+ util.warn("Could not detect the connection character set. "
+ "Assuming latin1.")
return 'latin1'
def _driver_kwargs(self):
diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py
index 4e57e3cee..fd32f2235 100644
--- a/lib/sqlalchemy/dialects/oracle/__init__.py
+++ b/lib/sqlalchemy/dialects/oracle/__init__.py
@@ -17,8 +17,8 @@ from sqlalchemy.dialects.oracle.base import \
__all__ = (
-'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
-'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
-'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
-'VARCHAR2', 'NVARCHAR2', 'ROWID'
+ 'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
+ 'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
+ 'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
+ 'VARCHAR2', 'NVARCHAR2', 'ROWID'
)
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index 781fc601f..40ba051f7 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -9,31 +9,37 @@
.. dialect:: oracle
:name: Oracle
- Oracle version 8 through current (11g at the time of this writing) are supported.
+ Oracle versions 8 through current (11g at the time of this writing) are
+ supported.
Connect Arguments
-----------------
-The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
-affect the behavior of the dialect regardless of driver in use.
+The dialect supports several :func:`~sqlalchemy.create_engine()` arguments
+which affect the behavior of the dialect regardless of driver in use.
-* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
- to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
+* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8).
+ Defaults to ``True``. If ``False``, Oracle-8 compatible constructs are used
+ for joins.
-* ``optimize_limits`` - defaults to ``False``. see the section on LIMIT/OFFSET.
+* ``optimize_limits`` - defaults to ``False``. see the section on
+ LIMIT/OFFSET.
-* ``use_binds_for_limits`` - defaults to ``True``. see the section on LIMIT/OFFSET.
+* ``use_binds_for_limits`` - defaults to ``True``. see the section on
+ LIMIT/OFFSET.
Auto Increment Behavior
-----------------------
-SQLAlchemy Table objects which include integer primary keys are usually assumed to have
-"autoincrementing" behavior, meaning they can generate their own primary key values upon
-INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
-to produce these values. With the Oracle dialect, *a sequence must always be explicitly
-specified to enable autoincrement*. This is divergent with the majority of documentation
-examples which assume the usage of an autoincrement-capable database. To specify sequences,
-use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
+SQLAlchemy Table objects which include integer primary keys are usually
+assumed to have "autoincrementing" behavior, meaning they can generate their
+own primary key values upon INSERT. Since Oracle has no "autoincrement"
+feature, SQLAlchemy relies upon sequences to produce these values. With the
+Oracle dialect, *a sequence must always be explicitly specified to enable
+autoincrement*. This diverges from the majority of documentation
+examples which assume the usage of an autoincrement-capable database. To
+specify sequences, use the sqlalchemy.schema.Sequence object which is passed
+to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
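A complete, self-contained version of this pattern for reference (table,
column and sequence names are illustrative)::

    from sqlalchemy import (Table, Column, Integer, String, MetaData,
                            Sequence)

    metadata = MetaData()
    t = Table('mytable', metadata,
              Column('id', Integer, Sequence('id_seq'), primary_key=True),
              Column('data', String(50)))
    # on INSERT, the Oracle dialect emits id_seq.nextval for the key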
@@ -50,15 +56,16 @@ This step is also required when using table reflection, i.e. autoload=True::
Identifier Casing
-----------------
-In Oracle, the data dictionary represents all case insensitive identifier names
-using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
-name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
-to and from those two formats during schema level communication, such as reflection of
-tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
-case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
-against data dictionary data received from Oracle, so unless identifier names have been
-truly created as case sensitive (i.e. using quoted names), all lowercase names should be
-used on the SQLAlchemy side.
+In Oracle, the data dictionary represents all case insensitive identifier
+names using UPPERCASE text. SQLAlchemy on the other hand considers an
+all-lower case identifier name to be case insensitive. The Oracle dialect
+converts all case insensitive identifiers to and from those two formats during
+schema level communication, such as reflection of tables and indexes. Using
+an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
+identifier, and SQLAlchemy will quote the name - this will cause mismatches
+against data dictionary data received from Oracle, so unless identifier names
+have been truly created as case sensitive (i.e. using quoted names), all
+lowercase names should be used on the SQLAlchemy side.
LIMIT/OFFSET Support
@@ -71,44 +78,49 @@ http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
There are two options which affect its behavior:
-* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
- optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
-* the values passed for the limit/offset are sent as bound parameters. Some users have observed
- that Oracle produces a poor query plan when the values are sent as binds and not
- rendered literally. To render the limit/offset values literally within the SQL
- statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
-
-Some users have reported better performance when the entirely different approach of a
-window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
-that the majority of users don't observe this). To suit this case the
-method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
+* the "FIRST ROWS()" optimization keyword is not used by default. To enable
+ the usage of this optimization directive, specify ``optimize_limits=True``
+ to :func:`.create_engine`.
+* the values passed for the limit/offset are sent as bound parameters. Some
+ users have observed that Oracle produces a poor query plan when the values
+ are sent as binds and not rendered literally. To render the limit/offset
+ values literally within the SQL statement, specify
+ ``use_binds_for_limits=False`` to :func:`.create_engine`.
+
+Some users have reported better performance when the entirely different
+approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
+provide LIMIT/OFFSET (note that the majority of users don't observe this).
+To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
+See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
-which installs a select compiler that overrides the generation of limit/offset with
-a window function.
+which installs a select compiler that overrides the generation of limit/offset
+with a window function.
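Both flags described in the list above are ordinary :func:`.create_engine`
arguments; a sketch combining them (DSN illustrative)::

    from sqlalchemy import create_engine

    # optimize_limits adds the FIRST_ROWS hint; use_binds_for_limits=False
    # renders the limit/offset values literally into the SQL
    engine = create_engine("oracle://scott:tiger@dsn",
                           optimize_limits=True,
                           use_binds_for_limits=False)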
.. _oracle_returning:
RETURNING Support
-----------------
-The Oracle database supports a limited form of RETURNING, in order to retrieve result
-sets of matched rows from INSERT, UPDATE and DELETE statements. Oracle's
-RETURNING..INTO syntax only supports one row being returned, as it relies upon
-OUT parameters in order to function. In addition, supported DBAPIs have further
-limitations (see :ref:`cx_oracle_returning`).
+The Oracle database supports a limited form of RETURNING, in order to retrieve
+result sets of matched rows from INSERT, UPDATE and DELETE statements.
+Oracle's RETURNING..INTO syntax only supports one row being returned, as it
+relies upon OUT parameters in order to function. In addition, supported
+DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
-SQLAlchemy's "implicit returning" feature, which employs RETURNING within an INSERT
-and sometimes an UPDATE statement in order to fetch newly generated primary key values
-and other SQL defaults and expressions, is normally enabled on the Oracle
-backend. By default, "implicit returning" typically only fetches the value of a
-single ``nextval(some_seq)`` expression embedded into an INSERT in order to increment
-a sequence within an INSERT statement and get the value back at the same time.
-To disable this feature across the board, specify ``implicit_returning=False`` to
-:func:`.create_engine`::
+SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
+INSERT and sometimes an UPDATE statement in order to fetch newly generated
+primary key values and other SQL defaults and expressions, is normally enabled
+on the Oracle backend. By default, "implicit returning" typically only
+fetches the value of a single ``nextval(some_seq)`` expression embedded into
+an INSERT in order to increment a sequence within an INSERT statement and get
+the value back at the same time. To disable this feature across the board,
+specify ``implicit_returning=False`` to :func:`.create_engine`::
- engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False)
+ engine = create_engine("oracle://scott:tiger@dsn",
+ implicit_returning=False)
-Implicit returning can also be disabled on a table-by-table basis as a table option::
+Implicit returning can also be disabled on a table-by-table basis as a table
+option::
# Core Table
my_table = Table("my_table", metadata, ..., implicit_returning=False)
@@ -121,13 +133,15 @@ Implicit returning can also be disabled on a table-by-table basis as a table opt
.. seealso::
- :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on implicit returning.
+ :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on
+ implicit returning.
ON UPDATE CASCADE
-----------------
-Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
-is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
+Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger-based
+solution is available at
+http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
@@ -137,29 +151,32 @@ and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
-When Oracle 8 is detected, the dialect internally configures itself to the following
-behaviors:
+When Oracle 8 is detected, the dialect internally configures itself to the
+following behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
- the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
- instead. This because these types don't seem to work correctly on Oracle 8
- even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
- and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
+ the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
+ issued instead. This is because these types don't seem to work correctly on
+ Oracle 8 even though they are available. The
+ :class:`~sqlalchemy.types.NVARCHAR` and
+ :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
+ NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
- encodes all Python unicode objects to "string" before passing in as bind parameters.
+ encodes all Python unicode objects to "string" before passing in as bind
+ parameters.
Synonym/DBLINK Reflection
-------------------------
-When using reflection with Table objects, the dialect can optionally search for tables
-indicated by synonyms, either in local or remote schemas or accessed over DBLINK,
-by passing the flag ``oracle_resolve_synonyms=True`` as a
-keyword argument to the :class:`.Table` construct::
+When using reflection with Table objects, the dialect can optionally search
+for tables indicated by synonyms, either in local or remote schemas or
+accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
+a keyword argument to the :class:`.Table` construct::
some_table = Table('some_table', autoload=True,
autoload_with=some_engine,
@@ -167,9 +184,10 @@ keyword argument to the :class:`.Table` construct::
When this flag is set, the given name (such as ``some_table`` above) will
be searched not just in the ``ALL_TABLES`` view, but also within the
-``ALL_SYNONYMS`` view to see if this name is actually a synonym to another name.
-If the synonym is located and refers to a DBLINK, the oracle dialect knows
-how to locate the table's information using DBLINK syntax (e.g. ``@dblink``).
+``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
+name. If the synonym is located and refers to a DBLINK, the oracle dialect
+knows how to locate the table's information using DBLINK syntax (e.g.
+``@dblink``).
``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
accepted, including methods such as :meth:`.MetaData.reflect` and
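Since this is a reflection option, the same flag can also travel through
:meth:`.MetaData.reflect`; a brief sketch (the engine variable is
illustrative)::

    from sqlalchemy import MetaData

    metadata = MetaData()
    metadata.reflect(bind=some_engine, only=['some_table'],
                     oracle_resolve_synonyms=True)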
@@ -202,24 +220,25 @@ import re
from sqlalchemy import util, sql
from sqlalchemy.engine import default, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
-from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
+from sqlalchemy.sql import (operators as sql_operators,
+ functions as sql_functions)
from sqlalchemy import types as sqltypes, schema as sa_schema
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
- BLOB, CLOB, TIMESTAMP, FLOAT
+ BLOB, CLOB, TIMESTAMP, FLOAT
RESERVED_WORDS = \
- set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '\
- 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '\
- 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '\
- 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '\
- 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '\
- 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '\
- 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '\
- 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '\
+ set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '
+ 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '
+ 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '
+ 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '
+ 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '
+ 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '
+ 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '
+ 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '
'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split())
NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER '
- 'CURRENT_TIME CURRENT_TIMESTAMP'.split())
+ 'CURRENT_TIME CURRENT_TIMESTAMP'.split())
class RAW(sqltypes._Binary):
@@ -244,7 +263,8 @@ class NUMBER(sqltypes.Numeric, sqltypes.Integer):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
- super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
+ super(NUMBER, self).__init__(
+ precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
@@ -267,7 +287,8 @@ class DOUBLE_PRECISION(sqltypes.Numeric):
if asdecimal is None:
asdecimal = False
- super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
+ super(DOUBLE_PRECISION, self).__init__(
+ precision=precision, scale=scale, asdecimal=asdecimal)
class BFILE(sqltypes.LargeBinary):
@@ -277,6 +298,7 @@ class BFILE(sqltypes.LargeBinary):
class LONG(sqltypes.Text):
__visit_name__ = 'LONG'
+
class DATE(sqltypes.DateTime):
"""Provide the oracle DATE type.
@@ -289,7 +311,6 @@ class DATE(sqltypes.DateTime):
"""
__visit_name__ = 'DATE'
-
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
@@ -298,18 +319,19 @@ class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = 'INTERVAL'
def __init__(self,
- day_precision=None,
- second_precision=None):
+ day_precision=None,
+ second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
- :param day_precision: the day precision value. this is the number of digits
- to store for the day field. Defaults to "2"
- :param second_precision: the second precision value. this is the number of digits
- to store for the fractional seconds field. Defaults to "6".
+ :param day_precision: the day precision value. this is the number of
+ digits to store for the day field. Defaults to "2"
+ :param second_precision: the second precision value. this is the
+ number of digits to store for the fractional seconds field.
+ Defaults to "6".
"""
self.day_precision = day_precision
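A short usage sketch of the type (table and column names illustrative)::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.dialects.oracle import INTERVAL

    t = Table('events', MetaData(),
              Column('id', Integer, primary_key=True),
              # rendered as INTERVAL DAY(2) TO SECOND(6) in DDL
              Column('duration', INTERVAL(day_precision=2,
                                          second_precision=6)))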
@@ -385,11 +407,11 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
def visit_INTERVAL(self, type_):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
- "(%d)" % type_.day_precision or
- "",
+ "(%d)" % type_.day_precision or
+ "",
type_.second_precision is not None and
- "(%d)" % type_.second_precision or
- "",
+ "(%d)" % type_.second_precision or
+ "",
)
def visit_LONG(self, type_):
@@ -483,7 +505,7 @@ class OracleCompiler(compiler.SQLCompiler):
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{
- expression.CompoundSelect.EXCEPT: 'MINUS'
+ expression.CompoundSelect.EXCEPT: 'MINUS'
}
)
@@ -504,7 +526,7 @@ class OracleCompiler(compiler.SQLCompiler):
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (self.process(binary.left),
- self.process(binary.right))
+ self.process(binary.right))
def visit_true(self, expr, **kw):
return '1'
@@ -542,8 +564,7 @@ class OracleCompiler(compiler.SQLCompiler):
else:
right = join.right
return self.process(join.left, **kwargs) + \
- ", " + self.process(right, **kwargs)
-
+ ", " + self.process(right, **kwargs)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
@@ -556,8 +577,8 @@ class OracleCompiler(compiler.SQLCompiler):
binary.left = _OuterJoinColumn(binary.left)
elif join.right.is_derived_from(binary.right.table):
binary.right = _OuterJoinColumn(binary.right)
- clauses.append(visitors.cloned_traverse(join.onclause, {},
- {'binary': visit_binary}))
+ clauses.append(visitors.cloned_traverse(
+ join.onclause, {}, {'binary': visit_binary}))
else:
clauses.append(join.onclause)
@@ -580,46 +601,54 @@ class OracleCompiler(compiler.SQLCompiler):
return self.process(vc.column) + "(+)"
def visit_sequence(self, seq):
- return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
+ return (self.dialect.identifier_preparer.format_sequence(seq) +
+ ".nextval")
def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
- """Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
+ """Oracle doesn't like ``FROM table AS alias``. Is the AS standard
+ SQL??
+ """
if asfrom or ashint:
alias_name = isinstance(alias.name, expression._truncated_label) and \
- self._truncated_identifier("alias", alias.name) or alias.name
+ self._truncated_identifier("alias", alias.name) or alias.name
if ashint:
return alias_name
elif asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + \
- " " + self.preparer.format_alias(alias, alias_name)
+ " " + self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def returning_clause(self, stmt, returning_cols):
columns = []
binds = []
- for i, column in enumerate(expression._select_iterables(returning_cols)):
+ for i, column in enumerate(
+ expression._select_iterables(returning_cols)):
if column.type._has_column_expression:
col_expr = column.type.column_expression(column)
else:
col_expr = column
outparam = sql.outparam("ret_%d" % i, type_=column.type)
self.binds[outparam.key] = outparam
- binds.append(self.bindparam_string(self._truncate_bindparam(outparam)))
- columns.append(self.process(col_expr, within_columns_clause=False))
+ binds.append(
+ self.bindparam_string(self._truncate_bindparam(outparam)))
+ columns.append(
+ self.process(col_expr, within_columns_clause=False))
self.result_map[outparam.key] = (
outparam.key,
(column, getattr(column, 'name', None),
- getattr(column, 'key', None)),
+ getattr(column, 'key', None)),
column.type
)
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
- """Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
+ """Need to determine how to get ``LIMIT``/``OFFSET`` into a
+ ``UNION`` for Oracle.
+ """
pass
def visit_select(self, select, **kwargs):
@@ -630,7 +659,7 @@ class OracleCompiler(compiler.SQLCompiler):
if not getattr(select, '_oracle_visit', None):
if not self.dialect.use_ansi:
froms = self._display_froms_for_select(
- select, kwargs.get('asfrom', False))
+ select, kwargs.get('asfrom', False))
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
@@ -639,15 +668,17 @@ class OracleCompiler(compiler.SQLCompiler):
limit_clause = select._limit_clause
offset_clause = select._offset_clause
if limit_clause is not None or offset_clause is not None:
- # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
+ # See http://www.oracle.com/technology/oramag/oracle/06-sep/\
+ # o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
- # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
- # select distinct ... where ... order by ...
+ # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from
+ # ( select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
- # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
+ # Outer select and "ROWNUM as ora_rn" can be dropped if
+ # limit=0
# TODO: use annotations instead of clone + attr set ?
select = select._generate()
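Filled in with concrete values, the wrapping described in the comment above
can be observed directly (formatting of the output approximate)::

    from sqlalchemy import select, table, column
    from sqlalchemy.dialects import oracle

    t = table('t', column('id'), column('data'))
    stmt = select([t]).limit(10).offset(5)
    # prints the nested ROWNUM / ora_rn form sketched above
    print(stmt.compile(dialect=oracle.dialect()))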
@@ -659,8 +690,8 @@ class OracleCompiler(compiler.SQLCompiler):
self.dialect.optimize_limits and \
select._simple_int_limit:
limitselect = limitselect.prefix_with(
- "/*+ FIRST_ROWS(%d) */" %
- select._limit)
+ "/*+ FIRST_ROWS(%d) */" %
+ select._limit)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
@@ -680,7 +711,7 @@ class OracleCompiler(compiler.SQLCompiler):
if offset_clause is not None:
max_row = max_row + offset_clause
limitselect.append_whereclause(
- sql.literal_column("ROWNUM") <= max_row)
+ sql.literal_column("ROWNUM") <= max_row)
# If needed, add the ora_rn, and wrap again with offset.
if offset_clause is None:
@@ -688,20 +719,20 @@ class OracleCompiler(compiler.SQLCompiler):
select = limitselect
else:
limitselect = limitselect.column(
- sql.literal_column("ROWNUM").label("ora_rn"))
+ sql.literal_column("ROWNUM").label("ora_rn"))
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
- [c for c in limitselect.c if c.key != 'ora_rn'])
+ [c for c in limitselect.c if c.key != 'ora_rn'])
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
if not self.dialect.use_binds_for_limits:
offset_clause = sql.literal_column(
- "%d" % select._offset)
+ "%d" % select._offset)
offsetselect.append_whereclause(
- sql.literal_column("ora_rn") > offset_clause)
+ sql.literal_column("ora_rn") > offset_clause)
offsetselect._for_update_arg = select._for_update_arg
select = offsetselect
@@ -720,9 +751,9 @@ class OracleCompiler(compiler.SQLCompiler):
if select._for_update_arg.of:
tmp += ' OF ' + ', '.join(
- self.process(elem) for elem in
- select._for_update_arg.of
- )
+ self.process(elem) for elem in
+ select._for_update_arg.of
+ )
if select._for_update_arg.nowait:
tmp += " NOWAIT"
@@ -738,18 +769,20 @@ class OracleDDLCompiler(compiler.DDLCompiler):
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
- # its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
+ # it's only available via triggers
+ # http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
- "functionality - onupdates will not be rendered for foreign keys. "
- "Consider using deferrable=True, initially='deferred' or triggers.")
+ "functionality - onupdates will not be rendered for foreign "
+ "keys. Consider using deferrable=True, initially='deferred' "
+ "or triggers.")
return text
def visit_create_index(self, create, **kw):
return super(OracleDDLCompiler, self).\
- visit_create_index(create, include_schema=True)
+ visit_create_index(create, include_schema=True)
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
@@ -767,14 +800,16 @@ class OracleIdentifierPreparer(compiler.IdentifierPreparer):
def format_savepoint(self, savepoint):
name = re.sub(r'^_+', '', savepoint.ident)
- return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
+ return super(
+ OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
- return self._execute_scalar("SELECT " +
- self.dialect.identifier_preparer.format_sequence(seq) +
- ".nextval FROM DUAL", type_)
+ return self._execute_scalar(
+ "SELECT " +
+ self.dialect.identifier_preparer.format_sequence(seq) +
+ ".nextval FROM DUAL", type_)
class OracleDialect(default.DefaultDialect):
@@ -811,10 +846,10 @@ class OracleDialect(default.DefaultDialect):
]
def __init__(self,
- use_ansi=True,
- optimize_limits=False,
- use_binds_for_limits=True,
- **kwargs):
+ use_ansi=True,
+ optimize_limits=False,
+ use_binds_for_limits=True,
+ **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
@@ -823,9 +858,9 @@ class OracleDialect(default.DefaultDialect):
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
- 'implicit_returning',
- self.server_version_info > (10, )
- )
+ 'implicit_returning',
+ self.server_version_info > (10, )
+ )
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
@@ -835,7 +870,7 @@ class OracleDialect(default.DefaultDialect):
@property
def _is_oracle_8(self):
return self.server_version_info and \
- self.server_version_info < (9, )
+ self.server_version_info < (9, )
@property
def _supports_char_length(self):
@@ -855,7 +890,8 @@ class OracleDialect(default.DefaultDialect):
cursor = connection.execute(
sql.text("SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"),
- name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema))
+ name=self.denormalize_name(table_name),
+ schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
@@ -863,8 +899,10 @@ class OracleDialect(default.DefaultDialect):
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT sequence_name FROM all_sequences "
- "WHERE sequence_name = :name AND sequence_owner = :schema_name"),
- name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema))
+ "WHERE sequence_name = :name AND "
+ "sequence_owner = :schema_name"),
+ name=self.denormalize_name(sequence_name),
+ schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def normalize_name(self, name):
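These helpers implement the case-folding convention described in the
"Identifier Casing" notes above; a minimal illustration of the idea,
ignoring the quoting checks::

    # data dictionary form <-> SQLAlchemy form
    name = "MY_TABLE"
    normalized = name.lower() if name.upper() == name else name
    assert normalized == "my_table"        # normalize_name direction
    assert normalized.upper() == name      # denormalize_name direction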
@@ -873,8 +911,8 @@ class OracleDialect(default.DefaultDialect):
if util.py2k:
if isinstance(name, str):
name = name.decode(self.encoding)
- if name.upper() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
+ if name.upper() == name and not \
+ self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
@@ -882,7 +920,8 @@ class OracleDialect(default.DefaultDialect):
def denormalize_name(self, name):
if name is None:
return None
- elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
+ elif name.lower() == name and not \
+ self.identifier_preparer._requires_quotes(name.lower()):
name = name.upper()
if util.py2k:
if not self.supports_unicode_binds:
@@ -892,18 +931,21 @@ class OracleDialect(default.DefaultDialect):
return name
def _get_default_schema_name(self, connection):
- return self.normalize_name(connection.execute('SELECT USER FROM DUAL').scalar())
+ return self.normalize_name(
+ connection.execute('SELECT USER FROM DUAL').scalar())
- def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None):
+ def _resolve_synonym(self, connection, desired_owner=None,
+ desired_synonym=None, desired_table=None):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
- returns the actual name, owner, dblink name, and synonym name if found.
+ returns the actual name, owner, dblink name, and synonym name if
+ found.
"""
q = "SELECT owner, table_owner, table_name, db_link, "\
- "synonym_name FROM all_synonyms WHERE "
+ "synonym_name FROM all_synonyms WHERE "
clauses = []
params = {}
if desired_synonym:
@@ -922,16 +964,20 @@ class OracleDialect(default.DefaultDialect):
if desired_owner:
row = result.first()
if row:
- return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
+ return (row['table_name'], row['table_owner'],
+ row['db_link'], row['synonym_name'])
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
- raise AssertionError("There are multiple tables visible to the schema, you must specify owner")
+ raise AssertionError(
+ "There are multiple tables visible to the schema, you "
+ "must specify owner")
elif len(rows) == 1:
row = rows[0]
- return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
+ return (row['table_name'], row['table_owner'],
+ row['db_link'], row['synonym_name'])
else:
return None, None, None, None
@@ -941,10 +987,10 @@ class OracleDialect(default.DefaultDialect):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
- connection,
- desired_owner=self.denormalize_name(schema),
- desired_synonym=self.denormalize_name(table_name)
- )
+ connection,
+ desired_owner=self.denormalize_name(schema),
+ desired_synonym=self.denormalize_name(table_name)
+ )
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
@@ -957,8 +1003,8 @@ class OracleDialect(default.DefaultDialect):
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
owner = connection.scalar(
- sql.text("SELECT username FROM user_db_links "
- "WHERE db_link=:link"), link=dblink)
+ sql.text("SELECT username FROM user_db_links "
+ "WHERE db_link=:link"), link=dblink)
dblink = "@" + dblink
elif not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
@@ -980,7 +1026,8 @@ class OracleDialect(default.DefaultDialect):
schema = self.default_schema_name
s = sql.text(
"SELECT table_name FROM all_tables "
- "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') "
+ "WHERE nvl(tablespace_name, 'no tablespace') NOT IN "
+ "('SYSTEM', 'SYSAUX') "
"AND OWNER = :owner "
"AND IOT_NAME IS NULL")
cursor = connection.execute(s, owner=schema)
@@ -1021,9 +1068,9 @@ class OracleDialect(default.DefaultDialect):
params = {"table_name": table_name}
text = "SELECT column_name, data_type, %(char_length_col)s, "\
- "data_precision, data_scale, "\
- "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
- "WHERE table_name = :table_name"
+ "data_precision, data_scale, "\
+ "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
+ "WHERE table_name = :table_name"
if schema is not None:
params['owner'] = schema
text += " AND owner = :owner "
@@ -1034,7 +1081,8 @@ class OracleDialect(default.DefaultDialect):
for row in c:
(colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
- (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5] == 'Y', row[6])
+ (self.normalize_name(row[0]), row[0], row[1], row[2],
+ row[3], row[4], row[5] == 'Y', row[6])
if coltype == 'NUMBER':
coltype = NUMBER(precision, scale)
@@ -1121,21 +1169,23 @@ class OracleDialect(default.DefaultDialect):
for rset in rp:
if rset.index_name != last_index_name:
remove_if_primary_key(index)
- index = dict(name=self.normalize_name(rset.index_name), column_names=[])
+ index = dict(name=self.normalize_name(rset.index_name),
+ column_names=[])
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
- index['column_names'].append(self.normalize_name(rset.column_name))
+ index['column_names'].append(
+ self.normalize_name(rset.column_name))
last_index_name = rset.index_name
remove_if_primary_key(index)
return indexes
@reflection.cache
def _get_constraint_data(self, connection, table_name, schema=None,
- dblink='', **kw):
+ dblink='', **kw):
params = {'table_name': table_name}
@@ -1184,9 +1234,9 @@ class OracleDialect(default.DefaultDialect):
info_cache=info_cache)
pkeys = []
constraint_name = None
- constraint_data = self._get_constraint_data(connection, table_name,
- schema, dblink,
- info_cache=kw.get('info_cache'))
+ constraint_data = self._get_constraint_data(
+ connection, table_name, schema, dblink,
+ info_cache=kw.get('info_cache'))
for row in constraint_data:
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
@@ -1219,9 +1269,9 @@ class OracleDialect(default.DefaultDialect):
resolve_synonyms, dblink,
info_cache=info_cache)
- constraint_data = self._get_constraint_data(connection, table_name,
- schema, dblink,
- info_cache=kw.get('info_cache'))
+ constraint_data = self._get_constraint_data(
+ connection, table_name, schema, dblink,
+ info_cache=kw.get('info_cache'))
def fkey_rec():
return {
@@ -1236,7 +1286,7 @@ class OracleDialect(default.DefaultDialect):
for row in constraint_data:
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
- row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
+ row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'R':
if remote_table is None:
@@ -1249,23 +1299,28 @@ class OracleDialect(default.DefaultDialect):
rec = fkeys[cons_name]
rec['name'] = cons_name
- local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
+ local_cols, remote_cols = \
+ rec['constrained_columns'], rec['referred_columns']
if not rec['referred_table']:
if resolve_synonyms:
ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
- self._resolve_synonym(
- connection,
- desired_owner=self.denormalize_name(remote_owner),
- desired_table=self.denormalize_name(remote_table)
- )
+ self._resolve_synonym(
+ connection,
+ desired_owner=self.denormalize_name(
+ remote_owner),
+ desired_table=self.denormalize_name(
+ remote_table)
+ )
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
- remote_owner = self.normalize_name(ref_remote_owner)
+ remote_owner = self.normalize_name(
+ ref_remote_owner)
rec['referred_table'] = remote_table
- if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
+ if requested_schema is not None or \
+ self.denormalize_name(remote_owner) != schema:
rec['referred_schema'] = remote_owner
local_cols.append(local_column)
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index bb3c837cc..4a1ceecb1 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -10,7 +10,8 @@
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
- :connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]
+ :connectstring: oracle+cx_oracle://user:pass@host:port/dbname\
+[?key=value&key=value...]
:url: http://cx-oracle.sourceforge.net/
Additional Connect Arguments
@@ -52,21 +53,21 @@ on the URL, or as keyword arguments to :func:`.create_engine()` are:
.. versionadded:: 0.8 specific DBAPI types can be excluded from the
auto_setinputsizes feature via the exclude_setinputsizes attribute.
-* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or alternatively
- an integer value. This value is only available as a URL query string
- argument.
+* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
+ alternatively an integer value. This value is only available as a URL query
+ string argument.
-* ``threaded`` - enable multithreaded access to cx_oracle connections. Defaults
- to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI
- itself.
+* ``threaded`` - enable multithreaded access to cx_oracle connections.
+ Defaults to ``True``. Note that this is the opposite default of the
+ cx_Oracle DBAPI itself.
.. _cx_oracle_unicode:
Unicode
-------
-The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the ability
-to return string results as Python unicode objects natively.
+The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
+ability to return string results as Python unicode objects natively.
When used in Python 3, cx_Oracle returns all strings as Python unicode objects
(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
@@ -74,37 +75,39 @@ unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
column values that are of type ``VARCHAR`` or other non-unicode string types,
it will return values as Python strings (e.g. bytestrings).
-The cx_Oracle SQLAlchemy dialect presents two different options for the use case of
-returning ``VARCHAR`` column values as Python unicode objects under Python 2:
+The cx_Oracle SQLAlchemy dialect presents two different options for the use
+case of returning ``VARCHAR`` column values as Python unicode objects under
+Python 2:
* the cx_Oracle DBAPI has the ability to coerce all string results to Python
unicode objects unconditionally using output type handlers. This has
the advantage that the unicode conversion is global to all statements
at the cx_Oracle driver level, meaning it works with raw textual SQL
statements that have no typing information associated. However, this system
- has been observed to incur signfiicant performance overhead, not only because
- it takes effect for all string values unconditionally, but also because cx_Oracle under
- Python 2 seems to use a pure-Python function call in order to do the
- decode operation, which under cPython can orders of magnitude slower
- than doing it using C functions alone.
-
-* SQLAlchemy has unicode-decoding services built in, and when using SQLAlchemy's
- C extensions, these functions do not use any Python function calls and
- are very fast. The disadvantage to this approach is that the unicode
- conversion only takes effect for statements where the :class:`.Unicode` type
- or :class:`.String` type with ``convert_unicode=True`` is explicitly
- associated with the result column. This is the case for any ORM or Core
- query or SQL expression as well as for a :func:`.text` construct that specifies
- output column types, so in the vast majority of cases this is not an issue.
- However, when sending a completely raw string to :meth:`.Connection.execute`,
- this typing information isn't present, unless the string is handled
- within a :func:`.text` construct that adds typing information.
+ has been observed to incur significant performance overhead, not only
+ because it takes effect for all string values unconditionally, but also
+ because cx_Oracle under Python 2 seems to use a pure-Python function call in
+ order to do the decode operation, which under CPython can be orders of
+ magnitude slower than doing it using C functions alone.
+
+* SQLAlchemy has unicode-decoding services built in, and when using
+ SQLAlchemy's C extensions, these functions do not use any Python function
+ calls and are very fast. The disadvantage to this approach is that the
+ unicode conversion only takes effect for statements where the
+ :class:`.Unicode` type or :class:`.String` type with
+ ``convert_unicode=True`` is explicitly associated with the result column.
+ This is the case for any ORM or Core query or SQL expression as well as for
+ a :func:`.text` construct that specifies output column types, so in the vast
+ majority of cases this is not an issue. However, when sending a completely
+ raw string to :meth:`.Connection.execute`, this typing information isn't
+ present, unless the string is handled within a :func:`.text` construct that
+ adds typing information.
As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
typing system. This keeps cx_Oracle's expensive Python 2 approach
-disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy detects
-that cx_Oracle is returning unicode objects natively and cx_Oracle's system
-is used.
+disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
+detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
+system is used.
To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
@@ -117,12 +120,13 @@ as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::
from sqlalchemy import text, Unicode
- result = conn.execute(text("select username from user").columns(username=Unicode))
+ result = conn.execute(
+ text("select username from user").columns(username=Unicode))
-.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used for
- unicode results of non-unicode datatypes in Python 2, after they were identified as a major
- performance bottleneck. SQLAlchemy's own unicode facilities are used
- instead.
+.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
+ for unicode results of non-unicode datatypes in Python 2, after they were
+ identified as a major performance bottleneck. SQLAlchemy's own unicode
+ facilities are used instead.
.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
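A hedged sketch of re-enabling the driver-level handler described above
(DSN illustrative)::

    from sqlalchemy import create_engine

    # restores the pre-0.9.2 behavior: cx_Oracle itself converts all
    # string results to Python unicode via an output type handler
    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
                           coerce_to_unicode=True)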
@@ -132,38 +136,43 @@ the :func:`.text` feature can be used::
RETURNING Support
-----------------
-The cx_oracle DBAPI supports a limited subset of Oracle's already limited RETURNING support.
-Typically, results can only be guaranteed for at most one column being returned;
-this is the typical case when SQLAlchemy uses RETURNING to get just the value of a
-primary-key-associated sequence value. Additional column expressions will
-cause problems in a non-determinative way, due to cx_oracle's lack of support for
-the OCI_DATA_AT_EXEC API which is required for more complex RETURNING scenarios.
+The cx_oracle DBAPI supports a limited subset of Oracle's already limited
+RETURNING support. Typically, results can only be guaranteed for at most one
+column being returned; this is the typical case when SQLAlchemy uses RETURNING
+to get just the value of a primary-key-associated sequence value.
+Additional column expressions will cause problems in a non-determinative way,
+due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is
+required for more complex RETURNING scenarios.
-For this reason, stability may be enhanced by disabling RETURNING support completely;
-SQLAlchemy otherwise will use RETURNING to fetch newly sequence-generated
-primary keys. As illustrated in :ref:`oracle_returning`::
+For this reason, stability may be enhanced by disabling RETURNING support
+completely; SQLAlchemy otherwise will use RETURNING to fetch newly
+sequence-generated primary keys. As illustrated in :ref:`oracle_returning`::
- engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False)
+ engine = create_engine("oracle://scott:tiger@dsn",
+ implicit_returning=False)
.. seealso::
- http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 - OCI documentation for RETURNING
+ http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693
+ - OCI documentation for RETURNING
- http://sourceforge.net/mailarchive/message.php?msg_id=31338136 - cx_oracle developer commentary
+ http://sourceforge.net/mailarchive/message.php?msg_id=31338136
+ - cx_oracle developer commentary
.. _cx_oracle_lob:
LOB Objects
-----------
-cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts
-these to strings so that the interface of the Binary type is consistent with that of
-other backends, and so that the linkage to a live cursor is not needed in scenarios
-like result.fetchmany() and result.fetchall(). This means that by default, LOB
-objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live
-cursor is broken.
+cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
+converts these to strings so that the interface of the Binary type is
+consistent with that of other backends, and so that the linkage to a live
+cursor is not needed in scenarios like result.fetchmany() and
+result.fetchall(). This means that by default, LOB objects are fully fetched
+unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.
-To disable this processing, pass ``auto_convert_lobs=False`` to :func:`.create_engine()`.
+To disable this processing, pass ``auto_convert_lobs=False`` to
+:func:`.create_engine()`.
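And the corresponding sketch for the flag just mentioned (DSN
illustrative)::

    from sqlalchemy import create_engine

    # leaves cx_oracle.LOB objects unconverted; rows must then be
    # consumed while the originating cursor is still live
    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
                           auto_convert_lobs=False)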
Two Phase Transaction Support
-----------------------------
@@ -314,7 +323,7 @@ class _OracleNumeric(sqltypes.Numeric):
if self.precision is None and self.scale is None:
return processors.to_float
elif not getattr(self, '_is_oracle_number', False) \
- and self.scale is not None:
+ and self.scale is not None:
return processors.to_float
else:
return None
@@ -322,7 +331,7 @@ class _OracleNumeric(sqltypes.Numeric):
# cx_oracle 4 behavior, will assume
# floats
return super(_OracleNumeric, self).\
- result_processor(dialect, coltype)
+ result_processor(dialect, coltype)
class _OracleDate(sqltypes.Date):
@@ -363,7 +372,8 @@ class _NativeUnicodeMixin(object):
return unicode(value)
return process
else:
- return super(_NativeUnicodeMixin, self).bind_processor(dialect)
+ return super(
+ _NativeUnicodeMixin, self).bind_processor(dialect)
# we apply a connection output handler that returns
# unicode in all cases, so the "native_unicode" flag
@@ -392,11 +402,13 @@ class _OracleLong(oracle.LONG):
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
+
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
pass
-class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
+class _OracleUnicodeText(
+ _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
def get_dbapi_type(self, dbapi):
return dbapi.NCLOB
@@ -405,7 +417,8 @@ class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
if lob_processor is None:
return None
- string_processor = sqltypes.UnicodeText.result_processor(self, dialect, coltype)
+ string_processor = sqltypes.UnicodeText.result_processor(
+ self, dialect, coltype)
if string_processor is None:
return lob_processor
@@ -450,7 +463,7 @@ class OracleCompiler_cx_oracle(OracleCompiler):
def bindparam_string(self, name, **kw):
quote = getattr(name, 'quote', None)
if quote is True or quote is not False and \
- self.preparer._bindparam_requires_quotes(name):
+ self.preparer._bindparam_requires_quotes(name):
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
@@ -470,12 +483,12 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
# here. so convert names in quoted_bind_names
# to encoded as well.
quoted_bind_names = \
- dict(
- (fromname.encode(self.dialect.encoding),
- toname.encode(self.dialect.encoding))
- for fromname, toname in
- quoted_bind_names.items()
- )
+ dict(
+ (fromname.encode(self.dialect.encoding),
+ toname.encode(self.dialect.encoding))
+ for fromname, toname in
+ quoted_bind_names.items()
+ )
for param in self.parameters:
for fromname, toname in quoted_bind_names.items():
param[toname] = param[fromname]
@@ -485,29 +498,30 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
# cx_oracle really has issues when you setinputsizes
# on String, including that outparams/RETURNING
# breaks for varchars
- self.set_input_sizes(quoted_bind_names,
- exclude_types=self.dialect.exclude_setinputsizes
- )
+ self.set_input_sizes(
+ quoted_bind_names,
+ exclude_types=self.dialect.exclude_setinputsizes
+ )
# if a single execute, check for outparams
if len(self.compiled_parameters) == 1:
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
dbtype = bindparam.type.dialect_impl(self.dialect).\
- get_dbapi_type(self.dialect.dbapi)
+ get_dbapi_type(self.dialect.dbapi)
if not hasattr(self, 'out_parameters'):
self.out_parameters = {}
if dbtype is None:
raise exc.InvalidRequestError(
- "Cannot create out parameter for parameter "
- "%r - its type %r is not supported by"
- " cx_oracle" %
- (bindparam.key, bindparam.type)
- )
+ "Cannot create out parameter for parameter "
+ "%r - its type %r is not supported by"
+ " cx_oracle" %
+ (bindparam.key, bindparam.type)
+ )
name = self.compiled.bind_names[bindparam]
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][quoted_bind_names.get(name, name)] = \
- self.out_parameters[name]
+ self.out_parameters[name]
def create_cursor(self):
c = self._dbapi_connection.cursor()
@@ -519,9 +533,9 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
def get_result_proxy(self):
if hasattr(self, 'out_parameters') and self.compiled.returning:
returning_params = dict(
- (k, v.getvalue())
- for k, v in self.out_parameters.items()
- )
+ (k, v.getvalue())
+ for k, v in self.out_parameters.items()
+ )
return ReturningResultProxy(self, returning_params)
result = None
@@ -543,25 +557,29 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
if name in self.out_parameters:
type = bind.type
impl_type = type.dialect_impl(self.dialect)
- dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
+ dbapi_type = impl_type.get_dbapi_type(
+ self.dialect.dbapi)
result_processor = impl_type.\
- result_processor(self.dialect,
- dbapi_type)
+ result_processor(self.dialect,
+ dbapi_type)
if result_processor is not None:
out_parameters[name] = \
- result_processor(self.out_parameters[name].getvalue())
+ result_processor(
+ self.out_parameters[name].getvalue())
else:
- out_parameters[name] = self.out_parameters[name].getvalue()
+ out_parameters[name] = self.out_parameters[
+ name].getvalue()
else:
result.out_parameters = dict(
(k, v.getvalue())
- for k, v in self.out_parameters.items()
- )
+ for k, v in self.out_parameters.items()
+ )
return result
-class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
+class OracleExecutionContext_cx_oracle_with_unicode(
+ OracleExecutionContext_cx_oracle):
"""Support WITH_UNICODE in Python 2.xx.
WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
@@ -574,17 +592,19 @@ class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_or
passed as Python unicode objects.
"""
+
def __init__(self, *arg, **kw):
OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
self.statement = util.text_type(self.statement)
def _execute_scalar(self, stmt):
return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
- _execute_scalar(util.text_type(stmt))
+ _execute_scalar(util.text_type(stmt))
class ReturningResultProxy(_result.FullyBufferedResultProxy):
- """Result proxy which stuffs the _returning clause + outparams into the fetch."""
+ """Result proxy which stuffs the _returning clause + outparams
+ into the fetch."""
def __init__(self, context, returning_params):
self._returning_params = returning_params
@@ -598,8 +618,10 @@ class ReturningResultProxy(_result.FullyBufferedResultProxy):
]
def _buffer_rows(self):
- return collections.deque([tuple(self._returning_params["ret_%d" % i]
- for i, c in enumerate(self._returning_params))])
+ return collections.deque(
+ [tuple(self._returning_params["ret_%d" % i]
+ for i, c in enumerate(self._returning_params))]
+ )
class OracleDialect_cx_oracle(OracleDialect):
@@ -610,7 +632,8 @@ class OracleDialect_cx_oracle(OracleDialect):
colspecs = colspecs = {
sqltypes.Numeric: _OracleNumeric,
- sqltypes.Date: _OracleDate, # generic type, assume datetime.date is desired
+ # generic type, assume datetime.date is desired
+ sqltypes.Date: _OracleDate,
sqltypes.LargeBinary: _OracleBinary,
sqltypes.Boolean: oracle._OracleBoolean,
sqltypes.Interval: _OracleInterval,
@@ -637,50 +660,50 @@ class OracleDialect_cx_oracle(OracleDialect):
execute_sequence_format = list
def __init__(self,
- auto_setinputsizes=True,
- exclude_setinputsizes=("STRING", "UNICODE"),
- auto_convert_lobs=True,
- threaded=True,
- allow_twophase=True,
- coerce_to_decimal=True,
- coerce_to_unicode=False,
- arraysize=50, **kwargs):
+ auto_setinputsizes=True,
+ exclude_setinputsizes=("STRING", "UNICODE"),
+ auto_convert_lobs=True,
+ threaded=True,
+ allow_twophase=True,
+ coerce_to_decimal=True,
+ coerce_to_unicode=False,
+ arraysize=50, **kwargs):
OracleDialect.__init__(self, **kwargs)
self.threaded = threaded
self.arraysize = arraysize
self.allow_twophase = allow_twophase
self.supports_timestamp = self.dbapi is None or \
- hasattr(self.dbapi, 'TIMESTAMP')
+ hasattr(self.dbapi, 'TIMESTAMP')
self.auto_setinputsizes = auto_setinputsizes
self.auto_convert_lobs = auto_convert_lobs
if hasattr(self.dbapi, 'version'):
self.cx_oracle_ver = tuple([int(x) for x in
- self.dbapi.version.split('.')])
+ self.dbapi.version.split('.')])
else:
self.cx_oracle_ver = (0, 0, 0)
def types(*names):
return set(
- getattr(self.dbapi, name, None) for name in names
- ).difference([None])
+ getattr(self.dbapi, name, None) for name in names
+ ).difference([None])
self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
self._cx_oracle_string_types = types("STRING", "UNICODE",
- "NCLOB", "CLOB")
+ "NCLOB", "CLOB")
self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
self.coerce_to_unicode = (
- self.cx_oracle_ver >= (5, 0) and
- coerce_to_unicode
- )
+ self.cx_oracle_ver >= (5, 0) and
+ coerce_to_unicode
+ )
self.supports_native_decimal = (
- self.cx_oracle_ver >= (5, 0) and
- coerce_to_decimal
- )
+ self.cx_oracle_ver >= (5, 0) and
+ coerce_to_decimal
+ )
self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)
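+        # note: the keyword arguments accepted above (auto_setinputsizes,
+        # auto_convert_lobs, threaded, arraysize, ...) are passed through
+        # from create_engine(), e.g. (URL hypothetical):
+        #   create_engine("oracle+cx_oracle://scott:tiger@dsn",
+        #                 coerce_to_unicode=True, arraysize=100)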
@@ -688,7 +711,8 @@ class OracleDialect_cx_oracle(OracleDialect):
# this occurs in tests with mock DBAPIs
self._cx_oracle_string_types = set()
self._cx_oracle_with_unicode = False
- elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
+ elif self.cx_oracle_ver >= (5,) and not \
+ hasattr(self.dbapi, 'UNICODE'):
# cx_Oracle WITH_UNICODE mode. *only* python
# unicode objects accepted for anything
self.supports_unicode_statements = True
@@ -696,32 +720,32 @@ class OracleDialect_cx_oracle(OracleDialect):
self._cx_oracle_with_unicode = True
if util.py2k:
- # There's really no reason to run with WITH_UNICODE under Python 2.x.
- # Give the user a hint.
+ # There's really no reason to run with WITH_UNICODE under
+ # Python 2.x. Give the user a hint.
util.warn(
"cx_Oracle is compiled under Python 2.xx using the "
"WITH_UNICODE flag. Consider recompiling cx_Oracle "
- "without this flag, which is in no way necessary for full "
- "support of Unicode. Otherwise, all string-holding bind "
- "parameters must be explicitly typed using SQLAlchemy's "
- "String type or one of its subtypes,"
+ "without this flag, which is in no way necessary for "
+ "full support of Unicode. Otherwise, all string-holding "
+ "bind parameters must be explicitly typed using "
+ "SQLAlchemy's String type or one of its subtypes,"
"or otherwise be passed as Python unicode. "
"Plain Python strings passed as bind parameters will be "
"silently corrupted by cx_Oracle."
- )
+ )
self.execution_ctx_cls = \
- OracleExecutionContext_cx_oracle_with_unicode
+ OracleExecutionContext_cx_oracle_with_unicode
else:
self._cx_oracle_with_unicode = False
if self.cx_oracle_ver is None or \
- not self.auto_convert_lobs or \
- not hasattr(self.dbapi, 'CLOB'):
+ not self.auto_convert_lobs or \
+ not hasattr(self.dbapi, 'CLOB'):
self.dbapi_type_map = {}
else:
# only use this for LOB objects. using it for strings, dates
- # etc. leads to a little too much magic, reflection doesn't know if it should
- # expect encoded strings or unicodes, etc.
+ # etc. leads to a little too much magic, reflection doesn't know
+ # if it should expect encoded strings or unicodes, etc.
self.dbapi_type_map = {
self.dbapi.CLOB: oracle.CLOB(),
self.dbapi.NCLOB: oracle.NCLOB(),
@@ -764,8 +788,8 @@ class OracleDialect_cx_oracle(OracleDialect):
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
return cursor.var(
- cx_Oracle.STRING,
- 255, arraysize=cursor.arraysize)
+ cx_Oracle.STRING,
+ 255, arraysize=cursor.arraysize)
cursor = conn.cursor()
cursor.outputtypehandler = output_type_handler
@@ -796,17 +820,17 @@ class OracleDialect_cx_oracle(OracleDialect):
cx_Oracle = self.dbapi
def output_type_handler(cursor, name, defaultType,
- size, precision, scale):
+ size, precision, scale):
# convert all NUMBER with precision + positive scale to Decimal
# this almost allows "native decimal" mode.
if self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER and \
precision and scale > 0:
return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._to_decimal,
- arraysize=cursor.arraysize)
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._to_decimal,
+ arraysize=cursor.arraysize)
# if NUMBER with zero precision and 0 or neg scale, this appears
# to indicate "ambiguous". Use a slower converter that will
# make a decision based on each value received - the type
@@ -816,10 +840,10 @@ class OracleDialect_cx_oracle(OracleDialect):
defaultType == cx_Oracle.NUMBER \
and not precision and scale <= 0:
return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._detect_decimal,
- arraysize=cursor.arraysize)
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._detect_decimal,
+ arraysize=cursor.arraysize)
# allow all strings to come back natively as Unicode
elif self.coerce_to_unicode and \
defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
@@ -856,7 +880,7 @@ class OracleDialect_cx_oracle(OracleDialect):
dsn=dsn,
threaded=self.threaded,
twophase=self.allow_twophase,
- )
+ )
if util.py2k:
if self._cx_oracle_with_unicode:
@@ -882,9 +906,9 @@ class OracleDialect_cx_oracle(OracleDialect):
def _get_server_version_info(self, connection):
return tuple(
- int(x)
- for x in connection.connection.version.split('.')
- )
+ int(x)
+ for x in connection.connection.version.split('.')
+ )
def is_disconnect(self, e, connection, cursor):
error, = e.args
@@ -924,11 +948,11 @@ class OracleDialect_cx_oracle(OracleDialect):
connection.info['cx_oracle_prepared'] = result
def do_rollback_twophase(self, connection, xid, is_prepared=True,
- recover=False):
+ recover=False):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True,
- recover=False):
+ recover=False):
if not is_prepared:
self.do_commit(connection.connection)
else:
diff --git a/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
index 19a668a3e..82c8e2f0f 100644
--- a/lib/sqlalchemy/dialects/oracle/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
@@ -10,7 +10,8 @@
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
- :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
+ :driverurl: http://www.oracle.com/technology/software/tech/java/\
+sqlj_jdbc/index.html.
"""
import decimal
@@ -18,7 +19,9 @@ import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
-from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
+from sqlalchemy.dialects.oracle.base import (OracleCompiler,
+ OracleDialect,
+ OracleExecutionContext)
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
@@ -40,7 +43,7 @@ class _ZxJDBCDate(sqltypes.Date):
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
- #XXX: does the dialect return Decimal or not???
+ # XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
@@ -61,10 +64,12 @@ class _ZxJDBCNumeric(sqltypes.Numeric):
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
- self.returning_cols = list(expression._select_iterables(returning_cols))
+ self.returning_cols = list(
+ expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
- columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
+ columns = [self.process(c, within_columns_clause=False,
+ result_map=self.result_map)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
@@ -72,12 +77,15 @@ class OracleCompiler_zxjdbc(OracleCompiler):
binds = []
for i, col in enumerate(self.returning_cols):
- dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
+ dbtype = col.type.dialect_impl(
+ self.dialect).get_dbapi_type(self.dialect.dbapi)
self.returning_parameters.append((i + 1, dbtype))
- bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
+ bindparam = sql.bindparam(
+ "ret_%d" % i, value=ReturningParam(dbtype))
self.binds[bindparam.key] = bindparam
- binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
+ binds.append(
+ self.bindparam_string(self._truncate_bindparam(bindparam)))
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
@@ -98,13 +106,17 @@ class OracleExecutionContext_zxjdbc(OracleExecutionContext):
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
- msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
+ msg = '%s [SQLCode: %d]' % (
+ sqle.getMessage(), sqle.getErrorCode())
if sqle.getSQLState() is not None:
msg += ' [SQLState: %s]' % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
- row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
- for index, dbtype in self.compiled.returning_parameters)
+ row = tuple(
+ self.cursor.datahandler.getPyObject(
+ rrs, index, dbtype)
+ for index, dbtype in
+ self.compiled.returning_parameters)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
@@ -165,8 +177,8 @@ class ReturningParam(object):
def __repr__(self):
kls = self.__class__
- return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
- self.type)
+ return '<%s.%s object at 0x%x type=%s>' % (
+ kls.__module__, kls.__name__, id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
@@ -207,13 +219,16 @@ class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
- self.implicit_returning = connection.connection.driverversion >= '10.2'
+ self.implicit_returning = \
+ connection.connection.driverversion >= '10.2'
def _create_jdbc_url(self, url):
- return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
+ return 'jdbc:oracle:thin:@%s:%s:%s' % (
+ url.host, url.port or 1521, url.database)
def _get_server_version_info(self, connection):
- version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
+ version = re.search(
+ r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
return tuple(int(x) for x in version.split('.'))
dialect = OracleDialect_zxjdbc
diff --git a/lib/sqlalchemy/dialects/postgres.py b/lib/sqlalchemy/dialects/postgres.py
index 046be760d..f813e0003 100644
--- a/lib/sqlalchemy/dialects/postgres.py
+++ b/lib/sqlalchemy/dialects/postgres.py
@@ -9,9 +9,10 @@
from sqlalchemy.util import warn_deprecated
warn_deprecated(
- "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. "
- "The new URL format is postgresql[+driver]://<user>:<pass>@<host>/<dbname>"
- )
+ "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to "
+ "'postgresql'. The new URL format is "
+ "postgresql[+driver]://<user>:<pass>@<host>/<dbname>"
+)
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.dialects.postgresql import base
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 8081f75dd..6f23a497b 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -50,22 +50,25 @@ Transaction Isolation Level
All Postgresql dialects support setting of transaction isolation level
both via a dialect-specific parameter ``isolation_level``
accepted by :func:`.create_engine`,
-as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`.
-When using a non-psycopg2 dialect, this feature works by issuing the
-command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
-<level>`` for each new connection.
+as well as the ``isolation_level`` argument as passed to
+:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect,
+this feature works by issuing the command
+``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
+each new connection.
To set isolation level using :func:`.create_engine`::
engine = create_engine(
- "postgresql+pg8000://scott:tiger@localhost/test",
- isolation_level="READ UNCOMMITTED"
- )
+ "postgresql+pg8000://scott:tiger@localhost/test",
+ isolation_level="READ UNCOMMITTED"
+ )
To set using per-connection execution options::
connection = engine.connect()
- connection = connection.execution_options(isolation_level="READ COMMITTED")
+ connection = connection.execution_options(
+ isolation_level="READ COMMITTED"
+ )
Valid values for ``isolation_level`` include:
@@ -93,12 +96,13 @@ The Postgresql dialect can reflect tables from any schema. The
:paramref:`.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`.Table` objects
-will in all cases retain this ``.schema`` attribute as was specified. However,
-with regards to tables which these :class:`.Table` objects refer to via
-foreign key constraint, a decision must be made as to how the ``.schema``
+will in all cases retain this ``.schema`` attribute as was specified.
+However, with regard to tables which these :class:`.Table` objects refer to
+via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
-`Postgresql search path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
+`Postgresql search path
+<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
@@ -115,7 +119,8 @@ illustrates this behavior::
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
@@ -124,10 +129,11 @@ illustrates this behavior::
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
-Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however
-when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()``
-for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the
-output of the function.
+Above, we created a table ``referred`` as a member of the remote schema
+``test_schema``; however, when we added ``test_schema`` to the
+PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
+``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
+the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
@@ -139,7 +145,8 @@ The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
@@ -157,7 +164,8 @@ reflection process as follows::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn)
+ ... referring = Table('referring', meta,
+ ... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
@@ -167,16 +175,18 @@ The above process would deliver to the :attr:`.MetaData.tables` collection
>>> meta.tables['referred'].schema is None
True
-To alter the behavior of reflection such that the referred schema is maintained
-regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path``
-option, which can be specified as a dialect-specific argument to both
-:class:`.Table` as well as :meth:`.MetaData.reflect`::
+To alter the behavior of reflection such that the referred schema is
+maintained regardless of the ``search_path`` setting, use the
+``postgresql_ignore_search_path`` option, which can be specified as a
+dialect-specific argument to both :class:`.Table` as well as
+:meth:`.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn,
- ... postgresql_ignore_search_path=True)
+ ... referring = Table('referring', meta, autoload=True,
+ ... autoload_with=conn,
+ ... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
@@ -187,29 +197,33 @@ We will now have ``test_schema.referred`` stored as schema-qualified::
.. sidebar:: Best Practices for Postgresql Schema reflection
- The description of Postgresql schema reflection behavior is complex, and is
- the product of many years of dealing with widely varied use cases and user preferences.
- But in fact, there's no need to understand any of it if you just stick to the simplest
- use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer
- to the name ``public`` as an explicit schema name otherwise, and
- refer to all other schema names explicitly when building
- up a :class:`.Table` object. The options described here are only for those users
- who can't, or prefer not to, stay within these guidelines.
-
-Note that **in all cases**, the "default" schema is always reflected as ``None``.
-The "default" schema on Postgresql is that which is returned by the
-Postgresql ``current_schema()`` function. On a typical Postgresql installation,
-this is the name ``public``. So a table that refers to another which is
-in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute
-set to ``None``.
+ The description of Postgresql schema reflection behavior is complex, and
+ is the product of many years of dealing with widely varied use cases and
+ user preferences. But in fact, there's no need to understand any of it if
+ you just stick to the simplest use pattern: leave the ``search_path`` set
+ to its default of ``public`` only, never refer to the name ``public`` as
+ an explicit schema name otherwise, and refer to all other schema names
+ explicitly when building up a :class:`.Table` object. The options
+ described here are only for those users who can't, or prefer not to, stay
+ within these guidelines.
+
+Note that **in all cases**, the "default" schema is always reflected as
+``None``. The "default" schema on Postgresql is that which is returned by the
+Postgresql ``current_schema()`` function. On a typical Postgresql
+installation, this is the name ``public``. So a table that refers to another
+which is in the ``public`` (i.e. default) schema will always have the
+``.schema`` attribute set to ``None``.
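+A minimal sketch, assuming the ``engine`` and the tables from the examples
+above::
+
+    >>> meta = MetaData()
+    >>> referring = Table('referring', meta, autoload=True,
+    ...                   autoload_with=engine)
+    >>> referring.schema is None
+    True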
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
- dialect-level option accepted by :class:`.Table` and :meth:`.MetaData.reflect`.
+ dialect-level option accepted by :class:`.Table` and
+ :meth:`.MetaData.reflect`.
.. seealso::
- `The Schema Search Path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_ - on the Postgresql website.
+ `The Schema Search Path
+ <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+ - on the Postgresql website.
INSERT/UPDATE...RETURNING
-------------------------
@@ -273,19 +287,19 @@ produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in Postgresql are influenced by a combination of: the
-PostgresSQL setting of ``default_text_search_config``, the ``regconfig`` used to
-build the GIN/GiST indexes, and the ``regconfig`` optionally passed in during a
-query.
+PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
+to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
+during a query.
When performing a Full Text Search against a column that has a GIN or
-GiST index that is already pre-computed (which is common on full text searches)
-one may need to explicitly pass in a particular PostgresSQL ``regconfig`` value
-to ensure the query-planner utilizes the index and does not re-compute the
-column on demand.
+GiST index that is already pre-computed (which is common on full text
+searches), one may need to explicitly pass in a particular PostgreSQL
+``regconfig`` value to ensure the query planner utilizes the index and does
+not re-compute the column on demand.
-In order to provide for this explicit query planning, or to use different search
-strategies, the ``match`` method accepts a ``postgresql_regconfig`` keyword
-argument.
+In order to provide for this explicit query planning, or to use different
+search strategies, the ``match`` method accepts a ``postgresql_regconfig``
+keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
@@ -296,8 +310,8 @@ Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
-One can also specifically pass in a `'regconfig'` value to the ``to_tsvector()``
-command as the initial argument.
+One can also specifically pass in a ``'regconfig'`` value to the
+``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
@@ -310,9 +324,9 @@ produces a statement equivalent to::
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
-It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from PostgresSQL
-to ensure that you are generating queries with SQLAlchemy that take full
-advantage of any indexes you may have created for full text search.
+It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
+PostgreSQL to ensure that you are generating queries with SQLAlchemy that
+take full advantage of any indexes you may have created for full text search.
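+A minimal sketch, assuming the ``engine`` and ``mytable`` fixtures from the
+examples above::
+
+    with engine.connect() as conn:
+        result = conn.execute(
+            "EXPLAIN ANALYZE SELECT mytable.id FROM mytable "
+            "WHERE mytable.title @@ to_tsquery('english', 'somestring')")
+        for row in result:
+            print(row[0])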
FROM ONLY ...
------------------------
@@ -402,26 +416,26 @@ except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
- CHAR, TEXT, FLOAT, NUMERIC, \
- DATE, BOOLEAN, REAL
+ CHAR, TEXT, FLOAT, NUMERIC, \
+ DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
- "asymmetric", "both", "case", "cast", "check", "collate", "column",
- "constraint", "create", "current_catalog", "current_date",
- "current_role", "current_time", "current_timestamp", "current_user",
- "default", "deferrable", "desc", "distinct", "do", "else", "end",
- "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
- "having", "in", "initially", "intersect", "into", "leading", "limit",
- "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset",
- "old", "on", "only", "or", "order", "placing", "primary", "references",
- "returning", "select", "session_user", "some", "symmetric", "table",
- "then", "to", "trailing", "true", "union", "unique", "user", "using",
- "variadic", "when", "where", "window", "with", "authorization",
- "between", "binary", "cross", "current_schema", "freeze", "full",
- "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
- "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
- ])
+ "asymmetric", "both", "case", "cast", "check", "collate", "column",
+ "constraint", "create", "current_catalog", "current_date",
+ "current_role", "current_time", "current_timestamp", "current_user",
+ "default", "deferrable", "desc", "distinct", "do", "else", "end",
+ "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
+ "having", "in", "initially", "intersect", "into", "leading", "limit",
+ "localtime", "localtimestamp", "new", "not", "null", "of", "off",
+ "offset", "old", "on", "only", "or", "order", "placing", "primary",
+ "references", "returning", "select", "session_user", "some", "symmetric",
+ "table", "then", "to", "trailing", "true", "union", "unique", "user",
+ "using", "variadic", "when", "where", "window", "with", "authorization",
+ "between", "binary", "cross", "current_schema", "freeze", "full",
+ "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
+ "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
+ ])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
@@ -452,6 +466,7 @@ PGMacAddr = MACADDR
class OID(sqltypes.TypeEngine):
+
"""Provide the Postgresql OID type.
.. versionadded:: 0.9.5
@@ -461,18 +476,21 @@ class OID(sqltypes.TypeEngine):
class TIMESTAMP(sqltypes.TIMESTAMP):
+
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
+
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
+
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
@@ -511,6 +529,7 @@ PGBit = BIT
class UUID(sqltypes.TypeEngine):
+
"""Postgresql UUID type.
Represents the UUID column type, interpreting
@@ -534,7 +553,8 @@ class UUID(sqltypes.TypeEngine):
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
- "This version of Python does not support the native UUID type."
+ "This version of Python does not support "
+ "the native UUID type."
)
self.as_uuid = as_uuid
@@ -560,7 +580,9 @@ class UUID(sqltypes.TypeEngine):
PGUuid = UUID
+
class TSVECTOR(sqltypes.TypeEngine):
+
"""The :class:`.postgresql.TSVECTOR` type implements the Postgresql
text search type TSVECTOR.
@@ -577,21 +599,21 @@ class TSVECTOR(sqltypes.TypeEngine):
__visit_name__ = 'TSVECTOR'
-
class _Slice(expression.ColumnElement):
__visit_name__ = 'slice'
type = sqltypes.NULLTYPE
def __init__(self, slice_, source_comparator):
self.start = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.start)
+ source_comparator.expr,
+ operators.getitem, slice_.start)
self.stop = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.stop)
+ source_comparator.expr,
+ operators.getitem, slice_.stop)
class Any(expression.ColumnElement):
+
"""Represent the clause ``left operator ANY (right)``. ``right`` must be
an array expression.
@@ -612,6 +634,7 @@ class Any(expression.ColumnElement):
class All(expression.ColumnElement):
+
"""Represent the clause ``left operator ALL (right)``. ``right`` must be
an array expression.
@@ -632,6 +655,7 @@ class All(expression.ColumnElement):
class array(expression.Tuple):
+
"""A Postgresql ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
@@ -673,7 +697,7 @@ class array(expression.Tuple):
def _bind_param(self, operator, obj):
return array(*[
expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
+ _compared_to_type=self.type, unique=True)
for o in obj
])
@@ -682,6 +706,7 @@ class array(expression.Tuple):
class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
+
"""Postgresql ARRAY type.
Represents values as Python lists.
@@ -757,6 +782,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
__visit_name__ = 'ARRAY'
class Comparator(sqltypes.Concatenable.Comparator):
+
"""Define comparison operations for :class:`.ARRAY`."""
def __getitem__(self, index):
@@ -775,7 +801,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
return_type = self.type.item_type
return self._binary_operate(self.expr, operators.getitem, index,
- result_type=return_type)
+ result_type=return_type)
def any(self, other, operator=operators.eq):
"""Return ``other operator ANY (array)`` clause.
@@ -902,7 +928,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
+ "handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
@@ -921,59 +947,60 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
if dim is None:
arr = list(arr)
if dim == 1 or dim is None and (
- # this has to be (list, tuple), or at least
- # not hasattr('__iter__'), since Py3K strings
- # etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
+ # this has to be (list, tuple), or at least
+ # not hasattr('__iter__'), since Py3K strings
+ # etc. have __iter__
+ not arr or not isinstance(arr[0], (list, tuple))):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
- self._proc_array(
- x, itemproc,
- dim - 1 if dim is not None else None,
- collection)
- for x in arr
- )
+ self._proc_array(
+ x, itemproc,
+ dim - 1 if dim is not None else None,
+ collection)
+ for x in arr
+ )
def bind_processor(self, dialect):
item_proc = self.item_type.\
- dialect_impl(dialect).\
- bind_processor(dialect)
+ dialect_impl(dialect).\
+ bind_processor(dialect)
def process(value):
if value is None:
return value
else:
return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
+ value,
+ item_proc,
+ self.dimensions,
+ list)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.\
- dialect_impl(dialect).\
- result_processor(dialect, coltype)
+ dialect_impl(dialect).\
+ result_processor(dialect, coltype)
def process(value):
if value is None:
return value
else:
return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- tuple if self.as_tuple else list)
+ value,
+ item_proc,
+ self.dimensions,
+ tuple if self.as_tuple else list)
return process
PGArray = ARRAY
class ENUM(sqltypes.Enum):
+
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
@@ -1047,7 +1074,8 @@ class ENUM(sqltypes.Enum):
return
if not checkfirst or \
- not bind.dialect.has_type(bind, self.name, schema=self.schema):
+ not bind.dialect.has_type(
+ bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
@@ -1069,7 +1097,7 @@ class ENUM(sqltypes.Enum):
return
if not checkfirst or \
- bind.dialect.has_type(bind, self.name, schema=self.schema):
+ bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
@@ -1144,7 +1172,7 @@ ischema_names = {
'interval': INTERVAL,
'interval year to month': INTERVAL,
'interval day to second': INTERVAL,
- 'tsvector' : TSVECTOR
+ 'tsvector': TSVECTOR
}
@@ -1155,9 +1183,9 @@ class PGCompiler(compiler.SQLCompiler):
def visit_slice(self, element, **kw):
return "%s:%s" % (
- self.process(element.start, **kw),
- self.process(element.stop, **kw),
- )
+ self.process(element.start, **kw),
+ self.process(element.stop, **kw),
+ )
def visit_any(self, element, **kw):
return "%s%sANY (%s)" % (
@@ -1181,7 +1209,7 @@ class PGCompiler(compiler.SQLCompiler):
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
- regconfig = self.render_literal_value(\
+ regconfig = self.render_literal_value(
binary.modifiers['postgresql_regconfig'],
sqltypes.STRINGTYPE)
if regconfig:
@@ -1199,8 +1227,8 @@ class PGCompiler(compiler.SQLCompiler):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
@@ -1210,8 +1238,8 @@ class PGCompiler(compiler.SQLCompiler):
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
@@ -1265,12 +1293,12 @@ class PGCompiler(compiler.SQLCompiler):
if select._for_update_arg.of:
tables = util.OrderedSet(
- c.table if isinstance(c, expression.ColumnClause)
- else c for c in select._for_update_arg.of)
+ c.table if isinstance(c, expression.ColumnClause)
+ else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
- self.process(table, ashint=True)
- for table in tables
- )
+ self.process(table, ashint=True)
+ for table in tables
+ )
if select._for_update_arg.nowait:
tmp += " NOWAIT"
@@ -1280,13 +1308,12 @@ class PGCompiler(compiler.SQLCompiler):
def returning_clause(self, stmt, returning_cols):
columns = [
- self._label_select_column(None, c, True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
+ self._label_select_column(None, c, True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
return 'RETURNING ' + ', '.join(columns)
-
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
@@ -1296,7 +1323,9 @@ class PGCompiler(compiler.SQLCompiler):
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
+
class PGDDLCompiler(compiler.DDLCompiler):
+
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
@@ -1335,7 +1364,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
- for e in type_.enums)
+ for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
@@ -1353,10 +1382,10 @@ class PGDDLCompiler(compiler.DDLCompiler):
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s " % (
- self._prepared_index_name(index,
- include_schema=False),
- preparer.format_table(index.table)
- )
+ self._prepared_index_name(index,
+ include_schema=False),
+ preparer.format_table(index.table)
+ )
using = index.dialect_options['postgresql']['using']
if using:
@@ -1367,20 +1396,20 @@ class PGDDLCompiler(compiler.DDLCompiler):
% (
', '.join([
self.sql_compiler.process(
- expr.self_group()
- if not isinstance(expr, expression.ColumnClause)
- else expr,
- include_table=False, literal_binds=True) +
+ expr.self_group()
+ if not isinstance(expr, expression.ColumnClause)
+ else expr,
+ include_table=False, literal_binds=True) +
(c.key in ops and (' ' + ops[c.key]) or '')
for expr, c in zip(index.expressions, index.columns)])
- )
+ )
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
- whereclause, include_table=False,
- literal_binds=True)
+ whereclause, include_table=False,
+ literal_binds=True)
text += " WHERE " + where_compiled
return text
@@ -1392,17 +1421,19 @@ class PGDDLCompiler(compiler.DDLCompiler):
elements = []
for c in constraint.columns:
op = constraint.operators[c.name]
- elements.append(self.preparer.quote(c.name) + ' WITH '+op)
- text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements))
+ elements.append(self.preparer.quote(c.name) + ' WITH ' + op)
+ text += "EXCLUDE USING %s (%s)" % (constraint.using,
+ ', '.join(elements))
if constraint.where is not None:
text += ' WHERE (%s)' % self.sql_compiler.process(
- constraint.where,
- literal_binds=True)
+ constraint.where,
+ literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
+
def visit_TSVECTOR(self, type):
return "TSVECTOR"
@@ -1509,8 +1540,8 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_ARRAY(self, type_):
return self.process(type_.item_type) + ('[]' * (type_.dimensions
- if type_.dimensions
- is not None else 1))
+ if type_.dimensions
+ is not None else 1))
class PGIdentifierPreparer(compiler.IdentifierPreparer):
@@ -1520,7 +1551,7 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
- replace(self.escape_to_quote, self.escape_quote)
+ replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
@@ -1554,21 +1585,25 @@ class DropEnumType(schema._CreateDropBase):
class PGExecutionContext(default.DefaultExecutionContext):
+
def fire_sequence(self, seq, type_):
- return self._execute_scalar(("select nextval('%s')" % \
- self.dialect.identifier_preparer.format_sequence(seq)), type_)
+ return self._execute_scalar((
+ "select nextval('%s')" %
+ self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
- if column.primary_key and column is column.table._autoincrement_column:
+ if column.primary_key and \
+ column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
- column.server_default.arg, column.type)
+ column.server_default.arg,
+ column.type)
elif (column.default is None or
- (column.default.is_sequence and
- column.default.optional)):
+ (column.default.is_sequence and
+ column.default.optional)):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
@@ -1587,10 +1622,10 @@ class PGExecutionContext(default.DefaultExecutionContext):
sch = column.table.schema
if sch is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
- (sch, seq_name)
+ (sch, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
- (seq_name, )
+ (seq_name, )
return self._execute_scalar(exc, column.type)
@@ -1643,7 +1678,7 @@ class PGDialect(default.DefaultDialect):
_backslash_escapes = True
def __init__(self, isolation_level=None, json_serializer=None,
- json_deserializer=None, **kwargs):
+ json_deserializer=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
@@ -1652,7 +1687,7 @@ class PGDialect(default.DefaultDialect):
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
- self.__dict__.get('implicit_returning', True)
+ self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
@@ -1665,9 +1700,9 @@ class PGDialect(default.DefaultDialect):
self.supports_smallserial = self.server_version_info >= (9, 2)
self._backslash_escapes = self.server_version_info < (8, 2) or \
- connection.scalar(
- "show standard_conforming_strings"
- ) == 'off'
+ connection.scalar(
+ "show standard_conforming_strings"
+ ) == 'off'
def on_connect(self):
if self.isolation_level is not None:
@@ -1677,8 +1712,8 @@ class PGDialect(default.DefaultDialect):
else:
return None
- _isolation_lookup = set(['SERIALIZABLE',
- 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
@@ -1687,7 +1722,7 @@ class PGDialect(default.DefaultDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
@@ -1709,10 +1744,10 @@ class PGDialect(default.DefaultDialect):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ is_prepared=True, recover=False):
if is_prepared:
if recover:
- #FIXME: ugly hack to get out of transaction
+ # FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
@@ -1724,7 +1759,7 @@ class PGDialect(default.DefaultDialect):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
@@ -1736,14 +1771,15 @@ class PGDialect(default.DefaultDialect):
def do_recover_twophase(self, connection):
resultset = connection.execute(
- sql.text("SELECT gid FROM pg_prepared_xacts"))
+ sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
- query = "select nspname from pg_namespace where lower(nspname)=:schema"
+ query = ("select nspname from pg_namespace "
+ "where lower(nspname)=:schema")
cursor = connection.execute(
sql.text(
query,
@@ -1761,25 +1797,27 @@ class PGDialect(default.DefaultDialect):
if schema is None:
cursor = connection.execute(
sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=current_schema() and "
- "relname=:name",
- bindparams=[
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where n.nspname=current_schema() "
+ "and relname=:name",
+ bindparams=[
sql.bindparam('name', util.text_type(table_name),
- type_=sqltypes.Unicode)]
+ type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=:schema and "
- "relname=:name",
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where n.nspname=:schema and "
+ "relname=:name",
bindparams=[
sql.bindparam('name',
- util.text_type(table_name), type_=sqltypes.Unicode),
+ util.text_type(table_name),
+ type_=sqltypes.Unicode),
sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)]
+ util.text_type(schema),
+ type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
@@ -1794,23 +1832,24 @@ class PGDialect(default.DefaultDialect):
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode)
+ type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
- "SELECT relname FROM pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where relkind='S' and "
- "n.nspname=:schema and relname=:name",
- bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)
- ]
- )
+ "SELECT relname FROM pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where relkind='S' and "
+ "n.nspname=:schema and relname=:name",
+ bindparams=[
+ sql.bindparam('name', util.text_type(sequence_name),
+ type_=sqltypes.Unicode),
+ sql.bindparam('schema',
+ util.text_type(schema),
+ type_=sqltypes.Unicode)
+ ]
+ )
)
return bool(cursor.first())
@@ -1836,14 +1875,14 @@ class PGDialect(default.DefaultDialect):
"""
query = sql.text(query)
query = query.bindparams(
- sql.bindparam('typname',
- util.text_type(type_name), type_=sqltypes.Unicode),
- )
+ sql.bindparam('typname',
+ util.text_type(type_name), type_=sqltypes.Unicode),
+ )
if schema is not None:
query = query.bindparams(
- sql.bindparam('nspname',
- util.text_type(schema), type_=sqltypes.Unicode),
- )
+ sql.bindparam('nspname',
+ util.text_type(schema), type_=sqltypes.Unicode),
+ )
cursor = connection.execute(query)
return bool(cursor.scalar())
@@ -1855,7 +1894,7 @@ class PGDialect(default.DefaultDialect):
v)
if not m:
raise AssertionError(
- "Could not determine version from string '%s'" % v)
+ "Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
@@ -1905,11 +1944,11 @@ class PGDialect(default.DefaultDialect):
# what about system tables?
if util.py2k:
- schema_names = [row[0].decode(self.encoding) for row in rp \
- if not row[0].startswith('pg_')]
+ schema_names = [row[0].decode(self.encoding) for row in rp
+ if not row[0].startswith('pg_')]
else:
- schema_names = [row[0] for row in rp \
- if not row[0].startswith('pg_')]
+ schema_names = [row[0] for row in rp
+ if not row[0].startswith('pg_')]
return schema_names
@reflection.cache
@@ -1921,12 +1960,12 @@ class PGDialect(default.DefaultDialect):
result = connection.execute(
sql.text("SELECT relname FROM pg_class c "
- "WHERE relkind = 'r' "
- "AND '%s' = (select nspname from pg_namespace n "
- "where n.oid = c.relnamespace) " %
- current_schema,
- typemap={'relname': sqltypes.Unicode}
- )
+ "WHERE relkind = 'r' "
+ "AND '%s' = (select nspname from pg_namespace n "
+ "where n.oid = c.relnamespace) " %
+ current_schema,
+ typemap={'relname': sqltypes.Unicode}
+ )
)
return [row[0] for row in result]
@@ -1946,7 +1985,7 @@ class PGDialect(default.DefaultDialect):
if util.py2k:
view_names = [row[0].decode(self.encoding)
- for row in connection.execute(s)]
+ for row in connection.execute(s)]
else:
view_names = [row[0] for row in connection.execute(s)]
return view_names
@@ -1991,9 +2030,12 @@ class PGDialect(default.DefaultDialect):
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
- bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
- typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode}
- )
+ bindparams=[
+ sql.bindparam('table_oid', type_=sqltypes.Integer)],
+ typemap={
+ 'attname': sqltypes.Unicode,
+ 'default': sqltypes.Unicode}
+ )
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
@@ -2009,7 +2051,7 @@ class PGDialect(default.DefaultDialect):
def _get_column_info(self, name, format_type, default,
notnull, domains, enums, schema):
- ## strip (*) from character varying(5), timestamp(5)
+ # strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r'\(.*\)', '', format_type)
@@ -2057,7 +2099,7 @@ class PGDialect(default.DefaultDialect):
else:
args = ()
elif attype in ('interval', 'interval year to month',
- 'interval day to second'):
+ 'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
@@ -2112,8 +2154,8 @@ class PGDialect(default.DefaultDialect):
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
- ('"%s"' % sch) + '.' + \
- match.group(2) + match.group(3)
+ ('"%s"' % sch) + '.' + \
+ match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
@@ -2169,7 +2211,7 @@ class PGDialect(default.DefaultDialect):
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None,
- postgresql_ignore_search_path=False, **kw):
+ postgresql_ignore_search_path=False, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
@@ -2192,29 +2234,32 @@ class PGDialect(default.DefaultDialect):
FK_REGEX = re.compile(
r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
- r'[\s]?(ON UPDATE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(ON DELETE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(ON UPDATE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(ON DELETE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
)
t = sql.text(FK_SQL, typemap={
- 'conname': sqltypes.Unicode,
- 'condef': sqltypes.Unicode})
+ 'conname': sqltypes.Unicode,
+ 'condef': sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
constrained_columns, referred_schema, \
- referred_table, referred_columns, \
- _, match, _, onupdate, _, ondelete, \
- deferrable, _, initially = m
+ referred_table, referred_columns, \
+ _, match, _, onupdate, _, ondelete, \
+ deferrable, _, initially = m
if deferrable is not None:
deferrable = True if deferrable == 'DEFERRABLE' else False
constrained_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s*', constrained_columns)]
+ for x in re.split(
+ r'\s*,\s*', constrained_columns)]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
@@ -2228,7 +2273,7 @@ class PGDialect(default.DefaultDialect):
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = \
- preparer._unquote_identifier(referred_schema)
+ preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
@@ -2236,7 +2281,8 @@ class PGDialect(default.DefaultDialect):
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s', referred_columns)]
+ for x in
+ re.split(r'\s*,\s', referred_columns)]
fkey_d = {
'name': conname,
'constrained_columns': constrained_columns,
@@ -2263,9 +2309,9 @@ class PGDialect(default.DefaultDialect):
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
- "%s[%d] = %s" % (compare_to, ind, col)
- for ind in range(0, 10)
- )
+ "%s[%d] = %s" % (compare_to, ind, col)
+ for ind in range(0, 10)
+ )
else:
return "%s = ANY(%s)" % (col, compare_to)
@@ -2297,12 +2343,12 @@ class PGDialect(default.DefaultDialect):
t.relname,
i.relname
""" % (
- # version 8.3 here was based on observing the
- # cast does not work in PG 8.2.4, does work in 8.3.0.
- # nothing in PG changelogs regarding this.
- "::varchar" if self.server_version_info >= (8, 3) else "",
- self._pg_index_any("a.attnum", "ix.indkey")
- )
+ # version 8.3 here was based on observing the
+ # cast does not work in PG 8.2.4, does work in 8.3.0.
+ # nothing in PG changelogs regarding this.
+ "::varchar" if self.server_version_info >= (8, 3) else "",
+ self._pg_index_any("a.attnum", "ix.indkey")
+ )
t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
@@ -2316,16 +2362,16 @@ class PGDialect(default.DefaultDialect):
if expr:
if idx_name != sv_idx_name:
util.warn(
- "Skipped unsupported reflection of "
- "expression-based index %s"
- % idx_name)
+ "Skipped unsupported reflection of "
+ "expression-based index %s"
+ % idx_name)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
- "Predicate of partial index %s ignored during reflection"
- % idx_name)
+ "Predicate of partial index %s ignored during reflection"
+ % idx_name)
sv_idx_name = idx_name
index = indexes[idx_name]
@@ -2356,7 +2402,8 @@ class PGDialect(default.DefaultDialect):
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
- on cons.conrelid = a.attrelid AND a.attnum = ANY(cons.conkey)
+ on cons.conrelid = a.attrelid AND
+ a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
@@ -2381,7 +2428,7 @@ class PGDialect(default.DefaultDialect):
if not self.supports_native_enum:
return {}
- ## Load data types for enums:
+ # Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
@@ -2397,8 +2444,8 @@ class PGDialect(default.DefaultDialect):
"""
s = sql.text(SQL_ENUMS, typemap={
- 'attname': sqltypes.Unicode,
- 'label': sqltypes.Unicode})
+ 'attname': sqltypes.Unicode,
+ 'label': sqltypes.Unicode})
c = connection.execute(s)
enums = {}
@@ -2416,13 +2463,13 @@ class PGDialect(default.DefaultDialect):
enums[name]['labels'].append(enum['label'])
else:
enums[name] = {
- 'labels': [enum['label']],
- }
+ 'labels': [enum['label']],
+ }
return enums
def _load_domains(self, connection):
- ## Load data types for domains:
+ # Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
@@ -2440,7 +2487,7 @@ class PGDialect(default.DefaultDialect):
domains = {}
for domain in c.fetchall():
- ## strip (30) from character varying(30)
+ # strip (30) from character varying(30)
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
@@ -2452,9 +2499,9 @@ class PGDialect(default.DefaultDialect):
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
- 'attype': attype,
- 'nullable': domain['nullable'],
- 'default': domain['default']
- }
+ 'attype': attype,
+ 'nullable': domain['nullable'],
+ 'default': domain['default']
+ }
return domains
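Looking back at the foreign key reflection above, ``condef`` is the text
returned by ``pg_get_constraintdef()``; a minimal sketch of what FK_REGEX
pulls out of it (the ``condef`` value below is hypothetical)::

    import re

    condef = "FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE"
    m = re.search(FK_REGEX, condef).groups()
    # m[0] == 'user_id'   constrained column list
    # m[2] == 'users'     referred table (no schema qualifier here)
    # m[3] == 'id'        referred column list
    # m[9] == 'CASCADE'   the ON DELETE action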
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py
index 2eed2fb36..e8ebc75dd 100644
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ b/lib/sqlalchemy/dialects/postgresql/constraints.py
@@ -6,13 +6,15 @@
from sqlalchemy.schema import ColumnCollectionConstraint
from sqlalchemy.sql import expression
+
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
- __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+ __ http://www.postgresql.org/docs/9.0/\
+static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
@@ -52,7 +54,7 @@ class ExcludeConstraint(ColumnCollectionConstraint):
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
- )
+ )
self.operators = {}
for col_or_string, op in elements:
name = getattr(col_or_string, 'name', col_or_string)
@@ -60,15 +62,14 @@ class ExcludeConstraint(ColumnCollectionConstraint):
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where:
- self.where = expression._literal_as_text(where)
+ self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
- name=self.name,
- deferrable=self.deferrable,
- initially=self.initially)
+ name=self.name,
+ deferrable=self.deferrable,
+ initially=self.initially)
c.dispatch._update(self.dispatch)
return c
-
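A usage sketch for :class:`.ExcludeConstraint`, modeled on the classic
"no overlapping ranges" example from the Postgresql docs (table and column
names hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE

    metadata = MetaData()
    room_booking = Table(
        'room_booking', metadata,
        Column('room', Integer, primary_key=True),
        Column('during', TSRANGE),
        # no two rows for the same room may have overlapping ranges
        ExcludeConstraint(('room', '='), ('during', '&&'), using='gist')
    )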
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index f1fb3d308..9601edc41 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -69,11 +69,13 @@ def _parse_hstore(hstore_str):
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\")
+ key = pair_match.group('key').replace(r'\"', '"').replace(
+ "\\\\", "\\")
if pair_match.group('value_null'):
value = None
else:
- value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\")
+ value = pair_match.group('value').replace(
+ r'\"', '"').replace("\\\\", "\\")
result[key] = value
pos += pair_match.end()
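Illustratively, the loop above turns an hstore literal into a dict (a
sketch; exact escaping follows the ``replace`` calls above)::

    _parse_hstore('"key1"=>"value1", "key2"=>NULL')
    # -> {'key1': 'value1', 'key2': None}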
@@ -140,15 +142,16 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
data_table.c.data + {"k1": "v1"}
- For a full list of special methods see :class:`.HSTORE.comparator_factory`.
+ For a full list of special methods see
+ :class:`.HSTORE.comparator_factory`.
For usage with the SQLAlchemy ORM, it may be desirable to combine
the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary
now part of the :mod:`sqlalchemy.ext.mutable`
extension. This extension will allow "in-place" changes to the
dictionary, e.g. addition of new keys or replacement/removal of existing
- keys to/from the current dictionary, to produce events which will be detected
- by the unit of work::
+ keys to/from the current dictionary, to produce events which will be
+ detected by the unit of work::
from sqlalchemy.ext.mutable import MutableDict
@@ -167,9 +170,9 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
- will not be alerted to any changes to the contents of an existing dictionary,
- unless that dictionary value is re-assigned to the HSTORE-attribute itself,
- thus generating a change event.
+ will not be alerted to any changes to the contents of an existing
+ dictionary, unless that dictionary value is re-assigned to the
+ HSTORE-attribute itself, thus generating a change event.
.. versionadded:: 0.8
@@ -272,6 +275,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
def bind_processor(self, dialect):
if util.py2k:
encoding = dialect.encoding
+
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value).encode(encoding)
@@ -288,6 +292,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
def result_processor(self, dialect, coltype):
if util.py2k:
encoding = dialect.encoding
+
def process(value):
if value is not None:
return _parse_hstore(value.decode(encoding))
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index 902d0a80d..25ac342af 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -27,24 +27,28 @@ class JSONElement(elements.BinaryExpression):
expr = mytable.c.json_data['some_key']
The expression typically compiles to a JSON access such as ``col -> key``.
- Modifiers are then available for typing behavior, including :meth:`.JSONElement.cast`
- and :attr:`.JSONElement.astext`.
+ Modifiers are then available for typing behavior, including
+ :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`.
"""
- def __init__(self, left, right, astext=False, opstring=None, result_type=None):
+
+ def __init__(self, left, right, astext=False,
+ opstring=None, result_type=None):
self._astext = astext
if opstring is None:
if hasattr(right, '__iter__') and \
- not isinstance(right, util.string_types):
+ not isinstance(right, util.string_types):
opstring = "#>"
- right = "{%s}" % (", ".join(util.text_type(elem) for elem in right))
+ right = "{%s}" % (
+ ", ".join(util.text_type(elem) for elem in right))
else:
opstring = "->"
self._json_opstring = opstring
operator = custom_op(opstring, precedence=5)
right = left._check_literal(left, operator, right)
- super(JSONElement, self).__init__(left, right, operator, type_=result_type)
+ super(JSONElement, self).__init__(
+ left, right, operator, type_=result_type)
@property
def astext(self):
@@ -64,12 +68,12 @@ class JSONElement(elements.BinaryExpression):
return self
else:
return JSONElement(
- self.left,
- self.right,
- astext=True,
- opstring=self._json_opstring + ">",
- result_type=sqltypes.String(convert_unicode=True)
- )
+ self.left,
+ self.right,
+ astext=True,
+ opstring=self._json_opstring + ">",
+ result_type=sqltypes.String(convert_unicode=True)
+ )
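A usage sketch of these modifiers (table, column, and key names
hypothetical)::

    from sqlalchemy import Integer

    # text comparison; renders roughly: json_data ->> 'some_key' = :param
    mytable.c.json_data['some_key'].astext == 'some value'

    # astext plus CAST; renders roughly:
    #   CAST(json_data ->> 'some_key' AS INTEGER)
    mytable.c.json_data['some_key'].cast(Integer) > 5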
def cast(self, type_):
"""Convert this :class:`.JSONElement` to apply both the 'astext' operator
@@ -126,15 +130,16 @@ class JSON(sqltypes.TypeEngine):
* Path index operations returning text (required for text comparison)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
+ data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
+ 'some value'
- Index operations return an instance of :class:`.JSONElement`, which represents
- an expression such as ``column -> index``. This element then defines
- methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast`
- for setting up type behavior.
+ Index operations return an instance of :class:`.JSONElement`, which
+ represents an expression such as ``column -> index``. This element then
+ defines methods such as :attr:`.JSONElement.astext` and
+ :meth:`.JSONElement.cast` for setting up type behavior.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect
- in-place mutations to the structure. In order to detect these, the
+ The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+ detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
@@ -178,6 +183,7 @@ class JSON(sqltypes.TypeEngine):
json_serializer = dialect._json_serializer or json.dumps
if util.py2k:
encoding = dialect.encoding
+
def process(value):
return json_serializer(value).encode(encoding)
else:
@@ -189,6 +195,7 @@ class JSON(sqltypes.TypeEngine):
json_deserializer = dialect._json_deserializer or json.loads
if util.py2k:
encoding = dialect.encoding
+
def process(value):
return json_deserializer(value.decode(encoding))
else:
@@ -200,7 +207,6 @@ class JSON(sqltypes.TypeEngine):
ischema_names['json'] = JSON
-
class JSONB(JSON):
"""Represent the Postgresql JSONB type.
@@ -237,15 +243,16 @@ class JSONB(JSON):
* Path index operations returning text (required for text comparison)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
+ data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
+ 'some value'
- Index operations return an instance of :class:`.JSONElement`, which represents
- an expression such as ``column -> index``. This element then defines
- methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast`
- for setting up type behavior.
+ Index operations return an instance of :class:`.JSONElement`, which
+ represents an expression such as ``column -> index``. This element then
+ defines methods such as :attr:`.JSONElement.astext` and
+ :meth:`.JSONElement.cast` for setting up type behavior.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect
- in-place mutations to the structure. In order to detect these, the
+ The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+ detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
@@ -280,7 +287,8 @@ class JSONB(JSON):
return JSONElement(self.expr, other)
def _adapt_expression(self, op, other_comparator):
- # How does one do equality?? jsonb also has "=" eg. '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
+ # How does one do equality?? jsonb also has "=" eg.
+ # '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
if isinstance(op, custom_op):
if op.opstring in ['?', '?&', '?|', '@>', '<@']:
return op, sqltypes.Boolean
@@ -317,4 +325,4 @@ class JSONB(JSON):
"""
return self.expr.op('<@')(other)
-ischema_names['jsonb'] = JSONB \ No newline at end of file
+ischema_names['jsonb'] = JSONB
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index dc5ed6e73..512f3e1b0 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -165,6 +165,6 @@ class PGDialect_pg8000(PGDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s or AUTOCOMMIT" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
dialect = PGDialect_pg8000
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 0ab4abb09..e6450c97f 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -9,7 +9,8 @@
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
- :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
+ :connectstring: postgresql+psycopg2://user:password@host:port/dbname\
+[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
@@ -21,9 +22,9 @@ psycopg2-specific keyword arguments which are accepted by
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
- ``connection.cursor('some name')``, which has the effect that result rows are
- not immediately pre-fetched and buffered after statement execution, but are
- instead left on the server and only retrieved as needed. SQLAlchemy's
+ ``connection.cursor('some name')``, which has the effect that result rows
+ are not immediately pre-fetched and buffered after statement execution, but
+ are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
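A sketch of enabling the feature, either dialect-wide or per statement
(connection string and statement hypothetical)::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        server_side_cursors=True)

    # or per execution, via the stream_results option
    conn = engine.connect()
    result = conn.execution_options(stream_results=True).execute(stmt)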
@@ -54,7 +55,8 @@ using ``host`` as an additional keyword argument::
See also:
-`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static\
+/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
@@ -90,11 +92,13 @@ Typically, this can be changed to ``utf-8``, as a more useful default::
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
-method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
+method (see:
+http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
- engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
+ engine = create_engine("postgresql://user:pass@host/dbname",
+ client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
@@ -128,11 +132,12 @@ Psycopg2 Transaction Isolation Level
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
-as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
-When using the psycopg2 dialect, these options make use of
-psycopg2's ``set_isolation_level()`` connection method, rather than
-emitting a Postgresql directive; this is because psycopg2's API-level
-setting is always emitted at the start of each transaction in any case.
+as well as the ``isolation_level`` argument used by
+:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
+options make use of psycopg2's ``set_isolation_level()`` connection method,
+rather than emitting a Postgresql directive; this is because psycopg2's
+API-level setting is always emitted at the start of each transaction in any
+case.
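Either pathway might look like this sketch::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ")

    # or per-connection, at runtime
    conn = engine.connect().execution_options(
        isolation_level="SERIALIZABLE")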
The psycopg2 dialect supports these constants for isolation level:
@@ -166,35 +171,38 @@ The psycopg2 dialect will log Postgresql NOTICE messages via the
HSTORE type
------------
-The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the
-HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
+The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
+the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when it is detected that the target database has the HSTORE
type set up for use. In other words, when the dialect makes the first
connection, a sequence like the following is performed:
-1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``.
- If this function returns a list of HSTORE identifiers, we then determine that
- the ``HSTORE`` extension is present.
+1. Request the available HSTORE oids using
+ ``psycopg2.extras.HstoreAdapter.get_oids()``.
+ If this function returns a list of HSTORE identifiers, we then determine
+ that the ``HSTORE`` extension is present.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
-The ``register_hstore()`` extension has the effect of **all Python dictionaries
-being accepted as parameters regardless of the type of target column in SQL**.
-The dictionaries are converted by this extension into a textual HSTORE expression.
-If this behavior is not desired, disable the
-use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows::
+The ``register_hstore()`` extension has the effect of **all Python
+dictionaries being accepted as parameters regardless of the type of target
+column in SQL**. The dictionaries are converted by this extension into a
+textual HSTORE expression. If this behavior is not desired, disable the
+use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
+follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
-The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()``
-extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE
+The ``HSTORE`` type is **still supported** when the
+``psycopg2.extensions.register_hstore()`` extension is not used. It merely
+means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
-place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which
-may be more performant.
+place within SQLAlchemy's own marshalling logic, and not that of
+``psycopg2``, which may be more performant.
"""
from __future__ import absolute_import
@@ -209,9 +217,9 @@ from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
- PGIdentifierPreparer, PGExecutionContext, \
- ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
- _INT_TYPES
+ PGIdentifierPreparer, PGExecutionContext, \
+ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
+ _INT_TYPES
from .hstore import HSTORE
from .json import JSON
@@ -227,14 +235,14 @@ class _PGNumeric(sqltypes.Numeric):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
+ decimal.Decimal,
+ self._effective_decimal_return_scale)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
@@ -243,7 +251,7 @@ class _PGNumeric(sqltypes.Numeric):
return processors.to_float
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
@@ -255,6 +263,7 @@ class _PGEnum(ENUM):
self.convert_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
+
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
@@ -293,13 +302,16 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
- (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
- or \
- (
+ (self.compiled and isinstance(self.compiled.statement,
+ expression.Selectable)
+ or
+ (
(not self.compiled or
- isinstance(self.compiled.statement, expression.TextClause))
- and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
- )
+ isinstance(self.compiled.statement,
+ expression.TextClause))
+ and self.statement and SERVER_SIDE_CURSOR_RE.match(
+ self.statement))
+ )
)
else:
is_server_side = \
@@ -309,7 +321,8 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
- ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
+ ident = "c_%s_%s" % (hex(id(self))[2:],
+ hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
@@ -336,7 +349,7 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
@@ -354,7 +367,8 @@ class PGDialect_psycopg2(PGDialect):
supports_unicode_statements = False
default_paramstyle = 'pyformat'
- supports_sane_multi_rowcount = False # set to true based on psycopg2 version
+ # set to true based on psycopg2 version
+ supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
@@ -375,9 +389,9 @@ class PGDialect_psycopg2(PGDialect):
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
- client_encoding=None,
- use_native_hstore=True,
- **kwargs):
+ client_encoding=None,
+ use_native_hstore=True,
+ **kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
@@ -386,18 +400,18 @@ class PGDialect_psycopg2(PGDialect):
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
- self.dbapi.__version__)
+ self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
- int(x)
- for x in m.group(1, 2, 3)
- if x is not None)
+ int(x)
+ for x in m.group(1, 2, 3)
+ if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
- self._hstore_oids(connection.connection) \
- is not None
+ self._hstore_oids(connection.connection) \
+ is not None
self._has_native_json = self.psycopg2_version >= (2, 5)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
@@ -427,7 +441,7 @@ class PGDialect_psycopg2(PGDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
connection.set_isolation_level(level)
@@ -458,16 +472,17 @@ class PGDialect_psycopg2(PGDialect):
oid, array_oid = hstore_oids
if util.py2k:
extras.register_hstore(conn, oid=oid,
- array_oid=array_oid,
- unicode=True)
+ array_oid=array_oid,
+ unicode=True)
else:
extras.register_hstore(conn, oid=oid,
- array_oid=array_oid)
+ array_oid=array_oid)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
- extras.register_default_json(conn, loads=self._json_deserializer)
+ extras.register_default_json(
+ conn, loads=self._json_deserializer)
fns.append(on_connect)
if fns:
diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
index fc785d450..3ebd0135f 100644
--- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
@@ -9,7 +9,8 @@
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
- :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
+ :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\
+[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py
index 31434743c..28f80d000 100644
--- a/lib/sqlalchemy/dialects/postgresql/ranges.py
+++ b/lib/sqlalchemy/dialects/postgresql/ranges.py
@@ -9,6 +9,7 @@ from ... import types as sqltypes
__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE')
+
class RangeOperators(object):
"""
This mixin provides functionality for the Range Operators
@@ -94,6 +95,7 @@ class RangeOperators(object):
"""
return self.expr.op('+')(other)
+
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT4RANGE type.
@@ -105,6 +107,7 @@ class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['int4range'] = INT4RANGE
+
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT8RANGE type.
@@ -116,6 +119,7 @@ class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['int8range'] = INT8RANGE
+
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql NUMRANGE type.
@@ -127,6 +131,7 @@ class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['numrange'] = NUMRANGE
+
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql DATERANGE type.
@@ -138,6 +143,7 @@ class DATERANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['daterange'] = DATERANGE
+
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSRANGE type.
@@ -149,6 +155,7 @@ class TSRANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['tsrange'] = TSRANGE
+
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSTZRANGE type.
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index 8daada528..306f45023 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -39,8 +39,8 @@ Two things to note:
one column, if the table has a composite (i.e. multi-column) primary key.
This is regardless of the AUTOINCREMENT keyword being present or not.
-To specifically render the AUTOINCREMENT keyword on the primary key column when
-rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
+To specifically render the AUTOINCREMENT keyword on the primary key column
+when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
@@ -63,29 +63,29 @@ Database Locking Behavior / Concurrency
Note that SQLite is not designed for a high level of concurrency. The database
itself, being a file, is locked completely during write operations and within
transactions, meaning exactly one connection has exclusive access to the
-database during this period - all other connections will be blocked during this
-time.
-
-The Python DBAPI specification also calls for a connection model that is always
-in a transaction; there is no BEGIN method, only commit and rollback. This
-implies that a SQLite DBAPI driver would technically allow only serialized
-access to a particular database file at all times. The pysqlite driver attempts
-to ameliorate this by deferring the actual BEGIN statement until the first DML
-(INSERT, UPDATE, or DELETE) is received within a transaction. While this breaks
-serializable isolation, it at least delays the exclusive locking inherent in
-SQLite's design.
-
-SQLAlchemy's default mode of usage with the ORM is known as "autocommit=False",
-which means the moment the :class:`.Session` begins to be used, a transaction
-is begun. As the :class:`.Session` is used, the autoflush feature, also on by
-default, will flush out pending changes to the database before each query. The
-effect of this is that a :class:`.Session` used in its default mode will often
-emit DML early on, long before the transaction is actually committed. This
-again will have the effect of serializing access to the SQLite database. If
-highly concurrent reads are desired against the SQLite database, it is advised
-that the autoflush feature be disabled, and potentially even that autocommit be
-re-enabled, which has the effect of each SQL statement and flush committing
-changes immediately.
+database during this period - all other connections will be blocked during
+this time.
+
+The Python DBAPI specification also calls for a connection model that is
+always in a transaction; there is no BEGIN method, only commit and rollback.
+This implies that a SQLite DBAPI driver would technically allow only
+serialized access to a particular database file at all times. The pysqlite
+driver attempts to ameliorate this by deferring the actual BEGIN statement
+until the first DML (INSERT, UPDATE, or DELETE) is received within a
+transaction. While this breaks serializable isolation, it at least delays the
+exclusive locking inherent in SQLite's design.
+
+SQLAlchemy's default mode of usage with the ORM is known as
+"autocommit=False", which means the moment the :class:`.Session` begins to be
+used, a transaction is begun. As the :class:`.Session` is used, the autoflush
+feature, also on by default, will flush out pending changes to the database
+before each query. The effect of this is that a :class:`.Session` used in its
+default mode will often emit DML early on, long before the transaction is
+actually committed. This again will have the effect of serializing access to
+the SQLite database. If highly concurrent reads are desired against the SQLite
+database, it is advised that the autoflush feature be disabled, and
+potentially even that autocommit be re-enabled, which has the effect of each
+SQL statement and flush committing changes immediately.
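A sketch of that advice with the ORM (``engine`` hypothetical)::

    from sqlalchemy.orm import sessionmaker

    # don't flush pending DML before every query
    Session = sessionmaker(bind=engine, autoflush=False)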
For more information on SQLite's lack of concurrency by design, please see
`Situations Where Another RDBMS May Work Better - High Concurrency
@@ -105,8 +105,8 @@ Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
-* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
- before use.
+* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
+ connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
@@ -122,8 +122,8 @@ new connections through the usage of events::
.. seealso::
- `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ - on
- the SQLite web site.
+ `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
+ - on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
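The event recipe referenced above is along these lines (a sketch using the
engine-level "connect" event)::

    from sqlalchemy import event
    from sqlalchemy.engine import Engine

    @event.listens_for(Engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()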
@@ -189,8 +189,9 @@ from ... import util
from ...engine import default, reflection
from ...sql import compiler
-from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, REAL,
- NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR)
+from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT,
+ INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
+ TIMESTAMP, VARCHAR)
class _DateTimeMixin(object):
@@ -214,6 +215,7 @@ class _DateTimeMixin(object):
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
+
def process(value):
return "'%s'" % bp(value)
return process
@@ -224,7 +226,8 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
The default string storage format is::
- "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"
+ "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\
+%(second)02d.%(microsecond)06d"
e.g.::
@@ -237,12 +240,13 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
- storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d",
+ storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:\
+%(min)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
- :param storage_format: format string which will be applied to the dict with
- keys year, month, day, hour, minute, second, and microsecond.
+ :param storage_format: format string which will be applied to the dict
+ with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
@@ -390,12 +394,13 @@ class TIME(_DateTimeMixin, sqltypes.Time):
from sqlalchemy.dialects.sqlite import TIME
t = TIME(
- storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
+ storage_format="%(hour)02d-%(minute)02d-%(second)02d-\
+%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
- :param storage_format: format string which will be applied to the dict with
- keys hour, minute, second, and microsecond.
+ :param storage_format: format string which will be applied to the dict
+ with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
@@ -581,8 +586,9 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
if local_table.schema != remote_table.schema:
return None
else:
- return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
- constraint)
+ return super(
+ SQLiteDDLCompiler,
+ self).visit_foreign_key_constraint(constraint)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
@@ -619,7 +625,7 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
'when', 'where',
- ])
+ ])
def format_index(self, index, use_schema=True, name=None):
"""Prepare a quoted index and schema name."""
@@ -630,8 +636,8 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
if (not self.omit_schema and
use_schema and
getattr(index.table, "schema", None)):
- result = self.quote_schema(index.table.schema,
- index.table.quote_schema) + "." + result
+ result = self.quote_schema(
+ index.table.schema, index.table.quote_schema) + "." + result
return result
@@ -641,8 +647,8 @@ class SQLiteExecutionContext(default.DefaultExecutionContext):
return self.execution_options.get("sqlite_raw_colnames", False)
def _translate_colname(self, colname):
- # adjust for dotted column names. SQLite in the case of UNION may store
- # col names as "tablename.colname" in cursor.description
+ # adjust for dotted column names. SQLite in the case of UNION may
+ # store col names as "tablename.colname" in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[1], colname
else:
@@ -685,9 +691,10 @@ class SQLiteDialect(default.DefaultDialect):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
- # this flag used by pysqlite dialect, and perhaps others in the future,
- # to indicate the driver is handling date/timestamp conversions (and
- # perhaps datetime/time as well on some hypothetical driver ?)
+ # this flag used by pysqlite dialect, and perhaps others in the
+ # future, to indicate the driver is handling date/timestamp
+ # conversions (and perhaps datetime/time as well on some hypothetical
+ # driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
@@ -716,7 +723,7 @@ class SQLiteDialect(default.DefaultDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
@@ -918,9 +925,9 @@ class SQLiteDialect(default.DefaultDialect):
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
- "Could not instantiate type %s with "
- "reflected arguments %s; using no arguments." %
- (coltype, args))
+ "Could not instantiate type %s with "
+ "reflected arguments %s; using no arguments." %
+ (coltype, args))
coltype = coltype()
else:
coltype = coltype()
@@ -952,7 +959,8 @@ class SQLiteDialect(default.DefaultDialect):
row = c.fetchone()
if row is None:
break
- (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
+ (numerical_id, rtbl, lcol, rcol) = (
+ row[0], row[2], row[3], row[4])
self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol)
return fkeys
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
index 51e5f0cdf..c67333283 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -88,7 +88,8 @@ nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
- connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
+ connect_args={'detect_types':
+ sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
@@ -96,7 +97,8 @@ With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
-SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
+SQLAlchemy, so this function still receives SQLAlchemy-level result
+processing.
.. _pysqlite_threading_pooling:
@@ -111,12 +113,12 @@ did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
-``check_same_thread`` which will disable this check, however note that pysqlite
-connections are still not safe to use in concurrently in multiple threads.
-In particular, any statement execution calls would need to be externally
-mutexed, as Pysqlite does not provide for thread-safe propagation of error
-messages among other things. So while even ``:memory:`` databases can be
-shared among threads in modern SQLite, Pysqlite doesn't provide enough
+``check_same_thread`` which will disable this check, however note that
+pysqlite connections are still not safe to use concurrently in multiple
+threads. In particular, any statement execution calls would need to be
+externally mutexed, as Pysqlite does not provide for thread-safe propagation
+of error messages among other things. So while even ``:memory:`` databases
+can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
@@ -142,8 +144,8 @@ SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To use a ``:memory:`` database in a multithreaded scenario, the same connection
-object must be shared among threads, since the database exists
+To use a ``:memory:`` database in a multithreaded scenario, the same
+connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
@@ -164,10 +166,10 @@ Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
-:meth:`.Session.rollback` is called, a pool which maintains a single connection must
-be used. Use :class:`.SingletonThreadPool` if the scope is only needed
-within the current thread, or :class:`.StaticPool` is scope is needed within
-multiple threads for this case::
+:meth:`.Session.rollback` is called, a pool which maintains a single
+connection must be used. Use :class:`.SingletonThreadPool` if the scope is
+only needed within the current thread, or :class:`.StaticPool` if scope is
+needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
@@ -215,7 +217,8 @@ a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
- engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
+ engine = create_engine("sqlite:///myfile.db",
+ isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
@@ -331,6 +334,6 @@ class SQLiteDialect_pysqlite(SQLiteDialect):
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
- "Cannot operate on a closed database." in str(e)
+ "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
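Tying the pooling notes above together, a ``:memory:`` engine shared across
threads might look like this sketch::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import StaticPool

    engine = create_engine(
        'sqlite://',
        connect_args={'check_same_thread': False},
        poolclass=StaticPool)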
diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py
index a9263dc3f..eb313592b 100644
--- a/lib/sqlalchemy/dialects/sybase/__init__.py
+++ b/lib/sqlalchemy/dialects/sybase/__init__.py
@@ -11,11 +11,11 @@ from sqlalchemy.dialects.sybase import base, pysybase, pyodbc
base.dialect = pyodbc.dialect
from .base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
- TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
- BIGINT, INT, INTEGER, SMALLINT, BINARY,\
- VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
- IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\
- dialect
+ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
+ BIGINT, INT, INTEGER, SMALLINT, BINARY,\
+ VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
+ IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\
+ dialect
__all__ = (
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index 38f665838..26f5ef04a 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -32,10 +32,10 @@ from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
- TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
- BIGINT, INT, INTEGER, SMALLINT, BINARY,\
- VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
- UnicodeText, REAL
+ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
+ BIGINT, INT, INTEGER, SMALLINT, BINARY,\
+ VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
+ UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
@@ -94,7 +94,7 @@ RESERVED_WORDS = set([
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
- ])
+])
class _SybaseUnitypeMixin(object):
@@ -225,7 +225,7 @@ ischema_names = {
'image': IMAGE,
'bit': BIT,
-# not in documentation for ASE 15.7
+ # not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
@@ -268,12 +268,13 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
if insert_has_sequence:
self._enable_identity_insert = \
- seq_column.key in self.compiled_parameters[0]
+ seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
- self.cursor.execute("SET IDENTITY_INSERT %s ON" %
+ self.cursor.execute(
+ "SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
@@ -282,15 +283,15 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
- "The Sybase dialect only supports "
- "DDL in 'autocommit' mode at this time.")
+ "The Sybase dialect only supports "
+ "DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
- "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
+ "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
- self.root_connection.connection.connection,
- True)
+ self.root_connection.connection.connection,
+ True)
def post_exec(self):
if self.isddl:
@@ -298,10 +299,10 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
if self._enable_identity_insert:
self.cursor.execute(
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.
- format_table(self.compiled.statement.table)
- )
+ "SET IDENTITY_INSERT %s OFF" %
+ self.dialect.identifier_preparer.
+ format_table(self.compiled.statement.table)
+ )
def get_lastrowid(self):
cursor = self.create_cursor()
@@ -317,10 +318,10 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
- 'doy': 'dayofyear',
- 'dow': 'weekday',
- 'milliseconds': 'millisecond'
- })
+ 'doy': 'dayofyear',
+ 'dow': 'weekday',
+ 'milliseconds': 'millisecond'
+ })
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
@@ -328,10 +329,10 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
# bind params for FIRST / TOP
limit = select._limit
if limit:
- #if select._limit == 1:
- #s += "FIRST "
- #else:
- #s += "TOP %s " % (select._limit,)
+ # if select._limit == 1:
+ # s += "FIRST "
+ # else:
+ # s += "TOP %s " % (select._limit,)
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
@@ -352,7 +353,7 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
- field, self.process(extract.expr, **kw))
+ field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
@@ -376,21 +377,21 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
- self.dialect.type_compiler.process(column.type)
+ self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.CompileError(
- "The Sybase dialect requires Table-bound "
- "columns in order to generate DDL")
+ "The Sybase dialect requires Table-bound "
+ "columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
- and column.default
+ and column.default
if sequence:
start, increment = sequence.start or 1, \
- sequence.increment or 1
+ sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
@@ -416,8 +417,8 @@ class SybaseDDLCompiler(compiler.DDLCompiler):
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
- include_schema=False)
- )
+ include_schema=False)
+ )
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
@@ -447,14 +448,14 @@ class SybaseDialect(default.DefaultDialect):
def _get_default_schema_name(self, connection):
return connection.scalar(
- text("SELECT user_name() as user_name",
- typemap={'user_name': Unicode})
- )
+ text("SELECT user_name() as user_name",
+ typemap={'user_name': Unicode})
+ )
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
- self.server_version_info < (15, ):
+ self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
@@ -520,14 +521,15 @@ class SybaseDialect(default.DefaultDialect):
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
- bool(autoincrement), default, precision, scale,
- length)
+ bool(autoincrement),
+ default, precision, scale,
+ length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
- precision, scale, length):
+ precision, scale, length):
coltype = self.ischema_names.get(type_, None)
@@ -544,8 +546,8 @@ class SybaseDialect(default.DefaultDialect):
if coltype:
coltype = coltype(*args, **kwargs)
- #is this necessary
- #if is_array:
+ # is this necessary
+ # if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
@@ -643,12 +645,12 @@ class SybaseDialect(default.DefaultDialect):
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
- "constrained_columns": constrained_columns,
- "referred_schema": reftable["schema"],
- "referred_table": reftable["name"],
- "referred_columns": referred_columns,
- "name": r["name"]
- }
+ "constrained_columns": constrained_columns,
+ "referred_schema": reftable["schema"],
+ "referred_table": reftable["name"],
+ "referred_columns": referred_columns,
+ "name": r["name"]
+ }
foreign_keys.append(fk_info)
diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py
index 3b849a680..cb76d1379 100644
--- a/lib/sqlalchemy/dialects/sybase/pyodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -9,7 +9,8 @@
.. dialect:: sybase+pyodbc
:name: PyODBC
:dbapi: pyodbc
- :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]
+ :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
+[/<database>]
:url: http://pypi.python.org/pypi/pyodbc/
@@ -34,7 +35,7 @@ Currently *not* supported are::
"""
from sqlalchemy.dialects.sybase.base import SybaseDialect,\
- SybaseExecutionContext
+ SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
import decimal
@@ -51,7 +52,7 @@ class _SybNumeric_pyodbc(sqltypes.Numeric):
def bind_processor(self, dialect):
super_process = super(_SybNumeric_pyodbc, self).\
- bind_processor(dialect)
+ bind_processor(dialect)
def process(value):
if self.asdecimal and \
diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py
index 678c146d3..6843eb480 100644
--- a/lib/sqlalchemy/dialects/sybase/pysybase.py
+++ b/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -9,7 +9,8 @@
.. dialect:: sybase+pysybase
:name: Python-Sybase
:dbapi: Sybase
- :connectstring: sybase+pysybase://<username>:<password>@<dsn>/[database name]
+ :connectstring: sybase+pysybase://<username>:<password>@<dsn>/\
+[database name]
:url: http://python-sybase.sourceforge.net/
Unicode Support
@@ -22,7 +23,7 @@ kind at this time.
from sqlalchemy import types as sqltypes, processors
from sqlalchemy.dialects.sybase.base import SybaseDialect, \
- SybaseExecutionContext, SybaseSQLCompiler
+ SybaseExecutionContext, SybaseSQLCompiler
class _SybNumeric(sqltypes.Numeric):
@@ -62,8 +63,8 @@ class SybaseDialect_pysybase(SybaseDialect):
statement_compiler = SybaseSQLCompiler_pysybase
colspecs = {
- sqltypes.Numeric: _SybNumeric,
- sqltypes.Float: sqltypes.Float
+ sqltypes.Numeric: _SybNumeric,
+ sqltypes.Float: sqltypes.Float
}
@classmethod
@@ -90,7 +91,7 @@ class SybaseDialect_pysybase(SybaseDialect):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
- self.dbapi.ProgrammingError)):
+ self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or