Diffstat (limited to 'lib/sqlalchemy')
 lib/sqlalchemy/__init__.py                     |    4
 lib/sqlalchemy/cextension/resultproxy.c        |   13
 lib/sqlalchemy/dialects/__init__.py            |    2
 lib/sqlalchemy/dialects/mssql/base.py          |    2
 lib/sqlalchemy/dialects/mysql/__init__.py      |    6
 lib/sqlalchemy/dialects/mysql/base.py          | 1535
 lib/sqlalchemy/dialects/mysql/enumerated.py    |  307
 lib/sqlalchemy/dialects/mysql/json.py          |   90
 lib/sqlalchemy/dialects/mysql/reflection.py    |  449
 lib/sqlalchemy/dialects/mysql/types.py         |  766
 lib/sqlalchemy/dialects/oracle/zxjdbc.py       |    3
 lib/sqlalchemy/dialects/postgresql/array.py    |   42
 lib/sqlalchemy/dialects/postgresql/base.py     |   30
 lib/sqlalchemy/dialects/postgresql/ext.py      |    2
 lib/sqlalchemy/dialects/postgresql/hstore.py   |   17
 lib/sqlalchemy/dialects/postgresql/json.py     |  206
 lib/sqlalchemy/dialects/postgresql/pg8000.py   |    1
 lib/sqlalchemy/dialects/postgresql/psycopg2.py |    1
 lib/sqlalchemy/dialects/sqlite/base.py         |   27
 lib/sqlalchemy/dialects/sybase/base.py         |    6
 lib/sqlalchemy/engine/__init__.py              |    3
 lib/sqlalchemy/engine/base.py                  |   69
 lib/sqlalchemy/engine/default.py               |   37
 lib/sqlalchemy/engine/interfaces.py            |  107
 lib/sqlalchemy/engine/reflection.py            |    3
 lib/sqlalchemy/engine/result.py                |  405
 lib/sqlalchemy/engine/strategies.py            |   12
 lib/sqlalchemy/engine/url.py                   |   10
 lib/sqlalchemy/ext/automap.py                  |    2
 lib/sqlalchemy/ext/compiler.py                 |   12
 lib/sqlalchemy/ext/declarative/api.py          |   16
 lib/sqlalchemy/orm/mapper.py                   |   50
 lib/sqlalchemy/orm/persistence.py              |    2
 lib/sqlalchemy/orm/query.py                    |   27
 lib/sqlalchemy/orm/relationships.py            |   19
 lib/sqlalchemy/orm/scoping.py                  |   15
 lib/sqlalchemy/orm/strategies.py               |    2
 lib/sqlalchemy/orm/strategy_options.py         |   52
 lib/sqlalchemy/sql/__init__.py                 |    1
 lib/sqlalchemy/sql/base.py                     |   29
 lib/sqlalchemy/sql/compiler.py                 |  190
 lib/sqlalchemy/sql/ddl.py                      |   21
 lib/sqlalchemy/sql/default_comparator.py       |   28
 lib/sqlalchemy/sql/elements.py                 |   93
 lib/sqlalchemy/sql/functions.py                |   23
 lib/sqlalchemy/sql/operators.py                |   15
 lib/sqlalchemy/sql/schema.py                   |   56
 lib/sqlalchemy/sql/selectable.py               |    3
 lib/sqlalchemy/sql/sqltypes.py                 |  316
 lib/sqlalchemy/testing/__init__.py             |    2
 lib/sqlalchemy/testing/assertions.py           |   15
 lib/sqlalchemy/testing/assertsql.py            |    9
 lib/sqlalchemy/testing/plugin/pytestplugin.py  |    4
 lib/sqlalchemy/testing/profiling.py            |    5
 lib/sqlalchemy/testing/requirements.py         |   13
 lib/sqlalchemy/testing/suite/test_types.py     |  257
 lib/sqlalchemy/types.py                        |    5
 lib/sqlalchemy/util/compat.py                  |   24
 58 files changed, 3371 insertions(+), 2090 deletions(-)
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
index 12d4e8d1c..dde179bf5 100644
--- a/lib/sqlalchemy/__init__.py
+++ b/lib/sqlalchemy/__init__.py
@@ -51,10 +51,11 @@ from .sql import (
union,
union_all,
update,
+ within_group,
)
from .types import (
- Array,
+ ARRAY,
BIGINT,
BINARY,
BLOB,
@@ -76,6 +77,7 @@ from .types import (
INTEGER,
Integer,
Interval,
+ JSON,
LargeBinary,
NCHAR,
NVARCHAR,
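
The __init__.py hunk above renames the short-lived Array export to ARRAY and adds JSON and within_group to the top-level namespace. A minimal sketch of what the new exports allow, assuming a 1.1-series install:

    from sqlalchemy import (
        ARRAY, JSON, Column, Integer, MetaData, Table,
        func, select, within_group
    )

    metadata = MetaData()
    t = Table('t', metadata,
              Column('id', Integer, primary_key=True),
              Column('tags', ARRAY(Integer)),   # generic ARRAY, was "Array"
              Column('doc', JSON))              # generic JSON type

    # within_group() wraps an ordered-set aggregate with
    # WITHIN GROUP (ORDER BY ...)
    stmt = select([within_group(func.percentile_cont(0.5), t.c.id.desc())])
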
diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c
index ae2a059cf..9c4d0c7e4 100644
--- a/lib/sqlalchemy/cextension/resultproxy.c
+++ b/lib/sqlalchemy/cextension/resultproxy.c
@@ -315,8 +315,11 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
if (exception == NULL)
return NULL;
- // wow. this seems quite excessive.
- cstr_obj = PyObject_Str(key);
+ cstr_obj = PyTuple_GetItem(record, 1);
+ if (cstr_obj == NULL)
+ return NULL;
+
+ cstr_obj = PyObject_Str(cstr_obj);
if (cstr_obj == NULL)
return NULL;
@@ -326,6 +329,8 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
InvalidRequestError without any message like in the
python version.
*/
+
+
#if PY_MAJOR_VERSION >= 3
bytes = PyUnicode_AsASCIIString(cstr_obj);
if (bytes == NULL)
@@ -341,8 +346,8 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
Py_DECREF(cstr_obj);
PyErr_Format(exception,
- "Ambiguous column name '%.200s' in result set! "
- "try 'use_labels' option on select statement.", cstr_key);
+ "Ambiguous column name '%.200s' in "
+ "result set column descriptions", cstr_key);
return NULL;
}
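
For reference, the reworded message above reports the same condition the pure-Python result proxy raises; a hedged sketch that triggers it, assuming any result set whose column descriptions repeat a name:

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    with engine.connect() as conn:
        conn.execute(text("CREATE TABLE a (id INTEGER)"))
        conn.execute(text("CREATE TABLE b (id INTEGER)"))
        conn.execute(text("INSERT INTO a VALUES (1)"))
        conn.execute(text("INSERT INTO b VALUES (2)"))
        row = conn.execute(text("SELECT a.id, b.id FROM a, b")).first()
        # raises InvalidRequestError: Ambiguous column name 'id' in
        # result set column descriptions
        row['id']
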
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
index d90a83809..f851a4ab8 100644
--- a/lib/sqlalchemy/dialects/__init__.py
+++ b/lib/sqlalchemy/dialects/__init__.py
@@ -43,3 +43,5 @@ def _auto_fn(name):
return None
registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
+
+plugins = util.PluginLoader("sqlalchemy.plugins")
\ No newline at end of file
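
The new loader looks plugins up through the "sqlalchemy.plugins" setuptools entry-point group, mirroring how the dialect registry scans "sqlalchemy.dialects". A hypothetical registration by a third-party package (the distribution, module, and class names here are invented for illustration):

    # setup.py of a hypothetical third-party distribution
    from setuptools import setup

    setup(
        name='my-sqla-plugin',
        py_modules=['my_sqla_plugin'],
        entry_points={
            'sqlalchemy.plugins': [
                'myplugin = my_sqla_plugin:MyEnginePlugin',
            ],
        },
    )
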
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 1ee328e83..815529c88 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -1157,7 +1157,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
- return 'DATEPART("%s", %s)' % \
+ return 'DATEPART(%s, %s)' % \
(field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
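
The quoting fix above changes how extract() renders on SQL Server, since DATEPART takes a bare keyword rather than a string literal; a small sketch:

    from sqlalchemy import column, extract
    from sqlalchemy.dialects import mssql

    expr = extract('year', column('order_date'))
    print(expr.compile(dialect=mssql.dialect()))
    # before: DATEPART("year", order_date)
    # after:  DATEPART(year, order_date)
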
diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py
index c1f78bd1d..ca204fcd1 100644
--- a/lib/sqlalchemy/dialects/mysql/__init__.py
+++ b/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -15,7 +15,7 @@ base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
- FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
+ FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
@@ -24,8 +24,8 @@ from .base import \
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
- 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
- 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+ 'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
+ 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 8830cb0c1..61c4a3fac 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -536,13 +536,13 @@ output::
"""
-import datetime
import re
import sys
+import json
from ... import schema as sa_schema
from ... import exc, log, sql, util
-from ...sql import compiler
+from ...sql import compiler, elements
from array import array as _array
from ...engine import reflection
@@ -552,6 +552,17 @@ from ...util import topological
from ...types import DATE, BOOLEAN, \
BLOB, BINARY, VARBINARY
+from . import reflection as _reflection
+from .types import BIGINT, BIT, CHAR, DECIMAL, DATETIME, \
+ DOUBLE, FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, \
+ MEDIUMTEXT, NCHAR, NUMERIC, NVARCHAR, REAL, SMALLINT, TEXT, TIME, \
+ TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT, VARCHAR, YEAR
+from .types import _StringType, _IntegerType, _NumericType, \
+ _FloatType, _MatchType
+from .enumerated import ENUM, SET
+from .json import JSON, JSONIndexType, JSONPathType
+
+
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
@@ -614,1056 +625,6 @@ SET_RE = re.compile(
re.I | re.UNICODE)
-class _NumericType(object):
- """Base for MySQL numeric types.
-
- This is the base both for NUMERIC as well as INTEGER, hence
- it's a mixin.
-
- """
-
- def __init__(self, unsigned=False, zerofill=False, **kw):
- self.unsigned = unsigned
- self.zerofill = zerofill
- super(_NumericType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_NumericType, sqltypes.Numeric])
-
-
-class _FloatType(_NumericType, sqltypes.Float):
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- if isinstance(self, (REAL, DOUBLE)) and \
- (
- (precision is None and scale is not None) or
- (precision is not None and scale is None)
- ):
- raise exc.ArgumentError(
- "You must specify both precision and scale or omit "
- "both altogether.")
- super(_FloatType, self).__init__(
- precision=precision, asdecimal=asdecimal, **kw)
- self.scale = scale
-
- def __repr__(self):
- return util.generic_repr(self, to_inspect=[_FloatType,
- _NumericType,
- sqltypes.Float])
-
-
-class _IntegerType(_NumericType, sqltypes.Integer):
- def __init__(self, display_width=None, **kw):
- self.display_width = display_width
- super(_IntegerType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self, to_inspect=[_IntegerType,
- _NumericType,
- sqltypes.Integer])
-
-
-class _StringType(sqltypes.String):
- """Base for MySQL string types."""
-
- def __init__(self, charset=None, collation=None,
- ascii=False, binary=False, unicode=False,
- national=False, **kw):
- self.charset = charset
-
- # allow collate= or collation=
- kw.setdefault('collation', kw.pop('collate', collation))
-
- self.ascii = ascii
- self.unicode = unicode
- self.binary = binary
- self.national = national
- super(_StringType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_StringType, sqltypes.String])
-
-
-class _MatchType(sqltypes.Float, sqltypes.MatchType):
- def __init__(self, **kw):
- # TODO: float arguments?
- sqltypes.Float.__init__(self)
- sqltypes.MatchType.__init__(self)
-
-
-
-class NUMERIC(_NumericType, sqltypes.NUMERIC):
- """MySQL NUMERIC type."""
-
- __visit_name__ = 'NUMERIC'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a NUMERIC.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(NUMERIC, self).__init__(precision=precision,
- scale=scale, asdecimal=asdecimal, **kw)
-
-
-class DECIMAL(_NumericType, sqltypes.DECIMAL):
- """MySQL DECIMAL type."""
-
- __visit_name__ = 'DECIMAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DECIMAL.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DECIMAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class DOUBLE(_FloatType):
- """MySQL DOUBLE type."""
-
- __visit_name__ = 'DOUBLE'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DOUBLE.
-
- .. note::
-
- The :class:`.DOUBLE` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits.
- Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
- to change this scale, or ``asdecimal=False`` to return values
- directly as Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DOUBLE, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class REAL(_FloatType, sqltypes.REAL):
- """MySQL REAL type."""
-
- __visit_name__ = 'REAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a REAL.
-
- .. note::
-
- The :class:`.REAL` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits.
- Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
- to change this scale, or ``asdecimal=False`` to return values
- directly as Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(REAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class FLOAT(_FloatType, sqltypes.FLOAT):
- """MySQL FLOAT type."""
-
- __visit_name__ = 'FLOAT'
-
- def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
- """Construct a FLOAT.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(FLOAT, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
- def bind_processor(self, dialect):
- return None
-
-
-class INTEGER(_IntegerType, sqltypes.INTEGER):
- """MySQL INTEGER type."""
-
- __visit_name__ = 'INTEGER'
-
- def __init__(self, display_width=None, **kw):
- """Construct an INTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(INTEGER, self).__init__(display_width=display_width, **kw)
-
-
-class BIGINT(_IntegerType, sqltypes.BIGINT):
- """MySQL BIGINTEGER type."""
-
- __visit_name__ = 'BIGINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a BIGINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(BIGINT, self).__init__(display_width=display_width, **kw)
-
-
-class MEDIUMINT(_IntegerType):
- """MySQL MEDIUMINTEGER type."""
-
- __visit_name__ = 'MEDIUMINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a MEDIUMINTEGER
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
-
-
-class TINYINT(_IntegerType):
- """MySQL TINYINT type."""
-
- __visit_name__ = 'TINYINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a TINYINT.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(TINYINT, self).__init__(display_width=display_width, **kw)
-
-
-class SMALLINT(_IntegerType, sqltypes.SMALLINT):
- """MySQL SMALLINTEGER type."""
-
- __visit_name__ = 'SMALLINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a SMALLINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(SMALLINT, self).__init__(display_width=display_width, **kw)
-
-
-class BIT(sqltypes.TypeEngine):
- """MySQL BIT type.
-
- This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
- for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
- MSTinyInteger() type.
-
- """
-
- __visit_name__ = 'BIT'
-
- def __init__(self, length=None):
- """Construct a BIT.
-
- :param length: Optional, number of bits.
-
- """
- self.length = length
-
- def result_processor(self, dialect, coltype):
- """Convert a MySQL's 64 bit, variable length binary string to a long.
-
- TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
- already do this, so this logic should be moved to those dialects.
-
- """
-
- def process(value):
- if value is not None:
- v = 0
- for i in value:
- if not isinstance(i, int):
- i = ord(i) # convert byte to int on Python 2
- v = v << 8 | i
- return v
- return value
- return process
-
-
-class TIME(sqltypes.TIME):
- """MySQL TIME type. """
-
- __visit_name__ = 'TIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8 The MySQL-specific TIME
- type as well as fractional seconds support.
-
- """
- super(TIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
- def result_processor(self, dialect, coltype):
- time = datetime.time
-
- def process(value):
- # convert from a timedelta value
- if value is not None:
- microseconds = value.microseconds
- seconds = value.seconds
- minutes = seconds // 60
- return time(minutes // 60,
- minutes % 60,
- seconds - minutes * 60,
- microsecond=microseconds)
- else:
- return None
- return process
-
-
-class TIMESTAMP(sqltypes.TIMESTAMP):
- """MySQL TIMESTAMP type.
-
- """
-
- __visit_name__ = 'TIMESTAMP'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIMESTAMP type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIMESTAMP type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
- with fractional seconds support.
-
- """
- super(TIMESTAMP, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class DATETIME(sqltypes.DATETIME):
- """MySQL DATETIME type.
-
- """
-
- __visit_name__ = 'DATETIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL DATETIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the DATETIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
- with fractional seconds support.
-
- """
- super(DATETIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class YEAR(sqltypes.TypeEngine):
- """MySQL YEAR type, for single byte storage of years 1901-2155."""
-
- __visit_name__ = 'YEAR'
-
- def __init__(self, display_width=None):
- self.display_width = display_width
-
-
-class TEXT(_StringType, sqltypes.TEXT):
- """MySQL TEXT type, for text up to 2^16 characters."""
-
- __visit_name__ = 'TEXT'
-
- def __init__(self, length=None, **kw):
- """Construct a TEXT.
-
- :param length: Optional, if provided the server may optimize storage
- by substituting the smallest TEXT type sufficient to store
- ``length`` characters.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TEXT, self).__init__(length=length, **kw)
-
-
-class TINYTEXT(_StringType):
- """MySQL TINYTEXT type, for text up to 2^8 characters."""
-
- __visit_name__ = 'TINYTEXT'
-
- def __init__(self, **kwargs):
- """Construct a TINYTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TINYTEXT, self).__init__(**kwargs)
-
-
-class MEDIUMTEXT(_StringType):
- """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
-
- __visit_name__ = 'MEDIUMTEXT'
-
- def __init__(self, **kwargs):
- """Construct a MEDIUMTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(MEDIUMTEXT, self).__init__(**kwargs)
-
-
-class LONGTEXT(_StringType):
- """MySQL LONGTEXT type, for text up to 2^32 characters."""
-
- __visit_name__ = 'LONGTEXT'
-
- def __init__(self, **kwargs):
- """Construct a LONGTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(LONGTEXT, self).__init__(**kwargs)
-
-
-class VARCHAR(_StringType, sqltypes.VARCHAR):
- """MySQL VARCHAR type, for variable-length character data."""
-
- __visit_name__ = 'VARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a VARCHAR.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(VARCHAR, self).__init__(length=length, **kwargs)
-
-
-class CHAR(_StringType, sqltypes.CHAR):
- """MySQL CHAR type, for fixed-length character data."""
-
- __visit_name__ = 'CHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a CHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- super(CHAR, self).__init__(length=length, **kwargs)
-
- @classmethod
- def _adapt_string_for_cast(self, type_):
- # copy the given string type into a CHAR
- # for the purposes of rendering a CAST expression
- type_ = sqltypes.to_instance(type_)
- if isinstance(type_, sqltypes.CHAR):
- return type_
- elif isinstance(type_, _StringType):
- return CHAR(
- length=type_.length,
- charset=type_.charset,
- collation=type_.collation,
- ascii=type_.ascii,
- binary=type_.binary,
- unicode=type_.unicode,
- national=False # not supported in CAST
- )
- else:
- return CHAR(length=type_.length)
-
-
-class NVARCHAR(_StringType, sqltypes.NVARCHAR):
- """MySQL NVARCHAR type.
-
- For variable-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NVARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NVARCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NVARCHAR, self).__init__(length=length, **kwargs)
-
-
-class NCHAR(_StringType, sqltypes.NCHAR):
- """MySQL NCHAR type.
-
- For fixed-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NCHAR, self).__init__(length=length, **kwargs)
-
-
-class TINYBLOB(sqltypes._Binary):
- """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
-
- __visit_name__ = 'TINYBLOB'
-
-
-class MEDIUMBLOB(sqltypes._Binary):
- """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
-
- __visit_name__ = 'MEDIUMBLOB'
-
-
-class LONGBLOB(sqltypes._Binary):
- """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
-
- __visit_name__ = 'LONGBLOB'
-
-
-class _EnumeratedValues(_StringType):
- def _init_values(self, values, kw):
- self.quoting = kw.pop('quoting', 'auto')
-
- if self.quoting == 'auto' and len(values):
- # What quoting character are we using?
- q = None
- for e in values:
- if len(e) == 0:
- self.quoting = 'unquoted'
- break
- elif q is None:
- q = e[0]
-
- if len(e) == 1 or e[0] != q or e[-1] != q:
- self.quoting = 'unquoted'
- break
- else:
- self.quoting = 'quoted'
-
- if self.quoting == 'quoted':
- util.warn_deprecated(
- 'Manually quoting %s value literals is deprecated. Supply '
- 'unquoted values and use the quoting= option in cases of '
- 'ambiguity.' % self.__class__.__name__)
-
- values = self._strip_values(values)
-
- self._enumerated_values = values
- length = max([len(v) for v in values] + [0])
- return values, length
-
- @classmethod
- def _strip_values(cls, values):
- strip_values = []
- for a in values:
- if a[0:1] == '"' or a[0:1] == "'":
- # strip enclosing quotes and unquote interior
- a = a[1:-1].replace(a[0] * 2, a[0])
- strip_values.append(a)
- return strip_values
-
-
-class ENUM(sqltypes.Enum, _EnumeratedValues):
- """MySQL ENUM type."""
-
- __visit_name__ = 'ENUM'
-
- def __init__(self, *enums, **kw):
- """Construct an ENUM.
-
- E.g.::
-
- Column('myenum', ENUM("foo", "bar", "baz"))
-
- :param enums: The range of valid values for this ENUM. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
-
- :param strict: Defaults to False: ensure that a given value is in this
- ENUM's range of permissible values when inserting or updating rows.
- Note that MySQL will not raise a fatal error if you attempt to store
- an out of range value- an alternate value will be stored instead.
- (See MySQL ENUM documentation.)
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- """
- values, length = self._init_values(enums, kw)
- self.strict = kw.pop('strict', False)
- kw.pop('metadata', None)
- kw.pop('schema', None)
- kw.pop('name', None)
- kw.pop('quote', None)
- kw.pop('native_enum', None)
- kw.pop('inherit_schema', None)
- kw.pop('_create_events', None)
- _StringType.__init__(self, length=length, **kw)
- sqltypes.Enum.__init__(self, *values)
-
- def __repr__(self):
- return util.generic_repr(
- self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
-
- def bind_processor(self, dialect):
- super_convert = super(ENUM, self).bind_processor(dialect)
-
- def process(value):
- if self.strict and value is not None and value not in self.enums:
- raise exc.InvalidRequestError('"%s" not a valid value for '
- 'this enum' % value)
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
- def adapt(self, cls, **kw):
- if issubclass(cls, ENUM):
- kw['strict'] = self.strict
- return sqltypes.Enum.adapt(self, cls, **kw)
-
-
-class SET(_EnumeratedValues):
- """MySQL SET type."""
-
- __visit_name__ = 'SET'
-
- def __init__(self, *values, **kw):
- """Construct a SET.
-
- E.g.::
-
- Column('myset', SET("foo", "bar", "baz"))
-
-
- The list of potential values is required in the case that this
- set will be used to generate DDL for a table, or if the
- :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
-
- :param values: The range of valid values for this SET.
-
- :param convert_unicode: Same flag as that of
- :paramref:`.String.convert_unicode`.
-
- :param collation: same as that of :paramref:`.String.collation`
-
- :param charset: same as that of :paramref:`.VARCHAR.charset`.
-
- :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
-
- :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
-
- :param binary: same as that of :paramref:`.VARCHAR.binary`.
-
- :param quoting: Defaults to 'auto': automatically determine set value
- quoting. If all values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- .. versionadded:: 0.9.0
-
- :param retrieve_as_bitwise: if True, the data for the set type will be
- persisted and selected using an integer value, where a set is coerced
- into a bitwise mask for persistence. MySQL allows this mode which
- has the advantage of being able to store values unambiguously,
- such as the blank string ``''``. The datatype will appear
- as the expression ``col + 0`` in a SELECT statement, so that the
- value is coerced into an integer value in result sets.
- This flag is required if one wishes
- to persist a set that can store the blank string ``''`` as a value.
-
- .. warning::
-
- When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
- essential that the list of set values is expressed in the
- **exact same order** as exists on the MySQL database.
-
- .. versionadded:: 1.0.0
-
-
- """
- self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
- values, length = self._init_values(values, kw)
- self.values = tuple(values)
- if not self.retrieve_as_bitwise and '' in values:
- raise exc.ArgumentError(
- "Can't use the blank value '' in a SET without "
- "setting retrieve_as_bitwise=True")
- if self.retrieve_as_bitwise:
- self._bitmap = dict(
- (value, 2 ** idx)
- for idx, value in enumerate(self.values)
- )
- self._bitmap.update(
- (2 ** idx, value)
- for idx, value in enumerate(self.values)
- )
- kw.setdefault('length', length)
- super(SET, self).__init__(**kw)
-
- def column_expression(self, colexpr):
- if self.retrieve_as_bitwise:
- return sql.type_coerce(
- sql.type_coerce(colexpr, sqltypes.Integer) + 0,
- self
- )
- else:
- return colexpr
-
- def result_processor(self, dialect, coltype):
- if self.retrieve_as_bitwise:
- def process(value):
- if value is not None:
- value = int(value)
-
- return set(
- util.map_bits(self._bitmap.__getitem__, value)
- )
- else:
- return None
- else:
- super_convert = super(SET, self).result_processor(dialect, coltype)
-
- def process(value):
- if isinstance(value, util.string_types):
- # MySQLdb returns a string, let's parse
- if super_convert:
- value = super_convert(value)
- return set(re.findall(r'[^,]+', value))
- else:
- # mysql-connector-python does a naive
- # split(",") which throws in an empty string
- if value is not None:
- value.discard('')
- return value
- return process
-
- def bind_processor(self, dialect):
- super_convert = super(SET, self).bind_processor(dialect)
- if self.retrieve_as_bitwise:
- def process(value):
- if value is None:
- return None
- elif isinstance(value, util.int_types + util.string_types):
- if super_convert:
- return super_convert(value)
- else:
- return value
- else:
- int_value = 0
- for v in value:
- int_value |= self._bitmap[v]
- return int_value
- else:
-
- def process(value):
- # accept strings and int (actually bitflag) values directly
- if value is not None and not isinstance(
- value, util.int_types + util.string_types):
- value = ",".join(value)
-
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
- def adapt(self, impltype, **kw):
- kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
- return util.constructor_copy(
- self, impltype,
- *self.values,
- **kw
- )
-
# old names
MSTime = TIME
MSSet = SET
@@ -1704,7 +665,11 @@ colspecs = {
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
- sqltypes.MatchType: _MatchType
+ sqltypes.MatchType: _MatchType,
+ sqltypes.JSON: JSON,
+ sqltypes.JSON.JSONIndexType: JSONIndexType,
+ sqltypes.JSON.JSONPathType: JSONPathType
+
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
@@ -1724,6 +689,7 @@ ischema_names = {
'float': FLOAT,
'int': INTEGER,
'integer': INTEGER,
+ 'json': JSON,
'longblob': LONGBLOB,
'longtext': LONGTEXT,
'mediumblob': MEDIUMBLOB,
@@ -1769,6 +735,16 @@ class MySQLCompiler(compiler.SQLCompiler):
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left),
+ self.process(binary.right))
+
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left),
+ self.process(binary.right))
+
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (self.process(binary.left),
self.process(binary.right))
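
Both new visitors above render index and path access on a JSON column as JSON_EXTRACT; roughly how that surfaces at the expression level, assuming the mysql JSON type (bind formatting of the path itself is handled by JSONIndexType and JSONPathType):

    from sqlalchemy import Column, MetaData, Table, select
    from sqlalchemy.dialects import mysql

    data = Table('data', MetaData(), Column('doc', mysql.JSON))

    # single key -> JSON_EXTRACT(data.doc, %s)
    single = select([data.c.doc['some_key']])

    # tuple of keys/indexes -> JSON_EXTRACT(data.doc, %s) with a JSON
    # path string ("$.a.b[1]"-style) as the bound parameter
    path = select([data.c.doc[('a', 'b', 1)]])
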
@@ -1801,6 +777,8 @@ class MySQLCompiler(compiler.SQLCompiler):
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
+ elif isinstance(type_, sqltypes.JSON):
+ return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(
type_).replace('NUMERIC', 'DECIMAL')
@@ -1974,7 +952,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
('PARTITION_BY', 'PARTITIONS'), # only for test consistency
], opts):
arg = opts[opt]
- if opt in _options_of_type_string:
+ if opt in _reflection._options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
@@ -2316,6 +1294,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
+ def visit_JSON(self, type_, **kw):
+ return "JSON"
+
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
@@ -2435,10 +1416,13 @@ class MySQLDialect(default.DefaultDialect):
})
]
- def __init__(self, isolation_level=None, **kwargs):
+ def __init__(self, isolation_level=None, json_serializer=None,
+ json_deserializer=None, **kwargs):
kwargs.pop('use_ansiquotes', None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
+ self._json_serializer = json_serializer
+ self._json_deserializer = json_deserializer
def on_connect(self):
if self.isolation_level is not None:
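
create_engine() forwards unrecognized keyword arguments to the dialect, so the two new parameters above can be supplied there to replace the stdlib json module; a sketch, assuming a MySQL DBAPI such as pymysql is installed:

    import simplejson  # any module exposing dumps()/loads() works
    from sqlalchemy import create_engine

    engine = create_engine(
        "mysql+pymysql://scott:tiger@localhost/test",
        json_serializer=simplejson.dumps,
        json_deserializer=simplejson.loads,
    )
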
@@ -2605,6 +1589,10 @@ class MySQLDialect(default.DefaultDialect):
default.DefaultDialect.initialize(self, connection)
@property
+ def _is_mariadb(self):
+ return 'MariaDB' in self.server_version_info
+
+ @property
def _supports_cast(self):
return self.server_version_info is None or \
self.server_version_info >= (4, 0, 2)
@@ -2796,7 +1784,7 @@ class MySQLDialect(default.DefaultDialect):
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
- return MySQLTableDefinitionParser(self, preparer)
+ return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
@@ -2928,430 +1916,6 @@ class MySQLDialect(default.DefaultDialect):
return rows
-class ReflectedState(object):
- """Stores raw information about a SHOW CREATE TABLE statement."""
-
- def __init__(self):
- self.columns = []
- self.table_options = {}
- self.table_name = None
- self.keys = []
- self.constraints = []
-
-
-@log.class_logger
-class MySQLTableDefinitionParser(object):
- """Parses the results of a SHOW CREATE TABLE statement."""
-
- def __init__(self, dialect, preparer):
- self.dialect = dialect
- self.preparer = preparer
- self._prep_regexes()
-
- def parse(self, show_create, charset):
- state = ReflectedState()
- state.charset = charset
- for line in re.split(r'\r?\n', show_create):
- if line.startswith(' ' + self.preparer.initial_quote):
- self._parse_column(line, state)
- # a regular table options line
- elif line.startswith(') '):
- self._parse_table_options(line, state)
- # an ANSI-mode table options line
- elif line == ')':
- pass
- elif line.startswith('CREATE '):
- self._parse_table_name(line, state)
- # Not present in real reflection, but may be if
- # loading from a file.
- elif not line:
- pass
- else:
- type_, spec = self._parse_constraints(line)
- if type_ is None:
- util.warn("Unknown schema content: %r" % line)
- elif type_ == 'key':
- state.keys.append(spec)
- elif type_ == 'constraint':
- state.constraints.append(spec)
- else:
- pass
- return state
-
- def _parse_constraints(self, line):
- """Parse a KEY or CONSTRAINT line.
-
- :param line: A line of SHOW CREATE TABLE output
- """
-
- # KEY
- m = self._re_key.match(line)
- if m:
- spec = m.groupdict()
- # convert columns into name, length pairs
- spec['columns'] = self._parse_keyexprs(spec['columns'])
- return 'key', spec
-
- # CONSTRAINT
- m = self._re_constraint.match(line)
- if m:
- spec = m.groupdict()
- spec['table'] = \
- self.preparer.unformat_identifiers(spec['table'])
- spec['local'] = [c[0]
- for c in self._parse_keyexprs(spec['local'])]
- spec['foreign'] = [c[0]
- for c in self._parse_keyexprs(spec['foreign'])]
- return 'constraint', spec
-
- # PARTITION and SUBPARTITION
- m = self._re_partition.match(line)
- if m:
- # Punt!
- return 'partition', line
-
- # No match.
- return (None, line)
-
- def _parse_table_name(self, line, state):
- """Extract the table name.
-
- :param line: The first line of SHOW CREATE TABLE
- """
-
- regex, cleanup = self._pr_name
- m = regex.match(line)
- if m:
- state.table_name = cleanup(m.group('name'))
-
- def _parse_table_options(self, line, state):
- """Build a dictionary of all reflected table-level options.
-
- :param line: The final line of SHOW CREATE TABLE output.
- """
-
- options = {}
-
- if not line or line == ')':
- pass
-
- else:
- rest_of_line = line[:]
- for regex, cleanup in self._pr_options:
- m = regex.search(rest_of_line)
- if not m:
- continue
- directive, value = m.group('directive'), m.group('val')
- if cleanup:
- value = cleanup(value)
- options[directive.lower()] = value
- rest_of_line = regex.sub('', rest_of_line)
-
- for nope in ('auto_increment', 'data directory', 'index directory'):
- options.pop(nope, None)
-
- for opt, val in options.items():
- state.table_options['%s_%s' % (self.dialect.name, opt)] = val
-
- def _parse_column(self, line, state):
- """Extract column details.
-
- Falls back to a 'minimal support' variant if full parse fails.
-
- :param line: Any column-bearing line from SHOW CREATE TABLE
- """
-
- spec = None
- m = self._re_column.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = True
- else:
- m = self._re_column_loose.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = False
- if not spec:
- util.warn("Unknown column definition %r" % line)
- return
- if not spec['full']:
- util.warn("Incomplete reflection of column definition %r" % line)
-
- name, type_, args = spec['name'], spec['coltype'], spec['arg']
-
- try:
- col_type = self.dialect.ischema_names[type_]
- except KeyError:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (type_, name))
- col_type = sqltypes.NullType
-
- # Column type positional arguments eg. varchar(32)
- if args is None or args == '':
- type_args = []
- elif args[0] == "'" and args[-1] == "'":
- type_args = self._re_csv_str.findall(args)
- else:
- type_args = [int(v) for v in self._re_csv_int.findall(args)]
-
- # Column type keyword options
- type_kw = {}
-
- if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
- if type_args:
- type_kw['fsp'] = type_args.pop(0)
-
- for kw in ('unsigned', 'zerofill'):
- if spec.get(kw, False):
- type_kw[kw] = True
- for kw in ('charset', 'collate'):
- if spec.get(kw, False):
- type_kw[kw] = spec[kw]
- if issubclass(col_type, _EnumeratedValues):
- type_args = _EnumeratedValues._strip_values(type_args)
-
- if issubclass(col_type, SET) and '' in type_args:
- type_kw['retrieve_as_bitwise'] = True
-
- type_instance = col_type(*type_args, **type_kw)
-
- col_kw = {}
-
- # NOT NULL
- col_kw['nullable'] = True
- # this can be "NULL" in the case of TIMESTAMP
- if spec.get('notnull', False) == 'NOT NULL':
- col_kw['nullable'] = False
-
- # AUTO_INCREMENT
- if spec.get('autoincr', False):
- col_kw['autoincrement'] = True
- elif issubclass(col_type, sqltypes.Integer):
- col_kw['autoincrement'] = False
-
- # DEFAULT
- default = spec.get('default', None)
-
- if default == 'NULL':
- # eliminates the need to deal with this later.
- default = None
-
- col_d = dict(name=name, type=type_instance, default=default)
- col_d.update(col_kw)
- state.columns.append(col_d)
-
- def _describe_to_create(self, table_name, columns):
- """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
-
- DESCRIBE is a much simpler reflection and is sufficient for
- reflecting views for runtime use. This method formats DDL
- for columns only- keys are omitted.
-
- :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
- SHOW FULL COLUMNS FROM rows must be rearranged for use with
- this function.
- """
-
- buffer = []
- for row in columns:
- (name, col_type, nullable, default, extra) = \
- [row[i] for i in (0, 1, 2, 4, 5)]
-
- line = [' ']
- line.append(self.preparer.quote_identifier(name))
- line.append(col_type)
- if not nullable:
- line.append('NOT NULL')
- if default:
- if 'auto_increment' in default:
- pass
- elif (col_type.startswith('timestamp') and
- default.startswith('C')):
- line.append('DEFAULT')
- line.append(default)
- elif default == 'NULL':
- line.append('DEFAULT')
- line.append(default)
- else:
- line.append('DEFAULT')
- line.append("'%s'" % default.replace("'", "''"))
- if extra:
- line.append(extra)
-
- buffer.append(' '.join(line))
-
- return ''.join([('CREATE TABLE %s (\n' %
- self.preparer.quote_identifier(table_name)),
- ',\n'.join(buffer),
- '\n) '])
-
- def _parse_keyexprs(self, identifiers):
- """Unpack '"col"(2),"col" ASC'-ish strings into components."""
-
- return self._re_keyexprs.findall(identifiers)
-
- def _prep_regexes(self):
- """Pre-compile regular expressions."""
-
- self._re_columns = []
- self._pr_options = []
-
- _final = self.preparer.final_quote
-
- quotes = dict(zip(('iq', 'fq', 'esc_fq'),
- [re.escape(s) for s in
- (self.preparer.initial_quote,
- _final,
- self.preparer._escape_identifier(_final))]))
-
- self._pr_name = _pr_compile(
- r'^CREATE (?:\w+ +)?TABLE +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
- self.preparer._unescape_identifier)
-
- # `col`,`col2`(32),`col3`(15) DESC
- #
- # Note: ASC and DESC aren't reflected, so we'll punt...
- self._re_keyexprs = _re_compile(
- r'(?:'
- r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
- r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
-
- # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
- self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
-
- # 123 or 123,456
- self._re_csv_int = _re_compile(r'\d+')
-
- # `colname` <type> [type opts]
- # (NOT NULL | NULL)
- # DEFAULT ('value' | CURRENT_TIMESTAMP...)
- # COMMENT 'comment'
- # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
- # STORAGE (DISK|MEMORY)
- self._re_column = _re_compile(
- r' '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
- r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
- r'(?: +(?P<unsigned>UNSIGNED))?'
- r'(?: +(?P<zerofill>ZEROFILL))?'
- r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
- r'(?: +COLLATE +(?P<collate>[\w_]+))?'
- r'(?: +(?P<notnull>(?:NOT )?NULL))?'
- r'(?: +DEFAULT +(?P<default>'
- r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
- r'(?: +ON UPDATE \w+)?)'
- r'))?'
- r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
- r'(?: +COMMENT +(P<comment>(?:\x27\x27|[^\x27])+))?'
- r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
- r'(?: +STORAGE +(?P<storage>\w+))?'
- r'(?: +(?P<extra>.*))?'
- r',?$'
- % quotes
- )
-
- # Fallback, try to parse as little as possible
- self._re_column_loose = _re_compile(
- r' '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
- r'.*?(?P<notnull>(?:NOT )NULL)?'
- % quotes
- )
-
- # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
- # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
- # KEY_BLOCK_SIZE size | WITH PARSER name
- self._re_key = _re_compile(
- r' '
- r'(?:(?P<type>\S+) )?KEY'
- r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
- r'(?: +USING +(?P<using_pre>\S+))?'
- r' +\((?P<columns>.+?)\)'
- r'(?: +USING +(?P<using_post>\S+))?'
- r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
- r'(?: +WITH PARSER +(?P<parser>\S+))?'
- r',?$'
- % quotes
- )
-
- # CONSTRAINT `name` FOREIGN KEY (`local_col`)
- # REFERENCES `remote` (`remote_col`)
- # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
- # ON DELETE CASCADE ON UPDATE RESTRICT
- #
- # unique constraints come back as KEYs
- kw = quotes.copy()
- kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
- self._re_constraint = _re_compile(
- r' '
- r'CONSTRAINT +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'FOREIGN KEY +'
- r'\((?P<local>[^\)]+?)\) REFERENCES +'
- r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
- r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
- r'\((?P<foreign>[^\)]+?)\)'
- r'(?: +(?P<match>MATCH \w+))?'
- r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
- r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
- % kw
- )
-
- # PARTITION
- #
- # punt!
- self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
-
- # Table-level options (COLLATE, ENGINE, etc.)
- # Do the string options first, since they have quoted
- # strings we need to get rid of.
- for option in _options_of_type_string:
- self._add_option_string(option)
-
- for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
- 'AVG_ROW_LENGTH', 'CHARACTER SET',
- 'DEFAULT CHARSET', 'CHECKSUM',
- 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
- 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
- 'KEY_BLOCK_SIZE'):
- self._add_option_word(option)
-
- self._add_option_regex('UNION', r'\([^\)]+\)')
- self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
- self._add_option_regex(
- 'RAID_TYPE',
- r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
-
- _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
-
- def _add_option_string(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r"'(?P<val>(?:[^']|'')*?)'(?!')" %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(
- regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
- ))
-
- def _add_option_word(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>\w+)' %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(regex))
-
- def _add_option_regex(self, directive, regex):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>%s)' %
- (re.escape(directive), self._optional_equals, regex))
- self._pr_options.append(_pr_compile(regex))
-
-_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
- 'PASSWORD', 'CONNECTION')
-
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
@@ -3397,14 +1961,3 @@ class _DecodingRowProxy(object):
else:
return item
-
-def _pr_compile(regex, cleanup=None):
- """Prepare a 2-tuple of compiled regex and callable."""
-
- return (_re_compile(regex), cleanup)
-
-
-def _re_compile(regex):
- """Compile a string to regex, I and UNICODE."""
-
- return re.compile(regex, re.I | re.UNICODE)
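
Taken together, the base.py changes route the new type through DDL (visit_JSON), reflection (ischema_names['json']), and generic-type adaption (colspecs). A hedged end-to-end sketch, assuming a MySQL 5.7+ server reachable at the URL shown:

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine
    from sqlalchemy.dialects.mysql import JSON

    engine = create_engine("mysql+pymysql://scott:tiger@localhost/test")
    metadata = MetaData()
    docs = Table('docs', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('doc', JSON))  # emits "doc JSON" in CREATE TABLE
    metadata.create_all(engine)

    with engine.connect() as conn:
        conn.execute(docs.insert(), doc={'k': [1, 2, 3]})
        # values come back deserialized via the dialect's json_deserializer
        print(conn.execute(docs.select()).fetchall())
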
diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py
new file mode 100644
index 000000000..53de2b5fe
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/enumerated.py
@@ -0,0 +1,307 @@
+# mysql/enumerated.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+
+from .types import _StringType
+from ... import exc, sql, util
+from ... import types as sqltypes
+
+
+class _EnumeratedValues(_StringType):
+ def _init_values(self, values, kw):
+ self.quoting = kw.pop('quoting', 'auto')
+
+ if self.quoting == 'auto' and len(values):
+ # What quoting character are we using?
+ q = None
+ for e in values:
+ if len(e) == 0:
+ self.quoting = 'unquoted'
+ break
+ elif q is None:
+ q = e[0]
+
+ if len(e) == 1 or e[0] != q or e[-1] != q:
+ self.quoting = 'unquoted'
+ break
+ else:
+ self.quoting = 'quoted'
+
+ if self.quoting == 'quoted':
+ util.warn_deprecated(
+ 'Manually quoting %s value literals is deprecated. Supply '
+ 'unquoted values and use the quoting= option in cases of '
+ 'ambiguity.' % self.__class__.__name__)
+
+ values = self._strip_values(values)
+
+ self._enumerated_values = values
+ length = max([len(v) for v in values] + [0])
+ return values, length
+
+ @classmethod
+ def _strip_values(cls, values):
+ strip_values = []
+ for a in values:
+ if a[0:1] == '"' or a[0:1] == "'":
+ # strip enclosing quotes and unquote interior
+ a = a[1:-1].replace(a[0] * 2, a[0])
+ strip_values.append(a)
+ return strip_values
+
+
+class ENUM(sqltypes.Enum, _EnumeratedValues):
+ """MySQL ENUM type."""
+
+ __visit_name__ = 'ENUM'
+
+ def __init__(self, *enums, **kw):
+ """Construct an ENUM.
+
+ E.g.::
+
+ Column('myenum', ENUM("foo", "bar", "baz"))
+
+ :param enums: The range of valid values for this ENUM. Values will be
+ quoted when generating the schema according to the quoting flag (see
+ below).
+
+ :param strict: Defaults to False: ensure that a given value is in this
+ ENUM's range of permissible values when inserting or updating rows.
+ Note that MySQL will not raise a fatal error if you attempt to store
+ an out of range value- an alternate value will be stored instead.
+ (See MySQL ENUM documentation.)
+
+ :param charset: Optional, a column-level character set for this string
+ value. Takes precedence to 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+ value. Takes precedence to 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ :param quoting: Defaults to 'auto': automatically determine enum value
+ quoting. If all enum values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+          'quoted': values in enums are already quoted; they will be used
+          directly when generating the schema. This usage is deprecated.
+
+          'unquoted': values in enums are not quoted; they will be escaped and
+          surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
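+
+          For example, under the default 'auto' mode, these two
+          declarations are treated as equivalent, the pre-quoted form
+          being the deprecated one::
+
+              Column('myenum', ENUM("foo", "bar", "baz"))
+              Column('myenum', ENUM("'foo'", "'bar'", "'baz'"))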
+
+ """
+ values, length = self._init_values(enums, kw)
+ self.strict = kw.pop('strict', False)
+ kw.pop('metadata', None)
+ kw.pop('schema', None)
+ kw.pop('name', None)
+ kw.pop('quote', None)
+ kw.pop('native_enum', None)
+ kw.pop('inherit_schema', None)
+ kw.pop('_create_events', None)
+ _StringType.__init__(self, length=length, **kw)
+ sqltypes.Enum.__init__(self, *values)
+
+ def __repr__(self):
+ return util.generic_repr(
+ self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
+
+ def bind_processor(self, dialect):
+ super_convert = super(ENUM, self).bind_processor(dialect)
+
+ def process(value):
+ if self.strict and value is not None and value not in self.enums:
+ raise exc.InvalidRequestError('"%s" not a valid value for '
+ 'this enum' % value)
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ return process
+
+ def adapt(self, cls, **kw):
+ if issubclass(cls, ENUM):
+ kw['strict'] = self.strict
+ return sqltypes.Enum.adapt(self, cls, **kw)
+
+
+class SET(_EnumeratedValues):
+ """MySQL SET type."""
+
+ __visit_name__ = 'SET'
+
+ def __init__(self, *values, **kw):
+ """Construct a SET.
+
+ E.g.::
+
+ Column('myset', SET("foo", "bar", "baz"))
+
+
+ The list of potential values is required in the case that this
+ set will be used to generate DDL for a table, or if the
+ :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
+
+ :param values: The range of valid values for this SET.
+
+ :param convert_unicode: Same flag as that of
+ :paramref:`.String.convert_unicode`.
+
+ :param collation: same as that of :paramref:`.String.collation`
+
+ :param charset: same as that of :paramref:`.VARCHAR.charset`.
+
+ :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
+
+ :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
+
+ :param binary: same as that of :paramref:`.VARCHAR.binary`.
+
+ :param quoting: Defaults to 'auto': automatically determine set value
+ quoting. If all values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+          'quoted': values in the set are already quoted; they will be used
+          directly when generating the schema. This usage is deprecated.
+
+          'unquoted': values in the set are not quoted; they will be escaped and
+          surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
+
+ .. versionadded:: 0.9.0
+
+ :param retrieve_as_bitwise: if True, the data for the set type will be
+ persisted and selected using an integer value, where a set is coerced
+ into a bitwise mask for persistence. MySQL allows this mode which
+ has the advantage of being able to store values unambiguously,
+ such as the blank string ``''``. The datatype will appear
+ as the expression ``col + 0`` in a SELECT statement, so that the
+ value is coerced into an integer value in result sets.
+ This flag is required if one wishes
+ to persist a set that can store the blank string ``''`` as a value.
+
+ .. warning::
+
+ When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
+ essential that the list of set values is expressed in the
+ **exact same order** as exists on the MySQL database.
+
+ .. versionadded:: 1.0.0
+
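+          For example, a bitwise-enabled SET may be declared as below;
+          with these values, a stored integer of ``5`` round-trips as
+          ``{'foo', 'baz'}``::
+
+              Column('myset', SET("foo", "bar", "baz",
+                                  retrieve_as_bitwise=True))
+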
+
+ """
+ self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
+ values, length = self._init_values(values, kw)
+ self.values = tuple(values)
+ if not self.retrieve_as_bitwise and '' in values:
+ raise exc.ArgumentError(
+ "Can't use the blank value '' in a SET without "
+ "setting retrieve_as_bitwise=True")
+ if self.retrieve_as_bitwise:
+ self._bitmap = dict(
+ (value, 2 ** idx)
+ for idx, value in enumerate(self.values)
+ )
+ self._bitmap.update(
+ (2 ** idx, value)
+ for idx, value in enumerate(self.values)
+ )
+ kw.setdefault('length', length)
+ super(SET, self).__init__(**kw)
+
+ def column_expression(self, colexpr):
+ if self.retrieve_as_bitwise:
+ return sql.type_coerce(
+ sql.type_coerce(colexpr, sqltypes.Integer) + 0,
+ self
+ )
+ else:
+ return colexpr
+
+ def result_processor(self, dialect, coltype):
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is not None:
+ value = int(value)
+
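+                    # translate each "on" bit back to its string value,
+                    # e.g. with values ('a', 'b', 'c'), 5 -> {'a', 'c'}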
+ return set(
+ util.map_bits(self._bitmap.__getitem__, value)
+ )
+ else:
+ return None
+ else:
+ super_convert = super(SET, self).result_processor(dialect, coltype)
+
+ def process(value):
+ if isinstance(value, util.string_types):
+ # MySQLdb returns a string, let's parse
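+                    # e.g. "foo,bar" -> {'foo', 'bar'}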
+ if super_convert:
+ value = super_convert(value)
+ return set(re.findall(r'[^,]+', value))
+ else:
+ # mysql-connector-python does a naive
+ # split(",") which throws in an empty string
+ if value is not None:
+ value.discard('')
+ return value
+ return process
+
+ def bind_processor(self, dialect):
+ super_convert = super(SET, self).bind_processor(dialect)
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, util.int_types + util.string_types):
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ else:
+ int_value = 0
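+                    # OR together the bit for each member, e.g. with
+                    # values ('a', 'b', 'c'), {'a', 'c'} -> 5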
+ for v in value:
+ int_value |= self._bitmap[v]
+ return int_value
+ else:
+
+ def process(value):
+ # accept strings and int (actually bitflag) values directly
+ if value is not None and not isinstance(
+ value, util.int_types + util.string_types):
+ value = ",".join(value)
+
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ return process
+
+ def adapt(self, impltype, **kw):
+ kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
+ return util.constructor_copy(
+ self, impltype,
+ *self.values,
+ **kw
+ )
diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py
new file mode 100644
index 000000000..a30cdc841
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/json.py
@@ -0,0 +1,90 @@
+# mysql/json.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from __future__ import absolute_import
+
+import json
+
+from ...sql import elements
+from ... import types as sqltypes
+from ... import util
+
+
+class JSON(sqltypes.JSON):
+ """MySQL JSON type.
+
+ MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
+ support JSON at the time of this writing.
+
+ The :class:`.mysql.JSON` type supports persistence of JSON values
+ as well as the core index operations provided by :class:`.types.JSON`
+ datatype, by adapting the operations to render the ``JSON_EXTRACT``
+ function at the database level.
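+
+    E.g., a minimal round trip; the ``metadata`` and ``engine`` names
+    here are illustrative only::
+
+        data_table = Table('data_table', metadata,
+            Column('id', Integer, primary_key=True),
+            Column('data', JSON)
+        )
+
+        with engine.connect() as conn:
+            conn.execute(
+                data_table.insert(),
+                data={"key1": "value1"}
+            )
+            result = conn.scalar(
+                select([data_table.c.data['key1']])
+            )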
+
+ .. versionadded:: 1.1
+
+ """
+
+ @util.memoized_property
+ def _str_impl(self):
+ return sqltypes.String(convert_unicode=True)
+
+ def bind_processor(self, dialect):
+ string_process = self._str_impl.bind_processor(dialect)
+
+ json_serializer = dialect._json_serializer or json.dumps
+
+ def process(value):
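+            # JSON.NULL serializes to the JSON string 'null'; sql.null(),
+            # or None with none_as_null=True, becomes SQL NULL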
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, elements.Null) or (
+ value is None and self.none_as_null
+ ):
+ return None
+
+ serialized = json_serializer(value)
+ if string_process:
+ serialized = string_process(serialized)
+ return serialized
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ string_process = self._str_impl.result_processor(dialect, coltype)
+ json_deserializer = dialect._json_deserializer or json.loads
+
+ def process(value):
+ if value is None:
+ return None
+ if string_process:
+ value = string_process(value)
+ return json_deserializer(value)
+ return process
+
+
+class JSONIndexType(sqltypes.JSON.JSONIndexType):
+ def bind_processor(self, dialect):
+ def process(value):
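+            # render a MySQL JSON path: 5 -> "$[5]", "name" -> '$."name"'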
+ if isinstance(value, int):
+ return "$[%s]" % value
+ else:
+ return '$."%s"' % value
+
+ return process
+
+
+class JSONPathType(sqltypes.JSON.JSONPathType):
+ def bind_processor(self, dialect):
+ def process(value):
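+            # e.g. ('key1', 5, 'key2') -> '$."key1"[5]."key2"'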
+ return "$%s" % (
+ "".join([
+ "[%s]" % elem if isinstance(elem, int)
+ else '."%s"' % elem for elem in value
+ ])
+ )
+
+ return process
diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py
new file mode 100644
index 000000000..cf1078252
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/reflection.py
@@ -0,0 +1,449 @@
+# mysql/reflection.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+from ... import log, util
+from ... import types as sqltypes
+from .enumerated import _EnumeratedValues, SET
+from .types import DATETIME, TIME, TIMESTAMP
+
+
+class ReflectedState(object):
+ """Stores raw information about a SHOW CREATE TABLE statement."""
+
+ def __init__(self):
+ self.columns = []
+ self.table_options = {}
+ self.table_name = None
+ self.keys = []
+ self.constraints = []
+
+
+@log.class_logger
+class MySQLTableDefinitionParser(object):
+ """Parses the results of a SHOW CREATE TABLE statement."""
+
+ def __init__(self, dialect, preparer):
+ self.dialect = dialect
+ self.preparer = preparer
+ self._prep_regexes()
+
+ def parse(self, show_create, charset):
+ state = ReflectedState()
+ state.charset = charset
+ for line in re.split(r'\r?\n', show_create):
+ if line.startswith(' ' + self.preparer.initial_quote):
+ self._parse_column(line, state)
+ # a regular table options line
+ elif line.startswith(') '):
+ self._parse_table_options(line, state)
+ # an ANSI-mode table options line
+ elif line == ')':
+ pass
+ elif line.startswith('CREATE '):
+ self._parse_table_name(line, state)
+            # Not present in real reflection, but may appear
+            # if loading from a file.
+ elif not line:
+ pass
+ else:
+ type_, spec = self._parse_constraints(line)
+ if type_ is None:
+ util.warn("Unknown schema content: %r" % line)
+ elif type_ == 'key':
+ state.keys.append(spec)
+ elif type_ == 'constraint':
+ state.constraints.append(spec)
+ else:
+ pass
+ return state
+
+ def _parse_constraints(self, line):
+ """Parse a KEY or CONSTRAINT line.
+
+ :param line: A line of SHOW CREATE TABLE output
+ """
+
+ # KEY
+ m = self._re_key.match(line)
+ if m:
+ spec = m.groupdict()
+ # convert columns into name, length pairs
+ spec['columns'] = self._parse_keyexprs(spec['columns'])
+ return 'key', spec
+
+ # CONSTRAINT
+ m = self._re_constraint.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['table'] = \
+ self.preparer.unformat_identifiers(spec['table'])
+ spec['local'] = [c[0]
+ for c in self._parse_keyexprs(spec['local'])]
+ spec['foreign'] = [c[0]
+ for c in self._parse_keyexprs(spec['foreign'])]
+ return 'constraint', spec
+
+ # PARTITION and SUBPARTITION
+ m = self._re_partition.match(line)
+ if m:
+ # Punt!
+ return 'partition', line
+
+ # No match.
+ return (None, line)
+
+ def _parse_table_name(self, line, state):
+ """Extract the table name.
+
+ :param line: The first line of SHOW CREATE TABLE
+ """
+
+ regex, cleanup = self._pr_name
+ m = regex.match(line)
+ if m:
+ state.table_name = cleanup(m.group('name'))
+
+ def _parse_table_options(self, line, state):
+ """Build a dictionary of all reflected table-level options.
+
+ :param line: The final line of SHOW CREATE TABLE output.
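+
+        E.g. a line like ``) ENGINE=InnoDB DEFAULT CHARSET=utf8`` yields
+        entries such as ``{'mysql_engine': 'InnoDB',
+        'mysql_default charset': 'utf8'}``.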
+ """
+
+ options = {}
+
+ if not line or line == ')':
+ pass
+
+ else:
+ rest_of_line = line[:]
+ for regex, cleanup in self._pr_options:
+ m = regex.search(rest_of_line)
+ if not m:
+ continue
+ directive, value = m.group('directive'), m.group('val')
+ if cleanup:
+ value = cleanup(value)
+ options[directive.lower()] = value
+ rest_of_line = regex.sub('', rest_of_line)
+
+ for nope in ('auto_increment', 'data directory', 'index directory'):
+ options.pop(nope, None)
+
+ for opt, val in options.items():
+ state.table_options['%s_%s' % (self.dialect.name, opt)] = val
+
+ def _parse_column(self, line, state):
+ """Extract column details.
+
+ Falls back to a 'minimal support' variant if full parse fails.
+
+ :param line: Any column-bearing line from SHOW CREATE TABLE
+ """
+
+ spec = None
+ m = self._re_column.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = True
+ else:
+ m = self._re_column_loose.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = False
+ if not spec:
+ util.warn("Unknown column definition %r" % line)
+ return
+ if not spec['full']:
+ util.warn("Incomplete reflection of column definition %r" % line)
+
+ name, type_, args = spec['name'], spec['coltype'], spec['arg']
+
+ try:
+ col_type = self.dialect.ischema_names[type_]
+ except KeyError:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (type_, name))
+ col_type = sqltypes.NullType
+
+ # Column type positional arguments eg. varchar(32)
+ if args is None or args == '':
+ type_args = []
+ elif args[0] == "'" and args[-1] == "'":
+ type_args = self._re_csv_str.findall(args)
+ else:
+ type_args = [int(v) for v in self._re_csv_int.findall(args)]
+
+ # Column type keyword options
+ type_kw = {}
+
+ if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
+ if type_args:
+ type_kw['fsp'] = type_args.pop(0)
+
+ for kw in ('unsigned', 'zerofill'):
+ if spec.get(kw, False):
+ type_kw[kw] = True
+ for kw in ('charset', 'collate'):
+ if spec.get(kw, False):
+ type_kw[kw] = spec[kw]
+ if issubclass(col_type, _EnumeratedValues):
+ type_args = _EnumeratedValues._strip_values(type_args)
+
+ if issubclass(col_type, SET) and '' in type_args:
+ type_kw['retrieve_as_bitwise'] = True
+
+ type_instance = col_type(*type_args, **type_kw)
+
+ col_kw = {}
+
+ # NOT NULL
+ col_kw['nullable'] = True
+ # this can be "NULL" in the case of TIMESTAMP
+ if spec.get('notnull', False) == 'NOT NULL':
+ col_kw['nullable'] = False
+
+ # AUTO_INCREMENT
+ if spec.get('autoincr', False):
+ col_kw['autoincrement'] = True
+ elif issubclass(col_type, sqltypes.Integer):
+ col_kw['autoincrement'] = False
+
+ # DEFAULT
+ default = spec.get('default', None)
+
+ if default == 'NULL':
+ # eliminates the need to deal with this later.
+ default = None
+
+ col_d = dict(name=name, type=type_instance, default=default)
+ col_d.update(col_kw)
+ state.columns.append(col_d)
+
+ def _describe_to_create(self, table_name, columns):
+ """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
+
+ DESCRIBE is a much simpler reflection and is sufficient for
+ reflecting views for runtime use. This method formats DDL
+        for columns only; keys are omitted.
+
+ :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
+ SHOW FULL COLUMNS FROM rows must be rearranged for use with
+ this function.
+ """
+
+ buffer = []
+ for row in columns:
+ (name, col_type, nullable, default, extra) = \
+ [row[i] for i in (0, 1, 2, 4, 5)]
+
+ line = [' ']
+ line.append(self.preparer.quote_identifier(name))
+ line.append(col_type)
+ if not nullable:
+ line.append('NOT NULL')
+ if default:
+ if 'auto_increment' in default:
+ pass
+ elif (col_type.startswith('timestamp') and
+ default.startswith('C')):
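+                    # e.g. CURRENT_TIMESTAMP; emit unquoted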
+ line.append('DEFAULT')
+ line.append(default)
+ elif default == 'NULL':
+ line.append('DEFAULT')
+ line.append(default)
+ else:
+ line.append('DEFAULT')
+ line.append("'%s'" % default.replace("'", "''"))
+ if extra:
+ line.append(extra)
+
+ buffer.append(' '.join(line))
+
+ return ''.join([('CREATE TABLE %s (\n' %
+ self.preparer.quote_identifier(table_name)),
+ ',\n'.join(buffer),
+ '\n) '])
+
+ def _parse_keyexprs(self, identifiers):
+ """Unpack '"col"(2),"col" ASC'-ish strings into components."""
+
+ return self._re_keyexprs.findall(identifiers)
+
+ def _prep_regexes(self):
+ """Pre-compile regular expressions."""
+
+ self._re_columns = []
+ self._pr_options = []
+
+ _final = self.preparer.final_quote
+
+ quotes = dict(zip(('iq', 'fq', 'esc_fq'),
+ [re.escape(s) for s in
+ (self.preparer.initial_quote,
+ _final,
+ self.preparer._escape_identifier(_final))]))
+
+ self._pr_name = _pr_compile(
+ r'^CREATE (?:\w+ +)?TABLE +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
+ self.preparer._unescape_identifier)
+
+ # `col`,`col2`(32),`col3`(15) DESC
+ #
+ # Note: ASC and DESC aren't reflected, so we'll punt...
+ self._re_keyexprs = _re_compile(
+ r'(?:'
+ r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
+ r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
+
+ # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
+ self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
+
+ # 123 or 123,456
+ self._re_csv_int = _re_compile(r'\d+')
+
+ # `colname` <type> [type opts]
+ # (NOT NULL | NULL)
+ # DEFAULT ('value' | CURRENT_TIMESTAMP...)
+ # COMMENT 'comment'
+ # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
+ # STORAGE (DISK|MEMORY)
+ self._re_column = _re_compile(
+ r' '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
+ r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
+ r'(?: +(?P<unsigned>UNSIGNED))?'
+ r'(?: +(?P<zerofill>ZEROFILL))?'
+ r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
+ r'(?: +COLLATE +(?P<collate>[\w_]+))?'
+ r'(?: +(?P<notnull>(?:NOT )?NULL))?'
+ r'(?: +DEFAULT +(?P<default>'
+ r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
+ r'(?: +ON UPDATE \w+)?)'
+ r'))?'
+ r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
+            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
+ r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
+ r'(?: +STORAGE +(?P<storage>\w+))?'
+ r'(?: +(?P<extra>.*))?'
+ r',?$'
+ % quotes
+ )
+
+ # Fallback, try to parse as little as possible
+ self._re_column_loose = _re_compile(
+ r' '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
+            r'.*?(?P<notnull>(?:NOT )?NULL)?'
+ % quotes
+ )
+
+ # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
+ # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
+ # KEY_BLOCK_SIZE size | WITH PARSER name
+ self._re_key = _re_compile(
+ r' '
+ r'(?:(?P<type>\S+) )?KEY'
+ r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
+ r'(?: +USING +(?P<using_pre>\S+))?'
+ r' +\((?P<columns>.+?)\)'
+ r'(?: +USING +(?P<using_post>\S+))?'
+ r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
+ r'(?: +WITH PARSER +(?P<parser>\S+))?'
+ r',?$'
+ % quotes
+ )
+
+ # CONSTRAINT `name` FOREIGN KEY (`local_col`)
+ # REFERENCES `remote` (`remote_col`)
+ # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
+ # ON DELETE CASCADE ON UPDATE RESTRICT
+ #
+ # unique constraints come back as KEYs
+ kw = quotes.copy()
+ kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
+ self._re_constraint = _re_compile(
+ r' '
+ r'CONSTRAINT +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'FOREIGN KEY +'
+ r'\((?P<local>[^\)]+?)\) REFERENCES +'
+ r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+ r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+ r'\((?P<foreign>[^\)]+?)\)'
+ r'(?: +(?P<match>MATCH \w+))?'
+ r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
+ r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
+ % kw
+ )
+
+ # PARTITION
+ #
+ # punt!
+ self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
+
+ # Table-level options (COLLATE, ENGINE, etc.)
+ # Do the string options first, since they have quoted
+ # strings we need to get rid of.
+ for option in _options_of_type_string:
+ self._add_option_string(option)
+
+ for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
+ 'AVG_ROW_LENGTH', 'CHARACTER SET',
+ 'DEFAULT CHARSET', 'CHECKSUM',
+ 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
+ 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
+ 'KEY_BLOCK_SIZE'):
+ self._add_option_word(option)
+
+ self._add_option_regex('UNION', r'\([^\)]+\)')
+ self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
+ self._add_option_regex(
+ 'RAID_TYPE',
+            r'\w+\s+RAID_CHUNKS\s*=\s*\w+\s+RAID_CHUNKSIZE\s*=\s*\w+')
+
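+    # matches an optional "=" (with surrounding whitespace) or plain
+    # whitespace between a table option directive and its value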
+ _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
+
+ def _add_option_string(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r"'(?P<val>(?:[^']|'')*?)'(?!')" %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(
+ regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+ ))
+
+ def _add_option_word(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>\w+)' %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(regex))
+
+ def _add_option_regex(self, directive, regex):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>%s)' %
+ (re.escape(directive), self._optional_equals, regex))
+ self._pr_options.append(_pr_compile(regex))
+
+_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
+ 'PASSWORD', 'CONNECTION')
+
+
+def _pr_compile(regex, cleanup=None):
+ """Prepare a 2-tuple of compiled regex and callable."""
+
+ return (_re_compile(regex), cleanup)
+
+
+def _re_compile(regex):
+ """Compile a string to regex, I and UNICODE."""
+
+ return re.compile(regex, re.I | re.UNICODE)
diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py
new file mode 100644
index 000000000..9512982f5
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/types.py
@@ -0,0 +1,766 @@
+# mysql/types.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import datetime
+from ... import exc, util
+from ... import types as sqltypes
+
+
+class _NumericType(object):
+ """Base for MySQL numeric types.
+
+    This is the base for both NUMERIC and INTEGER, hence it's a mixin.
+
+ """
+
+ def __init__(self, unsigned=False, zerofill=False, **kw):
+ self.unsigned = unsigned
+ self.zerofill = zerofill
+ super(_NumericType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_NumericType, sqltypes.Numeric])
+
+
+class _FloatType(_NumericType, sqltypes.Float):
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ if isinstance(self, (REAL, DOUBLE)) and \
+ (
+ (precision is None and scale is not None) or
+ (precision is not None and scale is None)
+ ):
+ raise exc.ArgumentError(
+ "You must specify both precision and scale or omit "
+ "both altogether.")
+ super(_FloatType, self).__init__(
+ precision=precision, asdecimal=asdecimal, **kw)
+ self.scale = scale
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_FloatType,
+ _NumericType,
+ sqltypes.Float])
+
+
+class _IntegerType(_NumericType, sqltypes.Integer):
+ def __init__(self, display_width=None, **kw):
+ self.display_width = display_width
+ super(_IntegerType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_IntegerType,
+ _NumericType,
+ sqltypes.Integer])
+
+
+class _StringType(sqltypes.String):
+ """Base for MySQL string types."""
+
+ def __init__(self, charset=None, collation=None,
+ ascii=False, binary=False, unicode=False,
+ national=False, **kw):
+ self.charset = charset
+
+ # allow collate= or collation=
+ kw.setdefault('collation', kw.pop('collate', collation))
+
+ self.ascii = ascii
+ self.unicode = unicode
+ self.binary = binary
+ self.national = national
+ super(_StringType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_StringType, sqltypes.String])
+
+
+class _MatchType(sqltypes.Float, sqltypes.MatchType):
+ def __init__(self, **kw):
+ # TODO: float arguments?
+ sqltypes.Float.__init__(self)
+ sqltypes.MatchType.__init__(self)
+
+
+class NUMERIC(_NumericType, sqltypes.NUMERIC):
+ """MySQL NUMERIC type."""
+
+ __visit_name__ = 'NUMERIC'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a NUMERIC.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(NUMERIC, self).__init__(precision=precision,
+ scale=scale, asdecimal=asdecimal, **kw)
+
+
+class DECIMAL(_NumericType, sqltypes.DECIMAL):
+ """MySQL DECIMAL type."""
+
+ __visit_name__ = 'DECIMAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DECIMAL.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DECIMAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class DOUBLE(_FloatType):
+ """MySQL DOUBLE type."""
+
+ __visit_name__ = 'DOUBLE'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DOUBLE.
+
+ .. note::
+
+ The :class:`.DOUBLE` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DOUBLE, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class REAL(_FloatType, sqltypes.REAL):
+ """MySQL REAL type."""
+
+ __visit_name__ = 'REAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a REAL.
+
+ .. note::
+
+ The :class:`.REAL` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(REAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class FLOAT(_FloatType, sqltypes.FLOAT):
+ """MySQL FLOAT type."""
+
+ __visit_name__ = 'FLOAT'
+
+ def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
+ """Construct a FLOAT.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(FLOAT, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+ def bind_processor(self, dialect):
+ return None
+
+
+class INTEGER(_IntegerType, sqltypes.INTEGER):
+ """MySQL INTEGER type."""
+
+ __visit_name__ = 'INTEGER'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct an INTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(INTEGER, self).__init__(display_width=display_width, **kw)
+
+
+class BIGINT(_IntegerType, sqltypes.BIGINT):
+ """MySQL BIGINTEGER type."""
+
+ __visit_name__ = 'BIGINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a BIGINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(BIGINT, self).__init__(display_width=display_width, **kw)
+
+
+class MEDIUMINT(_IntegerType):
+ """MySQL MEDIUMINTEGER type."""
+
+ __visit_name__ = 'MEDIUMINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a MEDIUMINTEGER
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
+
+
+class TINYINT(_IntegerType):
+ """MySQL TINYINT type."""
+
+ __visit_name__ = 'TINYINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a TINYINT.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(TINYINT, self).__init__(display_width=display_width, **kw)
+
+
+class SMALLINT(_IntegerType, sqltypes.SMALLINT):
+ """MySQL SMALLINTEGER type."""
+
+ __visit_name__ = 'SMALLINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a SMALLINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+          left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(SMALLINT, self).__init__(display_width=display_width, **kw)
+
+
+class BIT(sqltypes.TypeEngine):
+ """MySQL BIT type.
+
+    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
+    for MEMORY, InnoDB and BDB. For older versions, use a
+ MSTinyInteger() type.
+
+ """
+
+ __visit_name__ = 'BIT'
+
+ def __init__(self, length=None):
+ """Construct a BIT.
+
+ :param length: Optional, number of bits.
+
+ """
+ self.length = length
+
+ def result_processor(self, dialect, coltype):
+ """Convert a MySQL's 64 bit, variable length binary string to a long.
+
+        TODO: this is MySQLdb and pyodbc specific. OurSQL and mysqlconnector
+ already do this, so this logic should be moved to those dialects.
+
+ """
+
+ def process(value):
+ if value is not None:
+ v = 0
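+                # fold bytes big-endian into an integer,
+                # e.g. b'\x02\x01' -> (2 << 8) | 1 == 513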
+ for i in value:
+ if not isinstance(i, int):
+ i = ord(i) # convert byte to int on Python 2
+ v = v << 8 | i
+ return v
+ return value
+ return process
+
+
+class TIME(sqltypes.TIME):
+ """MySQL TIME type. """
+
+ __visit_name__ = 'TIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8 The MySQL-specific TIME
+ type as well as fractional seconds support.
+
+ """
+ super(TIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+ def result_processor(self, dialect, coltype):
+ time = datetime.time
+
+ def process(value):
+ # convert from a timedelta value
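+            # e.g. timedelta(seconds=3810) -> datetime.time(1, 3, 30)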
+ if value is not None:
+ microseconds = value.microseconds
+ seconds = value.seconds
+ minutes = seconds // 60
+ return time(minutes // 60,
+ minutes % 60,
+ seconds - minutes * 60,
+ microsecond=microseconds)
+ else:
+ return None
+ return process
+
+
+class TIMESTAMP(sqltypes.TIMESTAMP):
+ """MySQL TIMESTAMP type.
+
+ """
+
+ __visit_name__ = 'TIMESTAMP'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIMESTAMP type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIMESTAMP type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
+ with fractional seconds support.
+
+ """
+ super(TIMESTAMP, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class DATETIME(sqltypes.DATETIME):
+ """MySQL DATETIME type.
+
+ """
+
+ __visit_name__ = 'DATETIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL DATETIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the DATETIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
+ with fractional seconds support.
+
+ """
+ super(DATETIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class YEAR(sqltypes.TypeEngine):
+ """MySQL YEAR type, for single byte storage of years 1901-2155."""
+
+ __visit_name__ = 'YEAR'
+
+ def __init__(self, display_width=None):
+ self.display_width = display_width
+
+
+class TEXT(_StringType, sqltypes.TEXT):
+ """MySQL TEXT type, for text up to 2^16 characters."""
+
+ __visit_name__ = 'TEXT'
+
+ def __init__(self, length=None, **kw):
+ """Construct a TEXT.
+
+ :param length: Optional, if provided the server may optimize storage
+ by substituting the smallest TEXT type sufficient to store
+ ``length`` characters.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(TEXT, self).__init__(length=length, **kw)
+
+
+class TINYTEXT(_StringType):
+ """MySQL TINYTEXT type, for text up to 2^8 characters."""
+
+ __visit_name__ = 'TINYTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a TINYTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(TINYTEXT, self).__init__(**kwargs)
+
+
+class MEDIUMTEXT(_StringType):
+ """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
+
+ __visit_name__ = 'MEDIUMTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a MEDIUMTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(MEDIUMTEXT, self).__init__(**kwargs)
+
+
+class LONGTEXT(_StringType):
+ """MySQL LONGTEXT type, for text up to 2^32 characters."""
+
+ __visit_name__ = 'LONGTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a LONGTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(LONGTEXT, self).__init__(**kwargs)
+
+
+class VARCHAR(_StringType, sqltypes.VARCHAR):
+ """MySQL VARCHAR type, for variable-length character data."""
+
+ __visit_name__ = 'VARCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct a VARCHAR.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(VARCHAR, self).__init__(length=length, **kwargs)
+
+
+class CHAR(_StringType, sqltypes.CHAR):
+ """MySQL CHAR type, for fixed-length character data."""
+
+ __visit_name__ = 'CHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct a CHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
+ """
+ super(CHAR, self).__init__(length=length, **kwargs)
+
+ @classmethod
+    def _adapt_string_for_cast(cls, type_):
+ # copy the given string type into a CHAR
+ # for the purposes of rendering a CAST expression
+ type_ = sqltypes.to_instance(type_)
+ if isinstance(type_, sqltypes.CHAR):
+ return type_
+ elif isinstance(type_, _StringType):
+ return CHAR(
+ length=type_.length,
+ charset=type_.charset,
+ collation=type_.collation,
+ ascii=type_.ascii,
+ binary=type_.binary,
+ unicode=type_.unicode,
+ national=False # not supported in CAST
+ )
+ else:
+ return CHAR(length=type_.length)
+
+
+class NVARCHAR(_StringType, sqltypes.NVARCHAR):
+ """MySQL NVARCHAR type.
+
+ For variable-length character data in the server's configured national
+ character set.
+ """
+
+ __visit_name__ = 'NVARCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct an NVARCHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
+ """
+ kwargs['national'] = True
+ super(NVARCHAR, self).__init__(length=length, **kwargs)
+
+
+class NCHAR(_StringType, sqltypes.NCHAR):
+ """MySQL NCHAR type.
+
+ For fixed-length character data in the server's configured national
+ character set.
+ """
+
+ __visit_name__ = 'NCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct an NCHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
+ """
+ kwargs['national'] = True
+ super(NCHAR, self).__init__(length=length, **kwargs)
+
+
+class TINYBLOB(sqltypes._Binary):
+ """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
+
+ __visit_name__ = 'TINYBLOB'
+
+
+class MEDIUMBLOB(sqltypes._Binary):
+ """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
+
+ __visit_name__ = 'MEDIUMBLOB'
+
+
+class LONGBLOB(sqltypes._Binary):
+ """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
+
+ __visit_name__ = 'LONGBLOB'
diff --git a/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
index 3fbe921ca..8872daf81 100644
--- a/lib/sqlalchemy/dialects/oracle/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
@@ -10,8 +10,7 @@
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
- :driverurl: http://www.oracle.com/technology/software/tech/java/\
-sqlj_jdbc/index.html.
+ :driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py
index b88f139de..f4316d318 100644
--- a/lib/sqlalchemy/dialects/postgresql/array.py
+++ b/lib/sqlalchemy/dialects/postgresql/array.py
@@ -84,12 +84,20 @@ class array(expression.Tuple):
super(array, self).__init__(*clauses, **kw)
self.type = ARRAY(self.type)
- def _bind_param(self, operator, obj):
- return array([
- expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
- for o in obj
- ])
+ def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
+ if _assume_scalar or operator is operators.getitem:
+ # if getitem->slice were called, Indexable produces
+ # a Slice object from that
+ assert isinstance(obj, int)
+ return expression.BindParameter(
+ None, obj, _compared_to_operator=operator,
+ type_=type_,
+ _compared_to_type=self.type, unique=True)
+
+ else:
+ return array([
+ self._bind_param(operator, o, _assume_scalar=True, type_=type_)
+ for o in obj])
def self_group(self, against=None):
if (against in (
@@ -106,15 +114,15 @@ CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
-class ARRAY(SchemaEventTarget, sqltypes.Array):
+class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
"""Postgresql ARRAY type.
.. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
- a subclass of the core :class:`.Array` type.
+ a subclass of the core :class:`.types.ARRAY` type.
The :class:`.postgresql.ARRAY` type is constructed in the same way
- as the core :class:`.Array` type; a member type is required, and a
+ as the core :class:`.types.ARRAY` type; a member type is required, and a
number of dimensions is recommended if the type is to be used for more
than one dimension::
@@ -125,9 +133,9 @@ class ARRAY(SchemaEventTarget, sqltypes.Array):
)
The :class:`.postgresql.ARRAY` type provides all operations defined on the
- core :class:`.Array` type, including support for "dimensions", indexed
- access, and simple matching such as :meth:`.Array.Comparator.any`
- and :meth:`.Array.Comparator.all`. :class:`.postgresql.ARRAY` class also
+ core :class:`.types.ARRAY` type, including support for "dimensions", indexed
+ access, and simple matching such as :meth:`.types.ARRAY.Comparator.any`
+ and :meth:`.types.ARRAY.Comparator.all`. :class:`.postgresql.ARRAY` class also
provides PostgreSQL-specific methods for containment operations, including
:meth:`.postgresql.ARRAY.Comparator.contains`
:meth:`.postgresql.ARRAY.Comparator.contained_by`,
@@ -144,20 +152,20 @@ class ARRAY(SchemaEventTarget, sqltypes.Array):
.. seealso::
- :class:`.types.Array` - base array type
+ :class:`.types.ARRAY` - base array type
:class:`.postgresql.array` - produces a literal array value.
"""
- class Comparator(sqltypes.Array.Comparator):
+ class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`.ARRAY`.
Note that these operations are in addition to those provided
- by the base :class:`.types.Array.Comparator` class, including
- :meth:`.types.Array.Comparator.any` and
- :meth:`.types.Array.Comparator.all`.
+ by the base :class:`.types.ARRAY.Comparator` class, including
+ :meth:`.types.ARRAY.Comparator.any` and
+ :meth:`.types.ARRAY.Comparator.all`.
"""
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index e9001f79a..692305319 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -579,7 +579,7 @@ use the following workaround type::
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
- return inner.split(",")
+ return inner.split(",") if inner else []
def process(value):
if value is None:
@@ -1056,6 +1056,16 @@ class PGCompiler(compiler.SQLCompiler):
self.process(element.stop, **kw),
)
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ return self._generate_generic_binary(
+ binary, " -> ", **kw
+ )
+
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ return self._generate_generic_binary(
+ binary, " #> ", **kw
+ )
+
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
@@ -1471,8 +1481,11 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
raise exc.CompileError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name)
- if not self.omit_schema and use_schema and type_.schema is not None:
- name = self.quote_schema(type_.schema) + "." + name
+ effective_schema = self.schema_for_object(type_)
+
+ if not self.omit_schema and use_schema and \
+ effective_schema is not None:
+ name = self.quote_schema(effective_schema) + "." + name
return name
@@ -1565,10 +1578,15 @@ class PGExecutionContext(default.DefaultExecutionContext):
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
- sch = column.table.schema
- if sch is not None:
+ if column.table is not None:
+ effective_schema = self.connection.schema_for_object(
+ column.table)
+ else:
+ effective_schema = None
+
+ if effective_schema is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
- (sch, seq_name)
+ (effective_schema, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py
index 1a443c2d7..66c7ed0e5 100644
--- a/lib/sqlalchemy/dialects/postgresql/ext.py
+++ b/lib/sqlalchemy/dialects/postgresql/ext.py
@@ -159,7 +159,7 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
def array_agg(*arg, **kw):
"""Postgresql-specific form of :class:`.array_agg`, ensures
return type is :class:`.postgresql.ARRAY` and not
- the plain :class:`.types.Array`.
+ the plain :class:`.types.ARRAY`.
.. versionadded:: 1.1
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index b7b0fc007..d2d20386a 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -12,34 +12,33 @@ from .array import ARRAY
from ... import types as sqltypes
from ...sql import functions as sqlfunc
from ...sql import operators
-from ...sql.operators import custom_op
from ... import util
__all__ = ('HSTORE', 'hstore')
-INDEX = custom_op(
- "->", precedence=5, natural_self_precedent=True
+GETITEM = operators.custom_op(
+ "->", precedence=15, natural_self_precedent=True,
)
HAS_KEY = operators.custom_op(
- "?", precedence=5, natural_self_precedent=True
+ "?", precedence=15, natural_self_precedent=True
)
HAS_ALL = operators.custom_op(
- "?&", precedence=5, natural_self_precedent=True
+ "?&", precedence=15, natural_self_precedent=True
)
HAS_ANY = operators.custom_op(
- "?|", precedence=5, natural_self_precedent=True
+ "?|", precedence=15, natural_self_precedent=True
)
CONTAINS = operators.custom_op(
- "@>", precedence=5, natural_self_precedent=True
+ "@>", precedence=15, natural_self_precedent=True
)
CONTAINED_BY = operators.custom_op(
- "<@", precedence=5, natural_self_precedent=True
+ "<@", precedence=15, natural_self_precedent=True
)
@@ -166,7 +165,7 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
CONTAINED_BY, other, result_type=sqltypes.Boolean)
def _setup_getitem(self, index):
- return INDEX, index, self.type.text_type
+ return GETITEM, index, self.type.text_type
def defined(self, key):
"""Boolean expression. Test for presence of a non-NULL value for
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index 8a50270f5..6ff9fd88e 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -6,10 +6,10 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
-import collections
import json
+import collections
-from .base import ischema_names
+from .base import ischema_names, colspecs
from ... import types as sqltypes
from ...sql import operators
from ...sql import elements
@@ -17,70 +17,68 @@ from ... import util
__all__ = ('JSON', 'JSONB')
-
-# json : returns json
-INDEX = operators.custom_op(
- "->", precedence=5, natural_self_precedent=True
-)
-
-# path operator: returns json
-PATHIDX = operators.custom_op(
- "#>", precedence=5, natural_self_precedent=True
-)
-
-# json + astext: returns text
ASTEXT = operators.custom_op(
- "->>", precedence=5, natural_self_precedent=True
+ "->>", precedence=15, natural_self_precedent=True,
)
-# path operator + astext: returns text
-ASTEXT_PATHIDX = operators.custom_op(
- "#>>", precedence=5, natural_self_precedent=True
+JSONPATH_ASTEXT = operators.custom_op(
+ "#>>", precedence=15, natural_self_precedent=True,
)
+
HAS_KEY = operators.custom_op(
- "?", precedence=5, natural_self_precedent=True
+ "?", precedence=15, natural_self_precedent=True
)
HAS_ALL = operators.custom_op(
- "?&", precedence=5, natural_self_precedent=True
+ "?&", precedence=15, natural_self_precedent=True
)
HAS_ANY = operators.custom_op(
- "?|", precedence=5, natural_self_precedent=True
+ "?|", precedence=15, natural_self_precedent=True
)
CONTAINS = operators.custom_op(
- "@>", precedence=5, natural_self_precedent=True
+ "@>", precedence=15, natural_self_precedent=True
)
CONTAINED_BY = operators.custom_op(
- "<@", precedence=5, natural_self_precedent=True
+ "<@", precedence=15, natural_self_precedent=True
)
-class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
- """Represent the Postgresql JSON type.
+class JSONPathType(sqltypes.JSON.JSONPathType):
+ def bind_processor(self, dialect):
+ def process(value):
+ assert isinstance(value, collections.Sequence)
+ tokens = [util.text_type(elem) for elem in value]
+ return "{%s}" % (", ".join(tokens))
- The :class:`.JSON` type stores arbitrary JSON format data, e.g.::
+ return process
- data_table = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', JSON)
- )
+colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
- with engine.connect() as conn:
- conn.execute(
- data_table.insert(),
- data = {"key1": "value1", "key2": "value2"}
- )
- :class:`.JSON` provides several operations:
+class JSON(sqltypes.JSON):
+ """Represent the Postgresql JSON type.
+
+ This type is a specialization of the Core-level :class:`.types.JSON`
+ type. Be sure to read the documentation for :class:`.types.JSON` for
+ important tips regarding treatment of NULL values and ORM use.
+
+ .. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a Postgresql-
+ specific specialization of the new :class:`.types.JSON` type.
+
+ The operators provided by the Postgresql version of :class:`.JSON`
+ include:
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
+ data_table.c.data[5]
+
+
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
@@ -92,11 +90,11 @@ class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
* Path index operations (the ``#>`` operator)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Path index operations returning text (the ``#>>`` operator)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
'some value'
.. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
@@ -108,36 +106,6 @@ class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
:class:`.JSON` by default, so that further JSON-oriented instructions
may be called upon the result type.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
- detect in-place mutations to the structure. In order to detect these, the
- :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
- allow "in-place" changes to the datastructure to produce events which
- will be detected by the unit of work. See the example at :class:`.HSTORE`
- for a simple example involving a dictionary.
-
- When working with NULL values, the :class:`.JSON` type recommends the
- use of two specific constants in order to differentiate between a column
- that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string
- of ``"null"``. To insert or select against a value that is SQL NULL,
- use the constant :func:`.null`::
-
- conn.execute(table.insert(), json_value=null())
-
- To insert or select against a value that is JSON ``"null"``, use the
- constant :attr:`.JSON.NULL`::
-
- conn.execute(table.insert(), json_value=JSON.NULL)
-
- The :class:`.JSON` type supports a flag
- :paramref:`.JSON.none_as_null` which when set to True will result
- in the Python constant ``None`` evaluating to the value of SQL
- NULL, and when set to False results in the Python constant
- ``None`` evaluating to the value of JSON ``"null"``. The Python
- value ``None`` may be used in conjunction with either
- :attr:`.JSON.NULL` and :func:`.null` in order to indicate NULL
- values, but care must be taken as to the value of the
- :paramref:`.JSON.none_as_null` in these cases.
-
Custom serializers and deserializers are specified at the dialect level,
    that is, using :func:`.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
@@ -151,43 +119,16 @@ class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
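
    For example, a minimal sketch passing JSON codec callables to
    :func:`.create_engine` (the standard library ``json`` module stands in
    here for any compatible serializer)::

        import json
        from sqlalchemy import create_engine

        engine = create_engine(
            "postgresql://scott:tiger@localhost/test",
            json_serializer=json.dumps,
            json_deserializer=json.loads
        )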
- .. versionadded:: 0.9
-
.. seealso::
+ :class:`.types.JSON` - Core level JSON type
+
:class:`.JSONB`
"""
- __visit_name__ = 'JSON'
-
- hashable = False
astext_type = sqltypes.Text()
- NULL = util.symbol('JSON_NULL')
- """Describe the json value of NULL.
-
- This value is used to force the JSON value of ``"null"`` to be
- used as the value. A value of Python ``None`` will be recognized
- either as SQL NULL or JSON ``"null"``, based on the setting
- of the :paramref:`.JSON.none_as_null` flag; the :attr:`.JSON.NULL`
- constant can be used to always resolve to JSON ``"null"`` regardless
- of this setting. This is in contrast to the :func:`.sql.null` construct,
- which always resolves to SQL NULL. E.g.::
-
- from sqlalchemy import null
- from sqlalchemy.dialects.postgresql import JSON
-
- obj1 = MyObject(json_value=null()) # will *always* insert SQL NULL
- obj2 = MyObject(json_value=JSON.NULL) # will *always* insert JSON string "null"
-
- session.add_all([obj1, obj2])
- session.commit()
-
- .. versionadded:: 1.1
-
- """
-
def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
@@ -210,15 +151,14 @@ class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
:attr:`.JSON.Comparator.astext`
accessor on indexed attributes. Defaults to :class:`.types.Text`.
- .. versionadded:: 1.1.0
+ .. versionadded:: 1.1
"""
- self.none_as_null = none_as_null
+ super(JSON, self).__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
- class Comparator(
- sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
+ class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
@property
@@ -235,69 +175,19 @@ class JSON(sqltypes.Indexable, sqltypes.TypeEngine):
:meth:`.ColumnElement.cast`
"""
- against = self.expr.operator
- if against is PATHIDX:
- against = ASTEXT_PATHIDX
- else:
- against = ASTEXT
- return self.expr.left.operate(
- against, self.expr.right, result_type=self.type.astext_type)
-
- def _setup_getitem(self, index):
- if not isinstance(index, util.string_types):
- assert isinstance(index, collections.Sequence)
- tokens = [util.text_type(elem) for elem in index]
- index = "{%s}" % (", ".join(tokens))
- operator = PATHIDX
+ if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
+ return self.expr.left.operate(
+ JSONPATH_ASTEXT,
+ self.expr.right, result_type=self.type.astext_type)
else:
- operator = INDEX
-
- return operator, index, self.type
+ return self.expr.left.operate(
+ ASTEXT, self.expr.right, result_type=self.type.astext_type)
comparator_factory = Comparator
- @property
- def should_evaluate_none(self):
- return not self.none_as_null
-
- def bind_processor(self, dialect):
- json_serializer = dialect._json_serializer or json.dumps
- if util.py2k:
- encoding = dialect.encoding
- else:
- encoding = None
-
- def process(value):
- if value is self.NULL:
- value = None
- elif isinstance(value, elements.Null) or (
- value is None and self.none_as_null
- ):
- return None
- if encoding:
- return json_serializer(value).encode(encoding)
- else:
- return json_serializer(value)
-
- return process
-
- def result_processor(self, dialect, coltype):
- json_deserializer = dialect._json_deserializer or json.loads
- if util.py2k:
- encoding = dialect.encoding
- else:
- encoding = None
-
- def process(value):
- if value is None:
- return None
- if encoding:
- value = value.decode(encoding)
- return json_deserializer(value)
- return process
-
+colspecs[sqltypes.JSON] = JSON
ischema_names['json'] = JSON
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index c71f689a3..2c745e6f7 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -155,6 +155,7 @@ class PGDialect_pg8000(PGDialect):
sqltypes.Numeric: _PGNumericNoBind,
sqltypes.Float: _PGNumeric,
JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON
}
)
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index d33554922..82fcc9054 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -534,6 +534,7 @@ class PGDialect_psycopg2(PGDialect):
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID
}
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index a1786d16c..0e048aeff 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -352,19 +352,29 @@ The index will be rendered at create time as::
.. versionadded:: 0.9.9
+.. _sqlite_dotted_column_names:
+
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is a bad idea for relational
databases in general, as the dot is a syntactically significant character,
-the SQLite driver has a bug which requires that SQLAlchemy filter out these
-dots in result sets.
+the SQLite driver, up until version **3.10.0** of SQLite, has a bug which
+requires that SQLAlchemy filter out these dots in result sets.
+
+.. versionchanged:: 1.1
+
+ The following SQLite issue has been resolved as of version 3.10.0
+ of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
+ workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
+ assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
+
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
@@ -997,9 +1007,13 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
- return self.execution_options.get("sqlite_raw_colnames", False)
+ return not self.dialect._broken_dotted_colnames or \
+ self.execution_options.get("sqlite_raw_colnames", False)
def _translate_colname(self, colname):
+ # TODO: detect SQLite version 3.10.0 or greater;
+ # see [ticket:3633]
+
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
@@ -1019,7 +1033,6 @@ class SQLiteDialect(default.DefaultDialect):
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
- supports_right_nested_joins = False
default_paramstyle = 'qmark'
execution_ctx_cls = SQLiteExecutionContext
@@ -1044,6 +1057,7 @@ class SQLiteDialect(default.DefaultDialect):
]
_broken_fk_pragma_quotes = False
+ _broken_dotted_colnames = False
def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
@@ -1056,6 +1070,11 @@ class SQLiteDialect(default.DefaultDialect):
self.native_datetime = native_datetime
if self.dbapi is not None:
+ self.supports_right_nested_joins = (
+ self.dbapi.sqlite_version_info >= (3, 7, 16))
+ self._broken_dotted_colnames = (
+ self.dbapi.sqlite_version_info < (3, 10, 0)
+ )
self.supports_default_values = (
self.dbapi.sqlite_version_info >= (3, 3, 8))
self.supports_cast = (
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index b3f8e307a..187521831 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -336,11 +336,7 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
- if not limit:
- # FIXME: sybase doesn't allow an offset without a limit
- # so use a huge value for TOP here
- s += "TOP 1000000 "
- s += "START AT %s " % (offset + 1,)
+ raise NotImplementedError("Sybase ASE does not support OFFSET")
return s
def get_from_hint_text(self, table, text):
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index 0b0d50329..02c35d6a9 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -53,6 +53,7 @@ url.py
from .interfaces import (
Connectable,
+ CreateEnginePlugin,
Dialect,
ExecutionContext,
ExceptionContext,
@@ -390,7 +391,7 @@ def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
-
+
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index eaa435d45..0b928566d 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -14,6 +14,7 @@ from __future__ import with_statement
import sys
from .. import exc, util, log, interfaces
from ..sql import util as sql_util
+from ..sql import schema
from .interfaces import Connectable, ExceptionContext
from .util import _distill_params
import contextlib
@@ -44,6 +45,22 @@ class Connection(Connectable):
"""
+ schema_for_object = schema._schema_getter(None)
+ """Return the ".schema" attribute for an object.
+
+ Used for :class:`.Table`, :class:`.Sequence` and similar objects,
+ and takes into account
+ the :paramref:`.Connection.execution_options.schema_translate_map`
+ parameter.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`schema_translating`
+
+ """
+
def __init__(self, engine, connection=None, close_with_result=False,
_branch_from=None, _execution_options=None,
_dispatch=None,
@@ -67,6 +84,7 @@ class Connection(Connectable):
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
+ self.schema_for_object = _branch_from.schema_for_object
else:
self.__connection = connection \
if connection is not None else engine.raw_connection()
@@ -277,6 +295,19 @@ class Connection(Connectable):
of many DBAPIs. The flag is currently understood only by the
psycopg2 dialect.
+ :param schema_translate_map: Available on: Connection, Engine.
+ A dictionary mapping schema names to schema names, that will be
+ applied to the :paramref:`.Table.schema` element of each
+ :class:`.Table` encountered when SQL or DDL expression elements
+ are compiled into strings; the resulting schema name will be
+ converted based on presence in the map of the original name.
+
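+          E.g., a minimal sketch (the schema names shown are
+          illustrative)::
+
+              connection = engine.connect().execution_options(
+                  schema_translate_map={
+                      None: "user_schema_one",
+                      "special": "special_schema"
+                  })
+
+          Above, a :class:`.Table` with no schema would render using
+          ``user_schema_one``, and one with schema ``special`` would
+          render using ``special_schema``.
+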
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`schema_translating`
+
"""
c = self._clone()
c._execution_options = c._execution_options.union(opt)
@@ -959,7 +990,10 @@ class Connection(Connectable):
dialect = self.dialect
- compiled = ddl.compile(dialect=dialect)
+ compiled = ddl.compile(
+ dialect=dialect,
+ schema_translate_map=self.schema_for_object
+ if not self.schema_for_object.is_default else None)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
@@ -990,17 +1024,26 @@ class Connection(Connectable):
dialect = self.dialect
if 'compiled_cache' in self._execution_options:
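+            # the schema translate map participates in the cache key, so
+            # that statements compiled under different translate maps are
+            # cached separately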
- key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1
+ key = (
+ dialect, elem, tuple(sorted(keys)),
+ self.schema_for_object.hash_key,
+ len(distilled_params) > 1
+ )
compiled_sql = self._execution_options['compiled_cache'].get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
- inline=len(distilled_params) > 1)
+ inline=len(distilled_params) > 1,
+ schema_translate_map=self.schema_for_object
+ if not self.schema_for_object.is_default else None
+ )
self._execution_options['compiled_cache'][key] = compiled_sql
else:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
- inline=len(distilled_params) > 1)
+ inline=len(distilled_params) > 1,
+ schema_translate_map=self.schema_for_object
+ if not self.schema_for_object.is_default else None)
ret = self._execute_context(
dialect,
@@ -1155,7 +1198,7 @@ class Connection(Connectable):
if context.compiled:
context.post_exec()
- if context.is_crud:
+ if context.is_crud or context.is_text:
result = context._setup_crud_result_proxy()
else:
result = context.get_result_proxy()
@@ -1686,6 +1729,22 @@ class Engine(Connectable, log.Identified):
_has_events = False
_connection_cls = Connection
+ schema_for_object = schema._schema_getter(None)
+ """Return the ".schema" attribute for an object.
+
+ Used for :class:`.Table`, :class:`.Sequence` and similar objects,
+ and takes into account
+ the :paramref:`.Connection.execution_options.schema_translate_map`
+ parameter.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`schema_translating`
+
+ """
+
def __init__(self, pool, dialect, url,
logging_name=None, echo=None, proxy=None,
execution_options=None
diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py
index 9a7b80bfd..9f845e79d 100644
--- a/lib/sqlalchemy/engine/default.py
+++ b/lib/sqlalchemy/engine/default.py
@@ -16,7 +16,7 @@ as the base class for their own corresponding classes.
import re
import random
from . import reflection, interfaces, result
-from ..sql import compiler, expression
+from ..sql import compiler, expression, schema
from .. import types as sqltypes
from .. import exc, util, pool, processors
import codecs
@@ -398,10 +398,22 @@ class DefaultDialect(interfaces.Dialect):
if not branch:
self._set_connection_isolation(connection, isolation_level)
+ if 'schema_translate_map' in opts:
+ getter = schema._schema_getter(opts['schema_translate_map'])
+ engine.schema_for_object = getter
+
+            @event.listens_for(engine, "engine_connect")
+            def set_schema_translate_map(connection, branch):
+                connection.schema_for_object = getter
+
def set_connection_execution_options(self, connection, opts):
if 'isolation_level' in opts:
self._set_connection_isolation(connection, opts['isolation_level'])
+ if 'schema_translate_map' in opts:
+ getter = schema._schema_getter(opts['schema_translate_map'])
+ connection.schema_for_object = getter
+
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
@@ -462,11 +474,29 @@ class DefaultDialect(interfaces.Dialect):
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
+class StrCompileDialect(DefaultDialect):
+
+ statement_compiler = compiler.StrSQLCompiler
+ ddl_compiler = compiler.DDLCompiler
+ type_compiler = compiler.StrSQLTypeCompiler
+ preparer = compiler.IdentifierPreparer
+
+ supports_sequences = True
+ sequences_optional = True
+ preexecute_autoincrement_sequences = False
+ implicit_returning = False
+
+ supports_native_boolean = True
+
+ supports_simple_order_by_label = True
+
+
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
+ is_text = False
isddl = False
executemany = False
compiled = None
@@ -531,7 +561,8 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
connection._execution_options)
self.result_column_struct = (
- compiled._result_columns, compiled._ordered_columns)
+ compiled._result_columns, compiled._ordered_columns,
+ compiled._textual_ordered_columns)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
@@ -543,6 +574,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
+ self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
@@ -622,6 +654,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
+ self.is_text = True
# plain text statement
self.execution_options = connection._execution_options
diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py
index 3bad765df..c84823d1e 100644
--- a/lib/sqlalchemy/engine/interfaces.py
+++ b/lib/sqlalchemy/engine/interfaces.py
@@ -7,7 +7,7 @@
"""Define core interfaces used by the engine system."""
-from .. import util, event
+from .. import util
# backwards compat
from ..sql.compiler import Compiled, TypeCompiler
@@ -781,6 +781,111 @@ class Dialect(object):
pass
+class CreateEnginePlugin(object):
+ """A set of hooks intended to augment the construction of an
+ :class:`.Engine` object based on entrypoint names in a URL.
+
+ The purpose of :class:`.CreateEnginePlugin` is to allow third-party
+ systems to apply engine, pool and dialect level event listeners without
+ the need for the target application to be modified; instead, the plugin
+ names can be added to the database URL. Target applications for
+ :class:`.CreateEnginePlugin` include:
+
+    * connection and SQL performance tools, e.g. those which use events to
+      track the number of checkouts and/or time spent with statements
+
+ * connectivity plugins such as proxies
+
+ Plugins are registered using entry points in a similar way as that
+ of dialects::
+
+ entry_points={
+ 'sqlalchemy.plugins': [
+ 'myplugin = myapp.plugins:MyPlugin'
+            ]
+        }
+
+ A plugin that uses the above names would be invoked from a database
+ URL as in::
+
+ from sqlalchemy import create_engine
+
+ engine = create_engine(
+ "mysql+pymysql://scott:tiger@localhost/test?plugin=myplugin")
+
+ The ``plugin`` argument supports multiple instances, so that a URL
+ may specify multiple plugins; they are loaded in the order stated
+ in the URL::
+
+ engine = create_engine(
+ "mysql+pymysql://scott:tiger@localhost/"
+ "test?plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three")
+
+ A plugin can receive additional arguments from the URL string as
+ well as from the keyword arguments passed to :func:`.create_engine`.
+ The :class:`.URL` object and the keyword dictionary are passed to the
+ constructor so that these arguments can be extracted from the url's
+ :attr:`.URL.query` collection as well as from the dictionary::
+
+ class MyPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.my_argument_one = url.query.pop('my_argument_one')
+ self.my_argument_two = url.query.pop('my_argument_two')
+ self.my_argument_three = kwargs.pop('my_argument_three', None)
+
+ Arguments like those illustrated above would be consumed from the
+ following::
+
+ from sqlalchemy import create_engine
+
+ engine = create_engine(
+ "mysql+pymysql://scott:tiger@localhost/"
+ "test?plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
+ my_argument_three='bat')
+
+    The URL and keyword dictionary are used as-is for the subsequent setup
+    of the engine, so the plugin may modify them in place.  Arguments that
+    are understood only by the plugin should be popped or otherwise removed
+    so that they aren't later interpreted as erroneous arguments.
+
+ When the engine creation process completes and produces the
+ :class:`.Engine` object, it is again passed to the plugin via the
+ :meth:`.CreateEnginePlugin.engine_created` hook. In this hook, additional
+ changes can be made to the engine, most typically involving setup of
+ events (e.g. those defined in :ref:`core_event_toplevel`).
+
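+    For example, a minimal sketch of a plugin that counts pool checkouts
+    (the plugin name and counter attribute are illustrative only)::
+
+        from sqlalchemy import event
+        from sqlalchemy.engine import CreateEnginePlugin
+
+        class MyPlugin(CreateEnginePlugin):
+            def __init__(self, url, kwargs):
+                self.checkout_count = 0
+
+            def engine_created(self, engine):
+                @event.listens_for(engine, "checkout")
+                def receive_checkout(
+                        dbapi_connection, connection_record,
+                        connection_proxy):
+                    self.checkout_count += 1
+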
+ .. versionadded:: 1.1
+
+ """
+ def __init__(self, url, kwargs):
+ """Contruct a new :class:`.CreateEnginePlugin`.
+
+ The plugin object is instantiated individually for each call
+ to :func:`.create_engine`. A single :class:`.Engine` will be
+ passed to the :meth:`.CreateEnginePlugin.engine_created` method
+ corresponding to this URL.
+
+ :param url: the :class:`.URL` object. The plugin should inspect
+ what it needs here as well as remove its custom arguments from the
+ :attr:`.URL.query` collection. The URL can be modified in-place
+ in any other way as well.
+        :param kwargs: The keyword arguments passed to :func:`.create_engine`.
+ The plugin can read and modify this dictionary in-place, to affect
+ the ultimate arguments used to create the engine. It should
+ remove its custom arguments from the dictionary as well.
+
+ """
+ self.url = url
+
+ def engine_created(self, engine):
+ """Receive the :class:`.Engine` object when it is fully constructed.
+
+ The plugin may make additional changes to the engine, such as
+ registering engine or connection pool events.
+
+ """
+
+
class ExecutionContext(object):
"""A messenger object for a Dialect that corresponds to a single
execution.
diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py
index 59eed51ec..6880660ce 100644
--- a/lib/sqlalchemy/engine/reflection.py
+++ b/lib/sqlalchemy/engine/reflection.py
@@ -529,7 +529,8 @@ class Inspector(object):
"""
dialect = self.bind.dialect
- schema = table.schema
+ schema = self.bind.schema_for_object(table)
+
table_name = table.name
# get table-level arguments that are specifically
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 7d1425c28..cc4ac74cd 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -84,8 +84,8 @@ except ImportError:
raise
if index is None:
raise exc.InvalidRequestError(
- "Ambiguous column name '%s' in result set! "
- "try 'use_labels' option on select statement." % key)
+ "Ambiguous column name '%s' in "
+ "result set column descriptions" % obj)
if processor is not None:
return processor(self._row[index])
else:
@@ -186,97 +186,29 @@ class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
- def __init__(self, parent, metadata):
+ __slots__ = (
+ '_keymap', 'case_sensitive', 'matched_on_name',
+ '_processors', 'keys', '_orig_processors')
+
+ def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
- typemap = dialect.dbapi_type_map
- translate_colname = context._translate_colname
- self.case_sensitive = case_sensitive = dialect.case_sensitive
+ self.case_sensitive = dialect.case_sensitive
+ self.matched_on_name = False
if context.result_column_struct:
- result_columns, cols_are_ordered = context.result_column_struct
+ result_columns, cols_are_ordered, textual_ordered = \
+ context.result_column_struct
num_ctx_cols = len(result_columns)
else:
- num_ctx_cols = None
-
- if num_ctx_cols and \
- cols_are_ordered and \
- num_ctx_cols == len(metadata):
- # case 1 - SQL expression statement, number of columns
- # in result matches number of cols in compiled. This is the
- # vast majority case for SQL expression constructs. In this
- # case we don't bother trying to parse or match up to
- # the colnames in the result description.
- raw = [
- (
- idx,
- key,
- name.lower() if not case_sensitive else name,
- context.get_result_processor(
- type_, key, metadata[idx][1]
- ),
- obj,
- None
- ) for idx, (key, name, obj, type_)
- in enumerate(result_columns)
- ]
- self.keys = [
- elem[0] for elem in result_columns
- ]
- else:
- # case 2 - raw string, or number of columns in result does
- # not match number of cols in compiled. The raw string case
- # is very common. The latter can happen
- # when text() is used with only a partial typemap, or
- # in the extremely unlikely cases where the compiled construct
- # has a single element with multiple col expressions in it
- # (e.g. has commas embedded) or there's some kind of statement
- # that is adding extra columns.
- # In all these cases we fall back to the "named" approach
- # that SQLAlchemy has used up through 0.9.
-
- if num_ctx_cols:
- result_map = self._create_result_map(
- result_columns, case_sensitive)
-
- raw = []
- self.keys = []
- untranslated = None
- for idx, rec in enumerate(metadata):
- colname = rec[0]
- coltype = rec[1]
-
- if dialect.description_encoding:
- colname = dialect._description_decoder(colname)
-
- if translate_colname:
- colname, untranslated = translate_colname(colname)
-
- if dialect.requires_name_normalize:
- colname = dialect.normalize_name(colname)
-
- self.keys.append(colname)
- if not case_sensitive:
- colname = colname.lower()
-
- if num_ctx_cols:
- try:
- ctx_rec = result_map[colname]
- except KeyError:
- mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
- obj = None
- else:
- obj = ctx_rec[1]
- mapped_type = ctx_rec[2]
- else:
- mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
- obj = None
- processor = context.get_result_processor(
- mapped_type, colname, coltype)
+ result_columns = cols_are_ordered = \
+ num_ctx_cols = textual_ordered = False
- raw.append(
- (idx, colname, colname, processor, obj, untranslated)
- )
+ # merge cursor.description with the column info
+ # present in the compiled structure, if any
+ raw = self._merge_cursor_description(
+ context, cursor_description, result_columns,
+ num_ctx_cols, cols_are_ordered, textual_ordered)
# keymap indexes by integer index...
self._keymap = dict([
@@ -288,12 +220,16 @@ class ResultMetaData(object):
# views like __iter__ and slices
self._processors = [elem[3] for elem in raw]
+ # keymap by primary string...
+ by_key = dict([
+ (elem[2], (elem[3], elem[4], elem[0]))
+ for elem in raw
+ ])
+
+ # for compiled SQL constructs, copy additional lookup keys into
+ # the key lookup map, such as Column objects, labels,
+ # column keys and other names
if num_ctx_cols:
- # keymap by primary string...
- by_key = dict([
- (elem[2], (elem[3], elem[4], elem[0]))
- for elem in raw
- ])
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
@@ -304,30 +240,250 @@ class ResultMetaData(object):
for rec in raw:
key = rec[1]
if key in seen:
- by_key[key] = (None, by_key[key][1], None)
+ # this is an "ambiguous" element, replacing
+ # the full record in the map
+ by_key[key] = (None, key, None)
seen.add(key)
- # update keymap with secondary "object"-based keys
+ # copy secondary elements from compiled columns
+ # into self._keymap, write in the potentially "ambiguous"
+ # element
+ self._keymap.update([
+ (obj_elem, by_key[elem[2]])
+ for elem in raw if elem[4]
+ for obj_elem in elem[4]
+ ])
+
+ # if we did a pure positional match, then reset the
+ # original "expression element" back to the "unambiguous"
+ # entry. This is a new behavior in 1.1 which impacts
+ # TextAsFrom but also straight compiled SQL constructs.
+ if not self.matched_on_name:
+ self._keymap.update([
+ (elem[4][0], (elem[3], elem[4], elem[0]))
+ for elem in raw if elem[4]
+ ])
+ else:
+ # no dupes - copy secondary elements from compiled
+ # columns into self._keymap
+ self._keymap.update([
+ (obj_elem, (elem[3], elem[4], elem[0]))
+ for elem in raw if elem[4]
+ for obj_elem in elem[4]
+ ])
+
+ # update keymap with primary string names taking
+ # precedence
+ self._keymap.update(by_key)
+
+ # update keymap with "translated" names (sqlite-only thing)
+ if not num_ctx_cols and context._translate_colname:
self._keymap.update([
- (obj_elem, by_key[elem[2]])
- for elem in raw if elem[4]
- for obj_elem in elem[4]
+ (elem[5], self._keymap[elem[2]])
+ for elem in raw if elem[5]
])
- # update keymap with primary string names taking
- # precedence
- self._keymap.update(by_key)
+ def _merge_cursor_description(
+ self, context, cursor_description, result_columns,
+ num_ctx_cols, cols_are_ordered, textual_ordered):
+ """Merge a cursor.description with compiled result column information.
+
+ There are at least four separate strategies used here, selected
+ depending on the type of SQL construct used to start with.
+
+ The most common case is that of the compiled SQL expression construct,
+ which generated the column names present in the raw SQL string and
+ which has the identical number of columns as were reported by
+ cursor.description. In this case, we assume a 1-1 positional mapping
+ between the entries in cursor.description and the compiled object.
+ This is also the most performant case as we disregard extracting /
+ decoding the column names present in cursor.description since we
+ already have the desired name we generated in the compiled SQL
+ construct.
+
+ The next common case is that of the completely raw string SQL,
+ such as passed to connection.execute(). In this case we have no
+ compiled construct to work with, so we extract and decode the
+ names from cursor.description and index those as the primary
+ result row target keys.
+
+ The remaining fairly common case is that of the textual SQL
+ that includes at least partial column information; this is when
+    we use a :class:`.TextAsFrom` construct. This construct may have
+ unordered or ordered column information. In the ordered case, we
+ merge the cursor.description and the compiled construct's information
+ positionally, and warn if there are additional description names
+    present; however, we still decode the names in cursor.description
+ as we don't have a guarantee that the names in the columns match
+ on these. In the unordered case, we match names in cursor.description
+ to that of the compiled construct based on name matching.
+ In both of these cases, the cursor.description names and the column
+ expression objects and names are indexed as result row target keys.
+
+ The final case is much less common, where we have a compiled
+ non-textual SQL expression construct, but the number of columns
+ in cursor.description doesn't match what's in the compiled
+    construct.  We make the guess here that the compiled construct may
+    contain textual column expressions that themselves embed commas,
+    causing a single expression to split into multiple cursor columns.
+    We fall back to the same name-matching used for unordered textual
+    columns.
+
+ The name-matched system of merging is the same as that used by
+    SQLAlchemy for all cases up through the 0.9 series.  Positional
+ matching for compiled SQL expressions was introduced in 1.0 as a
+ major performance feature, and positional matching for textual
+ :class:`.TextAsFrom` objects in 1.1. As name matching is no longer
+    a common case, it was acceptable to factor it into smaller
+    generator-oriented methods that are easier to understand, but which
+    incur slightly more performance overhead.
+
+ """
+
+ case_sensitive = context.dialect.case_sensitive
+
+ if num_ctx_cols and \
+ cols_are_ordered and \
+ not textual_ordered and \
+ num_ctx_cols == len(cursor_description):
+ self.keys = [elem[0] for elem in result_columns]
+ # pure positional 1-1 case; doesn't need to read
+ # the names from cursor.description
+ return [
+ (
+ idx,
+ key,
+ name.lower() if not case_sensitive else name,
+ context.get_result_processor(
+ type_, key, cursor_description[idx][1]
+ ),
+ obj,
+ None
+ ) for idx, (key, name, obj, type_)
+ in enumerate(result_columns)
+ ]
else:
- self._keymap.update([
- (elem[2], (elem[3], elem[4], elem[0]))
- for elem in raw
- ])
- # update keymap with "translated" names (sqlite-only thing)
+ # name-based or text-positional cases, where we need
+ # to read cursor.description names
+ if textual_ordered:
+ # textual positional case
+ raw_iterator = self._merge_textual_cols_by_position(
+ context, cursor_description, result_columns)
+ elif num_ctx_cols:
+ # compiled SQL with a mismatch of description cols
+ # vs. compiled cols, or textual w/ unordered columns
+ raw_iterator = self._merge_cols_by_name(
+ context, cursor_description, result_columns)
+ else:
+ # no compiled SQL, just a raw string
+ raw_iterator = self._merge_cols_by_none(
+ context, cursor_description)
+
+ return [
+ (
+ idx, colname, colname,
+ context.get_result_processor(
+ mapped_type, colname, coltype),
+ obj, untranslated)
+
+ for idx, colname, mapped_type, coltype, obj, untranslated
+ in raw_iterator
+ ]
+
+ def _colnames_from_description(self, context, cursor_description):
+ """Extract column names and data types from a cursor.description.
+
+ Applies unicode decoding, column translation, "normalization",
+ and case sensitivity rules to the names based on the dialect.
+
+ """
+
+ dialect = context.dialect
+ case_sensitive = dialect.case_sensitive
+ translate_colname = context._translate_colname
+ description_decoder = dialect._description_decoder \
+ if dialect.description_encoding else None
+ normalize_name = dialect.normalize_name \
+ if dialect.requires_name_normalize else None
+ untranslated = None
+
+ self.keys = []
+
+ for idx, rec in enumerate(cursor_description):
+ colname = rec[0]
+ coltype = rec[1]
+
+ if description_decoder:
+ colname = description_decoder(colname)
+
if translate_colname:
- self._keymap.update([
- (elem[5], self._keymap[elem[2]])
- for elem in raw if elem[5]
- ])
+ colname, untranslated = translate_colname(colname)
+
+ if normalize_name:
+ colname = normalize_name(colname)
+
+ self.keys.append(colname)
+ if not case_sensitive:
+ colname = colname.lower()
+
+ yield idx, colname, untranslated, coltype
+
+ def _merge_textual_cols_by_position(
+ self, context, cursor_description, result_columns):
+ dialect = context.dialect
+ typemap = dialect.dbapi_type_map
+ num_ctx_cols = len(result_columns) if result_columns else None
+
+ if num_ctx_cols > len(cursor_description):
+ util.warn(
+ "Number of columns in textual SQL (%d) is "
+ "smaller than number of columns requested (%d)" % (
+                    len(cursor_description), num_ctx_cols
+ ))
+
+ seen = set()
+ for idx, colname, untranslated, coltype in \
+ self._colnames_from_description(context, cursor_description):
+ if idx < num_ctx_cols:
+ ctx_rec = result_columns[idx]
+ obj = ctx_rec[2]
+ mapped_type = ctx_rec[3]
+ if obj[0] in seen:
+ raise exc.InvalidRequestError(
+ "Duplicate column expression requested "
+ "in textual SQL: %r" % obj[0])
+ seen.add(obj[0])
+ else:
+ mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
+ obj = None
+
+ yield idx, colname, mapped_type, coltype, obj, untranslated
+
+ def _merge_cols_by_name(self, context, cursor_description, result_columns):
+ dialect = context.dialect
+ typemap = dialect.dbapi_type_map
+ case_sensitive = dialect.case_sensitive
+ result_map = self._create_result_map(result_columns, case_sensitive)
+
+ self.matched_on_name = True
+ for idx, colname, untranslated, coltype in \
+ self._colnames_from_description(context, cursor_description):
+ try:
+ ctx_rec = result_map[colname]
+ except KeyError:
+ mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
+ obj = None
+ else:
+ obj = ctx_rec[1]
+ mapped_type = ctx_rec[2]
+ yield idx, colname, mapped_type, coltype, obj, untranslated
+
+ def _merge_cols_by_none(self, context, cursor_description):
+ dialect = context.dialect
+ typemap = dialect.dbapi_type_map
+ for idx, colname, untranslated, coltype in \
+ self._colnames_from_description(context, cursor_description):
+ mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
+ yield idx, colname, mapped_type, coltype, None, untranslated
@classmethod
def _create_result_map(cls, result_columns, case_sensitive=True):
@@ -347,22 +503,6 @@ class ResultMetaData(object):
d[key] = rec
return d
- @util.pending_deprecation("0.8", "sqlite dialect uses "
- "_translate_colname() now")
- def _set_keymap_synonym(self, name, origname):
- """Set a synonym for the given name.
-
- Some dialects (SQLite at the moment) may use this to
- adjust the column names that are significant within a
- row.
-
- """
- rec = (processor, obj, i) = self._keymap[origname if
- self.case_sensitive
- else origname.lower()]
- if self._keymap.setdefault(name, rec) is not rec:
- self._keymap[name] = (processor, obj, None)
-
def _key_fallback(self, key, raiseerr=True):
map = self._keymap
result = None
@@ -427,8 +567,8 @@ class ResultMetaData(object):
if index is None:
raise exc.InvalidRequestError(
- "Ambiguous column name '%s' in result set! "
- "try 'use_labels' option on select statement." % key)
+ "Ambiguous column name '%s' in "
+ "result set column descriptions" % obj)
return operator.itemgetter(index)
@@ -441,6 +581,7 @@ class ResultMetaData(object):
),
'keys': self.keys,
"case_sensitive": self.case_sensitive,
+ "matched_on_name": self.matched_on_name
}
def __setstate__(self, state):
@@ -454,7 +595,7 @@ class ResultMetaData(object):
keymap[key] = (None, None, index)
self.keys = state['keys']
self.case_sensitive = state['case_sensitive']
- self._echo = False
+ self.matched_on_name = state['matched_on_name']
class ResultProxy(object):
@@ -511,20 +652,20 @@ class ResultProxy(object):
return has_key(key)
def _init_metadata(self):
- metadata = self._cursor_description()
- if metadata is not None:
+ cursor_description = self._cursor_description()
+ if cursor_description is not None:
if self.context.compiled and \
'compiled_cache' in self.context.execution_options:
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = self.context.compiled._cached_metadata = \
- ResultMetaData(self, metadata)
+ ResultMetaData(self, cursor_description)
else:
- self._metadata = ResultMetaData(self, metadata)
+ self._metadata = ResultMetaData(self, cursor_description)
if self._echo:
self.context.engine.logger.debug(
- "Col %r", tuple(x[0] for x in metadata))
+ "Col %r", tuple(x[0] for x in cursor_description))
def keys(self):
"""Return the current set of string keys for rows."""
diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py
index a539ee9f7..d8e2d4764 100644
--- a/lib/sqlalchemy/engine/strategies.py
+++ b/lib/sqlalchemy/engine/strategies.py
@@ -18,8 +18,9 @@ New strategies can be added via new ``EngineStrategy`` classes.
from operator import attrgetter
from sqlalchemy.engine import base, threadlocal, url
-from sqlalchemy import util, exc, event
+from sqlalchemy import util, event
from sqlalchemy import pool as poollib
+from sqlalchemy.sql import schema
strategies = {}
@@ -48,6 +49,10 @@ class DefaultEngineStrategy(EngineStrategy):
# create url.URL object
u = url.make_url(name_or_url)
+ plugins = u._instantiate_plugins(kwargs)
+
+ u.query.pop('plugin', None)
+
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
@@ -169,6 +174,9 @@ class DefaultEngineStrategy(EngineStrategy):
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
+ for plugin in plugins:
+ plugin.engine_created(engine)
+
return engine
@@ -226,6 +234,8 @@ class MockEngineStrategy(EngineStrategy):
dialect = property(attrgetter('_dialect'))
name = property(lambda s: s._dialect.name)
+ schema_for_object = schema._schema_getter(None)
+
def contextual_connect(self, **kwargs):
return self
diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py
index 32e3f8a6b..9a955948a 100644
--- a/lib/sqlalchemy/engine/url.py
+++ b/lib/sqlalchemy/engine/url.py
@@ -17,7 +17,7 @@ be used directly and is also accepted directly by ``create_engine()``.
import re
from .. import exc, util
from . import Dialect
-from ..dialects import registry
+from ..dialects import registry, plugins
class URL(object):
@@ -117,6 +117,14 @@ class URL(object):
else:
return self.drivername.split('+')[1]
+ def _instantiate_plugins(self, kwargs):
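+        # each ?plugin=<name> entry in the URL names an entrypoint in the
+        # "sqlalchemy.plugins" group; each plugin is constructed with the
+        # URL and the create_engine() keyword dictionary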
+ plugin_names = util.to_list(self.query.get('plugin', ()))
+
+ return [
+ plugins.load(plugin_name)(self, kwargs)
+ for plugin_name in plugin_names
+ ]
+
def _get_entrypoint(self):
"""Return the "entry point" dialect class.
diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py
index 218ed64e1..616cd070d 100644
--- a/lib/sqlalchemy/ext/automap.py
+++ b/lib/sqlalchemy/ext/automap.py
@@ -112,7 +112,7 @@ explicit table declaration::
Base.classes.user_order
Specifying Classes Explicitly
-============================
+=============================
The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
explicitly, in a way similar to that of the :class:`.DeferredReflection` class.
diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py
index 9717e41c0..d4d2ed2ef 100644
--- a/lib/sqlalchemy/ext/compiler.py
+++ b/lib/sqlalchemy/ext/compiler.py
@@ -121,9 +121,19 @@ below where we generate a CHECK constraint that embeds a SQL expression::
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
- ddlcompiler.sql_compiler.process(constraint.expression)
+ ddlcompiler.sql_compiler.process(
+ constraint.expression, literal_binds=True)
)
+Above, we add an additional flag to the process step as called by
+:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This
+indicates that any SQL expression which refers to a :class:`.BindParameter`
+object or other "literal" object such as those which refer to strings or
+integers should be rendered **in-place**, rather than being referred to as
+a bound parameter; when emitting DDL, bound parameters are typically not
+supported.
+
+
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
diff --git a/lib/sqlalchemy/ext/declarative/api.py b/lib/sqlalchemy/ext/declarative/api.py
index dfc47ce95..5fe427bc2 100644
--- a/lib/sqlalchemy/ext/declarative/api.py
+++ b/lib/sqlalchemy/ext/declarative/api.py
@@ -397,6 +397,15 @@ class ConcreteBase(object):
'polymorphic_identity':'manager',
'concrete':True}
+ .. seealso::
+
+ :class:`.AbstractConcreteBase`
+
+ :ref:`concrete_inheritance`
+
+ :ref:`inheritance_concrete_helpers`
+
+
"""
@classmethod
@@ -495,6 +504,13 @@ class AbstractConcreteBase(ConcreteBase):
have been reworked to support relationships established directly
    on the abstract base, without any special configuration steps.
+ .. seealso::
+
+ :class:`.ConcreteBase`
+
+ :ref:`concrete_inheritance`
+
+ :ref:`inheritance_concrete_helpers`
"""
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 95aa14a26..53afdcb28 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -112,6 +112,7 @@ class Mapper(InspectionAttr):
include_properties=None,
exclude_properties=None,
passive_updates=True,
+ passive_deletes=False,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
@@ -319,6 +320,40 @@ class Mapper(InspectionAttr):
ordering for entities. By default mappers have no pre-defined
ordering.
+ :param passive_deletes: Indicates DELETE behavior of foreign key
+ columns when a joined-table inheritance entity is being deleted.
+ Defaults to ``False`` for a base mapper; for an inheriting mapper,
+ defaults to ``False`` unless the value is set to ``True``
+ on the superclass mapper.
+
+ When ``True``, it is assumed that ON DELETE CASCADE is configured
+ on the foreign key relationships that link this mapper's table
+ to its superclass table, so that when the unit of work attempts
+ to delete the entity, it need only emit a DELETE statement for the
+ superclass table, and not this table.
+
+ When ``False``, a DELETE statement is emitted for this mapper's
+ table individually. If the primary key attributes local to this
+ table are unloaded, then a SELECT must be emitted in order to
+ validate these attributes; note that the primary key columns
+ of a joined-table subclass are not part of the "primary key" of
+ the object as a whole.
+
+ Note that a value of ``True`` is **always** forced onto the
+ subclass mappers; that is, it's not possible for a superclass
+ to specify passive_deletes without this taking effect for
+ all subclass mappers.
+
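+      For example, a minimal sketch of joined-table inheritance making use
+      of ON DELETE CASCADE (class and table names are illustrative)::
+
+          class Employee(Base):
+              __tablename__ = 'employee'
+              id = Column(Integer, primary_key=True)
+              type = Column(String(50))
+              __mapper_args__ = {
+                  'polymorphic_on': type,
+                  'passive_deletes': True
+              }
+
+          class Engineer(Employee):
+              __tablename__ = 'engineer'
+              id = Column(Integer,
+                          ForeignKey('employee.id', ondelete="CASCADE"),
+                          primary_key=True)
+              __mapper_args__ = {'polymorphic_identity': 'engineer'}
+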
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+          :ref:`passive_deletes` - description of a similar feature as
+ used with :func:`.relationship`
+
+ :paramref:`.mapper.passive_updates` - supporting ON UPDATE
+ CASCADE for joined-table inheritance mappers
+
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
@@ -339,6 +374,9 @@ class Mapper(InspectionAttr):
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
+ :paramref:`.mapper.passive_deletes` - supporting ON DELETE
+ CASCADE for joined-table inheritance mappers
+
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
@@ -559,6 +597,7 @@ class Mapper(InspectionAttr):
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
+ self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
@@ -971,6 +1010,8 @@ class Mapper(InspectionAttr):
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
+ self.passive_deletes = self.inherits.passive_deletes or \
+ self.passive_deletes
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
@@ -982,7 +1023,7 @@ class Mapper(InspectionAttr):
(self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self, self.polymorphic_identity)
- )
+ )
self.polymorphic_map[self.polymorphic_identity] = self
else:
@@ -1591,7 +1632,12 @@ class Mapper(InspectionAttr):
if key in self._props and \
not isinstance(prop, properties.ColumnProperty) and \
- not isinstance(self._props[key], properties.ColumnProperty):
+ not isinstance(
+ self._props[key],
+ (
+ properties.ColumnProperty,
+ properties.ConcreteInheritedProperty)
+ ):
util.warn("Property %s on %s being replaced with new "
"property %s; the old property will be discarded" % (
self._props[key],
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index e6a2c0634..30b39f600 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -241,6 +241,8 @@ def delete_obj(base_mapper, states, uowtransaction):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
+ elif mapper.inherits and mapper.passive_deletes:
+ continue
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index e1b920bbb..6b808a701 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -2741,22 +2741,37 @@ class Query(object):
self.session._autoflush()
return self._execute_and_instances(context)
+ def __str__(self):
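+        # compile against the Session's bind when one can be located, so
+        # that str(query) renders dialect-specific SQL; fall back to the
+        # default dialect if the Query is unbound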
+ context = self._compile_context()
+ try:
+ bind = self._get_bind_args(
+ context, self.session.get_bind) if self.session else None
+ except sa_exc.UnboundExecutionError:
+ bind = None
+ return str(context.statement.compile(bind))
+
def _connection_from_session(self, **kw):
- conn = self.session.connection(
- **kw)
+ conn = self.session.connection(**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
- conn = self._connection_from_session(
- mapper=self._bind_mapper(),
- clause=querycontext.statement,
+ conn = self._get_bind_args(
+ querycontext,
+ self._connection_from_session,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
+ def _get_bind_args(self, querycontext, fn, **kw):
+ return fn(
+ mapper=self._bind_mapper(),
+ clause=querycontext.statement,
+ **kw
+ )
+
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
@@ -3358,8 +3373,6 @@ class Query(object):
sql.True_._ifnone(context.whereclause),
single_crit)
- def __str__(self):
- return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py
index f822071c4..83856eebf 100644
--- a/lib/sqlalchemy/orm/relationships.py
+++ b/lib/sqlalchemy/orm/relationships.py
@@ -1817,15 +1817,16 @@ class RelationshipProperty(StrategizedProperty):
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
- check = set(mapper.iterate_to_root()).\
- union(mapper.self_and_descendants)
- for m in check:
- if m.has_property(backref_key):
- raise sa_exc.ArgumentError(
- "Error creating backref "
- "'%s' on relationship '%s': property of that "
- "name exists on mapper '%s'" %
- (backref_key, self, m))
+ if not mapper.concrete:
+ check = set(mapper.iterate_to_root()).\
+ union(mapper.self_and_descendants)
+ for m in check:
+ if m.has_property(backref_key) and not m.concrete:
+ raise sa_exc.ArgumentError(
+ "Error creating backref "
+ "'%s' on relationship '%s': property of that "
+ "name exists on mapper '%s'" %
+ (backref_key, self, m))
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py
index b3f2fa5db..176523c3b 100644
--- a/lib/sqlalchemy/orm/scoping.py
+++ b/lib/sqlalchemy/orm/scoping.py
@@ -21,6 +21,12 @@ class scoped_session(object):
"""
+ session_factory = None
+ """The `session_factory` provided to `__init__` is stored in this
+ attribute and may be accessed at a later time. This can be useful when
+ a new non-scoped :class:`.Session` or :class:`.Connection` to the
+ database is needed."""
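+
+    A minimal sketch (``engine`` is assumed to be an existing
+    :class:`.Engine`)::
+
+        Session = scoped_session(sessionmaker(bind=engine))
+
+        # a new Session, independent of the scoped registry
+        independent_session = Session.session_factory()
+
+    """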
+
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
@@ -38,6 +44,7 @@ class scoped_session(object):
"""
self.session_factory = session_factory
+
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
@@ -45,12 +52,12 @@ class scoped_session(object):
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
- using the session factory if not present.
+ using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
- session factory callable, if an existing :class:`.Session`
- is not present. If the :class:`.Session` is present and
- keyword arguments have been passed,
+ :attr:`.scoped_session.session_factory` callable, if an existing
+ :class:`.Session` is not present. If the :class:`.Session` is present
+ and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
index b60e47bb3..0252a65f9 100644
--- a/lib/sqlalchemy/orm/strategies.py
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -238,7 +238,7 @@ class DeferredColumnLoader(LoaderStrategy):
(
loadopt and
self.group and
- loadopt.local_opts.get('undefer_group', False) == self.group
+ loadopt.local_opts.get('undefer_group_%s' % self.group, False)
)
or
(
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py
index 3467328e3..aa818258a 100644
--- a/lib/sqlalchemy/orm/strategy_options.py
+++ b/lib/sqlalchemy/orm/strategy_options.py
@@ -80,6 +80,8 @@ class Load(Generative, MapperOption):
def __init__(self, entity):
insp = inspect(entity)
self.path = insp._path_registry
+ # note that this .context is shared among all descendant
+ # Load objects
self.context = {}
self.local_opts = {}
@@ -88,6 +90,7 @@ class Load(Generative, MapperOption):
cloned.local_opts = {}
return cloned
+ is_opts_only = False
strategy = None
propagate_to_loaders = False
@@ -200,7 +203,7 @@ class Load(Generative, MapperOption):
self._set_path_strategy()
@_generative
- def set_column_strategy(self, attrs, strategy, opts=None):
+ def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
strategy = self._coerce_strat(strategy)
for attr in attrs:
@@ -211,13 +214,34 @@ class Load(Generative, MapperOption):
cloned.propagate_to_loaders = True
if opts:
cloned.local_opts.update(opts)
+ if opts_only:
+ cloned.is_opts_only = True
cloned._set_path_strategy()
+ def _set_for_path(self, context, path, replace=True, merge_opts=False):
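+        # install this Load option at the given path; when merge_opts is
+        # set, fold local_opts into an existing "opts only" option rather
+        # than replacing it outright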
+ if merge_opts or not replace:
+ existing = path.get(self.context, "loader")
+
+ if existing:
+ if merge_opts:
+ existing.local_opts.update(self.local_opts)
+ else:
+ path.set(context, "loader", self)
+ else:
+ existing = path.get(self.context, "loader")
+ path.set(context, "loader", self)
+ if existing and existing.is_opts_only:
+ self.local_opts.update(existing.local_opts)
+
def _set_path_strategy(self):
if self.path.has_entity:
- self.path.parent.set(self.context, "loader", self)
+ effective_path = self.path.parent
else:
- self.path.set(self.context, "loader", self)
+ effective_path = self.path
+
+ self._set_for_path(
+ self.context, effective_path, replace=True,
+ merge_opts=self.is_opts_only)
def __getstate__(self):
d = self.__dict__.copy()
@@ -305,7 +329,7 @@ class _UnboundLoad(Load):
val._bind_loader(query, query._attributes, raiseerr)
@classmethod
- def _from_keys(self, meth, keys, chained, kw):
+ def _from_keys(cls, meth, keys, chained, kw):
opt = _UnboundLoad()
def _split_key(key):
@@ -390,6 +414,7 @@ class _UnboundLoad(Load):
loader = Load(path_element)
loader.context = context
loader.strategy = self.strategy
+ loader.is_opts_only = self.is_opts_only
path = loader.path
for token in start_path:
@@ -411,15 +436,15 @@ class _UnboundLoad(Load):
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
- if self._is_chain_link:
- path.setdefault(context, "loader", loader)
- else:
- path.set(context, "loader", loader)
+ loader._set_for_path(
+ context, path,
+ replace=not self._is_chain_link,
+ merge_opts=self.is_opts_only)
else:
- if self._is_chain_link:
- effective_path.setdefault(context, "loader", loader)
- else:
- effective_path.set(context, "loader", loader)
+ loader._set_for_path(
+ context, effective_path,
+ replace=not self._is_chain_link,
+ merge_opts=self.is_opts_only)
def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
if _is_aliased_class(mapper):
@@ -1028,7 +1053,8 @@ def undefer_group(loadopt, name):
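+    # the option is stored under a key qualified by the group name, so
+    # that undefer_group() options for different names merge rather than
+    # overwrite one another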
return loadopt.set_column_strategy(
"*",
None,
- {"undefer_group": name}
+ {"undefer_group_%s" % name: True},
+ opts_only=True
)
diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py
index fa2cf2399..f4ad3ec00 100644
--- a/lib/sqlalchemy/sql/__init__.py
+++ b/lib/sqlalchemy/sql/__init__.py
@@ -66,6 +66,7 @@ from .expression import (
union,
union_all,
update,
+ within_group
)
from .visitors import ClauseVisitor
diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py
index eed079238..48b9a8a2b 100644
--- a/lib/sqlalchemy/sql/base.py
+++ b/lib/sqlalchemy/sql/base.py
@@ -449,11 +449,10 @@ class ColumnCollection(util.OrderedProperties):
"""
- __slots__ = '_all_col_set', '_all_columns'
+ __slots__ = '_all_columns'
def __init__(self, *columns):
super(ColumnCollection, self).__init__()
- object.__setattr__(self, '_all_col_set', util.column_set())
object.__setattr__(self, '_all_columns', [])
for c in columns:
self.add(c)
@@ -482,14 +481,11 @@ class ColumnCollection(util.OrderedProperties):
other = self[column.name]
if other.name == other.key:
remove_col = other
- self._all_col_set.remove(other)
del self._data[other.key]
if column.key in self._data:
remove_col = self._data[column.key]
- self._all_col_set.remove(remove_col)
- self._all_col_set.add(column)
self._data[column.key] = column
if remove_col is not None:
self._all_columns[:] = [column if c is remove_col
@@ -534,7 +530,6 @@ class ColumnCollection(util.OrderedProperties):
# in a _make_proxy operation
util.memoized_property.reset(value, "proxy_set")
- self._all_col_set.add(value)
self._all_columns.append(value)
self._data[key] = value
@@ -543,22 +538,20 @@ class ColumnCollection(util.OrderedProperties):
def remove(self, column):
del self._data[column.key]
- self._all_col_set.remove(column)
self._all_columns[:] = [
c for c in self._all_columns if c is not column]
def update(self, iter):
cols = list(iter)
+ all_col_set = set(self._all_columns)
self._all_columns.extend(
- c for label, c in cols if c not in self._all_col_set)
- self._all_col_set.update(c for label, c in cols)
+ c for label, c in cols if c not in all_col_set)
self._data.update((label, c) for label, c in cols)
def extend(self, iter):
cols = list(iter)
- self._all_columns.extend(c for c in cols if c not in
- self._all_col_set)
- self._all_col_set.update(cols)
+ all_col_set = set(self._all_columns)
+ self._all_columns.extend(c for c in cols if c not in all_col_set)
self._data.update((c.key, c) for c in cols)
__hash__ = None
@@ -584,22 +577,18 @@ class ColumnCollection(util.OrderedProperties):
def __setstate__(self, state):
object.__setattr__(self, '_data', state['_data'])
object.__setattr__(self, '_all_columns', state['_all_columns'])
- object.__setattr__(
- self, '_all_col_set', util.column_set(state['_all_columns']))
def contains_column(self, col):
- # this has to be done via set() membership
- return col in self._all_col_set
+ existing = self._data.get(col.key)
+ return existing is not None and hash(existing) == hash(col)
def as_immutable(self):
- return ImmutableColumnCollection(
- self._data, self._all_col_set, self._all_columns)
+ return ImmutableColumnCollection(self._data, self._all_columns)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
- def __init__(self, data, colset, all_columns):
+ def __init__(self, data, all_columns):
util.ImmutableProperties.__init__(self, data)
- object.__setattr__(self, '_all_col_set', colset)
object.__setattr__(self, '_all_columns', all_columns)
extend = remove = util.ImmutableProperties._immutable
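
With ``_all_col_set`` removed above, ``contains_column()`` no longer
consults a parallel set; it looks up the candidate's key in the keyed
``_data`` mapping and compares hashes. A rough sketch of the resulting
semantics::

    from sqlalchemy import Column, Integer, MetaData, Table

    t = Table("t", MetaData(), Column("x", Integer))

    # membership is resolved through the keyed mapping: find the
    # entry stored under col.key, then compare hashes
    assert t.columns.contains_column(t.c.x)

    # a different Column object under the same key is not "contained"
    assert not t.columns.contains_column(Column("x", Integer))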
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index 6766c99b7..492999d16 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -167,25 +167,39 @@ class Compiled(object):
_cached_metadata = None
def __init__(self, dialect, statement, bind=None,
+ schema_translate_map=None,
compile_kwargs=util.immutabledict()):
- """Construct a new ``Compiled`` object.
+ """Construct a new :class:`.Compiled` object.
- :param dialect: ``Dialect`` to compile against.
+ :param dialect: :class:`.Dialect` to compile against.
- :param statement: ``ClauseElement`` to be compiled.
+ :param statement: :class:`.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
+ :param schema_translate_map: dictionary of schema names to be
+ translated when forming the resultant SQL
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`schema_translating`
+
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
- .. versionadded:: 0.8
"""
self.dialect = dialect
self.bind = bind
+ self.preparer = self.dialect.identifier_preparer
+ if schema_translate_map:
+ self.preparer = self.preparer._with_schema_translate(
+ schema_translate_map)
+
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
@@ -286,12 +300,11 @@ class _CompileLabel(visitors.Visitable):
def self_group(self, **kw):
return self
-class SQLCompiler(Compiled):
- """Default implementation of Compiled.
+class SQLCompiler(Compiled):
+ """Default implementation of :class:`.Compiled`.
- Compiles ClauseElements into SQL strings. Uses a similar visit
- paradigm as visitors.ClauseVisitor but implements its own traversal.
+ Compiles :class:`.ClauseElement` objects into SQL strings.
"""
@@ -305,6 +318,8 @@ class SQLCompiler(Compiled):
INSERT/UPDATE/DELETE
"""
+ isplaintext = False
+
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
@@ -330,19 +345,34 @@ class SQLCompiler(Compiled):
driver/DB enforces this
"""
+ _textual_ordered_columns = False
+ """tell the result object that the column names as rendered are important,
+ but they are also "ordered" vs. what is in the compiled object here.
+ """
+
+ _ordered_columns = True
+ """
+    if False, we can't be sure the list of entries
+    in _result_columns is actually in the rendered order. Usually
+    True unless an unordered TextAsFrom is in use.
+ """
+
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
- """Construct a new ``DefaultCompiler`` object.
+ """Construct a new :class:`.SQLCompiler` object.
- dialect
- Dialect to be used
+ :param dialect: :class:`.Dialect` to be used
- statement
- ClauseElement to be compiled
+ :param statement: :class:`.ClauseElement` to be compiled
- column_keys
- a list of column names to be compiled into an INSERT or UPDATE
- statement.
+ :param column_keys: a list of column names to be compiled into an
+ INSERT or UPDATE statement.
+
+ :param inline: whether to generate INSERT statements as "inline", e.g.
+ not formatted to return any generated defaults
+
+ :param kwargs: additional keyword arguments to be consumed by the
+ superclass.
"""
self.column_keys = column_keys
@@ -368,11 +398,6 @@ class SQLCompiler(Compiled):
# column targeting
self._result_columns = []
- # if False, means we can't be sure the list of entries
- # in _result_columns is actually the rendered order. This
- # gets flipped when we use TextAsFrom, for example.
- self._ordered_columns = True
-
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
@@ -381,8 +406,6 @@ class SQLCompiler(Compiled):
self.ctes = None
- # an IdentifierPreparer that formats the quoting of identifiers
- self.preparer = dialect.identifier_preparer
self.label_length = dialect.label_length \
or dialect.max_identifier_length
@@ -649,8 +672,11 @@ class SQLCompiler(Compiled):
if table is None or not include_table or not table.named_with_column:
return name
else:
- if table.schema:
- schema_prefix = self.preparer.quote_schema(table.schema) + '.'
+ effective_schema = self.preparer.schema_for_object(table)
+
+ if effective_schema:
+ schema_prefix = self.preparer.quote_schema(
+ effective_schema) + '.'
else:
schema_prefix = ''
tablename = table.name
@@ -688,6 +714,9 @@ class SQLCompiler(Compiled):
else:
return self.bindparam_string(name, **kw)
+ if not self.stack:
+ self.isplaintext = True
+
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
@@ -711,7 +740,8 @@ class SQLCompiler(Compiled):
) or entry.get('need_result_map_for_nested', False)
if populate_result_map:
- self._ordered_columns = False
+ self._ordered_columns = \
+ self._textual_ordered_columns = taf.positional
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
@@ -873,22 +903,28 @@ class SQLCompiler(Compiled):
else:
return text
+ def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
+ attrname = "visit_%s_%s%s" % (
+ operator_.__name__, qualifier1,
+ "_" + qualifier2 if qualifier2 else "")
+ return getattr(self, attrname, None)
+
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
- disp = getattr(self, "visit_%s_unary_operator" %
- unary.operator.__name__, None)
+ disp = self._get_operator_dispatch(
+ unary.operator, "unary", "operator")
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
- disp = getattr(self, "visit_%s_unary_modifier" %
- unary.modifier.__name__, None)
+ disp = self._get_operator_dispatch(
+ unary.modifier, "unary", "modifier")
if disp:
return disp(unary, unary.modifier, **kw)
else:
@@ -922,7 +958,7 @@ class SQLCompiler(Compiled):
kw['literal_binds'] = True
operator_ = override_operator or binary.operator
- disp = getattr(self, "visit_%s_binary" % operator_.__name__, None)
+ disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
@@ -1298,7 +1334,7 @@ class SQLCompiler(Compiled):
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
- objects + (column,), type_)
+ (column,) + objects, type_)
else:
col_expr = column
if populate_result_map:
@@ -1386,7 +1422,7 @@ class SQLCompiler(Compiled):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
- (i.e. sqlite the main one).
+    (e.g. SQLite versions prior to 3.7.16).
"""
cloned = {}
@@ -1801,8 +1837,10 @@ class SQLCompiler(Compiled):
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
- if use_schema and getattr(table, "schema", None):
- ret = self.preparer.quote_schema(table.schema) + \
+ effective_schema = self.preparer.schema_for_object(table)
+
+ if use_schema and effective_schema:
+ ret = self.preparer.quote_schema(effective_schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
@@ -2080,6 +2118,30 @@ class SQLCompiler(Compiled):
self.preparer.format_savepoint(savepoint_stmt)
+class StrSQLCompiler(SQLCompiler):
+ """"a compiler subclass with a few non-standard SQL features allowed.
+
+ Used for stringification of SQL statements when a real dialect is not
+ available.
+
+ """
+
+ def visit_getitem_binary(self, binary, operator, **kw):
+ return "%s[%s]" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ def returning_clause(self, stmt, returning_cols):
+
+ columns = [
+ self._label_select_column(None, c, True, False, {})
+ for c in elements._select_iterables(returning_cols)
+ ]
+
+ return 'RETURNING ' + ', '.join(columns)
+
+
class DDLCompiler(Compiled):
@util.memoized_property
@@ -2090,10 +2152,6 @@ class DDLCompiler(Compiled):
def type_compiler(self):
return self.dialect.type_compiler
- @property
- def preparer(self):
- return self.dialect.identifier_preparer
-
def construct_params(self, params=None):
return None
@@ -2103,7 +2161,7 @@ class DDLCompiler(Compiled):
if isinstance(ddl.target, schema.Table):
context = context.copy()
- preparer = self.dialect.identifier_preparer
+ preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
@@ -2129,7 +2187,7 @@ class DDLCompiler(Compiled):
def visit_create_table(self, create):
table = create.element
- preparer = self.dialect.identifier_preparer
+ preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
@@ -2256,9 +2314,12 @@ class DDLCompiler(Compiled):
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
- if include_schema and index.table is not None and index.table.schema:
- schema = index.table.schema
- schema_name = self.preparer.quote_schema(schema)
+ if index.table is not None:
+ effective_schema = self.preparer.schema_for_object(index.table)
+ else:
+ effective_schema = None
+ if include_schema and effective_schema:
+ schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
@@ -2386,7 +2447,7 @@ class DDLCompiler(Compiled):
return text
def visit_foreign_key_constraint(self, constraint):
- preparer = self.dialect.identifier_preparer
+ preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
@@ -2603,6 +2664,17 @@ class GenericTypeCompiler(TypeCompiler):
return type_.get_col_spec(**kw)
+class StrSQLTypeCompiler(GenericTypeCompiler):
+ def __getattr__(self, key):
+ if key.startswith("visit_"):
+ return self._visit_unknown
+ else:
+ raise AttributeError(key)
+
+ def _visit_unknown(self, type_, **kw):
+ return "%s" % type_.__class__.__name__
+
+
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
@@ -2613,6 +2685,8 @@ class IdentifierPreparer(object):
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
+ schema_for_object = schema._schema_getter(None)
+
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
@@ -2637,6 +2711,12 @@ class IdentifierPreparer(object):
self.omit_schema = omit_schema
self._strings = {}
+ def _with_schema_translate(self, schema_translate_map):
+ prep = self.__class__.__new__(self.__class__)
+ prep.__dict__.update(self.__dict__)
+ prep.schema_for_object = schema._schema_getter(schema_translate_map)
+ return prep
+
def _escape_identifier(self, value):
"""Escape an identifier.
@@ -2709,9 +2789,12 @@ class IdentifierPreparer(object):
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
+
+ effective_schema = self.schema_for_object(sequence)
+
if (not self.omit_schema and use_schema and
- sequence.schema is not None):
- name = self.quote_schema(sequence.schema) + "." + name
+ effective_schema is not None):
+ name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
@@ -2740,9 +2823,12 @@ class IdentifierPreparer(object):
if name is None:
name = table.name
result = self.quote(name)
+
+ effective_schema = self.schema_for_object(table)
+
if not self.omit_schema and use_schema \
- and getattr(table, "schema", None):
- result = self.quote_schema(table.schema) + "." + result
+ and effective_schema:
+ result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name, quote=None):
@@ -2781,9 +2867,11 @@ class IdentifierPreparer(object):
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
+ effective_schema = self.schema_for_object(table)
+
if not self.omit_schema and use_schema and \
- getattr(table, 'schema', None):
- return (self.quote_schema(table.schema),
+ effective_schema:
+ return (self.quote_schema(effective_schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
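
The ``schema_translate_map`` accepted by ``Compiled`` above swaps in an
``IdentifierPreparer`` whose ``schema_for_object`` getter runs each
object's ``.schema`` through the mapping at render time. A minimal sketch
of direct compile-time use, assuming the PostgreSQL dialect is
installed::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import postgresql

    # the Table is declared against a symbolic schema name
    t = Table("t", MetaData(), Column("x", Integer), schema="per_user")

    # translation happens only in the rendered SQL; the Table object
    # itself is unchanged
    compiled = select([t.c.x]).compile(
        dialect=postgresql.dialect(),
        schema_translate_map={"per_user": "user_1"})
    print(compiled)  # roughly: SELECT user_1.t.x FROM user_1.t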
diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py
index 71018f132..7953b61b8 100644
--- a/lib/sqlalchemy/sql/ddl.py
+++ b/lib/sqlalchemy/sql/ddl.py
@@ -679,13 +679,16 @@ class SchemaGenerator(DDLBase):
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
- if table.schema:
- self.dialect.validate_identifier(table.schema)
+ effective_schema = self.connection.schema_for_object(table)
+ if effective_schema:
+ self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or \
not self.dialect.has_table(self.connection,
- table.name, schema=table.schema)
+ table.name, schema=effective_schema)
def _can_create_sequence(self, sequence):
+ effective_schema = self.connection.schema_for_object(sequence)
+
return self.dialect.supports_sequences and \
(
(not self.dialect.sequences_optional or
@@ -695,7 +698,7 @@ class SchemaGenerator(DDLBase):
not self.dialect.has_sequence(
self.connection,
sequence.name,
- schema=sequence.schema)
+ schema=effective_schema)
)
)
@@ -882,12 +885,14 @@ class SchemaDropper(DDLBase):
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
- if table.schema:
- self.dialect.validate_identifier(table.schema)
+ effective_schema = self.connection.schema_for_object(table)
+ if effective_schema:
+ self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_table(
- self.connection, table.name, schema=table.schema)
+ self.connection, table.name, schema=effective_schema)
def _can_drop_sequence(self, sequence):
+ effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and \
((not self.dialect.sequences_optional or
not sequence.optional) and
@@ -895,7 +900,7 @@ class SchemaDropper(DDLBase):
self.dialect.has_sequence(
self.connection,
sequence.name,
- schema=sequence.schema))
+ schema=effective_schema))
)
def visit_index(self, index):
diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py
index 68ea5624e..ddb57da77 100644
--- a/lib/sqlalchemy/sql/default_comparator.py
+++ b/lib/sqlalchemy/sql/default_comparator.py
@@ -164,27 +164,7 @@ def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
- if isinstance(other, slice):
- if expr.type.zero_indexes:
- other = slice(
- other.start + 1,
- other.stop + 1,
- other.step
- )
- other = Slice(
- _literal_as_binds(
- other.start, name=expr.key, type_=type_api.INTEGERTYPE),
- _literal_as_binds(
- other.stop, name=expr.key, type_=type_api.INTEGERTYPE),
- _literal_as_binds(
- other.step, name=expr.key, type_=type_api.INTEGERTYPE)
- )
- else:
- if expr.type.zero_indexes:
- other += 1
-
- other = _literal_as_binds(
- other, name=expr.key, type_=type_api.INTEGERTYPE)
+ other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
@@ -260,6 +240,8 @@ operator_lookup = {
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_binary_operate,),
+ "json_path_getitem_op": (_binary_operate, ),
+ "json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
@@ -295,7 +277,7 @@ operator_lookup = {
}
-def _check_literal(expr, operator, other):
+def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
@@ -310,7 +292,7 @@ def _check_literal(expr, operator, other):
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
- return expr._bind_param(operator, other)
+ return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py
index 70046c66b..fe2fecce8 100644
--- a/lib/sqlalchemy/sql/elements.py
+++ b/lib/sqlalchemy/sql/elements.py
@@ -429,7 +429,7 @@ class ClauseElement(Visitable):
dialect = self.bind.dialect
bind = self.bind
else:
- dialect = default.DefaultDialect()
+ dialect = default.StrCompileDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
@@ -682,9 +682,10 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
- def _bind_param(self, operator, obj):
+ def _bind_param(self, operator, obj, type_=None):
return BindParameter(None, obj,
_compared_to_operator=operator,
+ type_=type_,
_compared_to_type=self.type, unique=True)
@property
@@ -1275,17 +1276,16 @@ class TextClause(Executable, ClauseElement):
for id, name in connection.execute(t):
print(id, name)
- The :func:`.text` construct is used internally in cases when
- a literal string is specified for part of a larger query, such as
- when a string is specified to the :meth:`.Select.where` method of
- :class:`.Select`. In those cases, the same
- bind parameter syntax is applied::
+ The :func:`.text` construct is used in cases when
+ a literal string SQL fragment is specified as part of a larger query,
+ such as for the WHERE clause of a SELECT statement::
- s = select([users.c.id, users.c.name]).where("id=:user_id")
+ s = select([users.c.id, users.c.name]).where(text("id=:user_id"))
result = connection.execute(s, user_id=12)
- Using :func:`.text` explicitly usually implies the construction
- of a full, standalone statement. As such, SQLAlchemy refers
+ :func:`.text` is also used for the construction
+ of a full, standalone statement using plain text.
+ As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`.text` construct that should be subject to "autocommit"
@@ -1360,6 +1360,12 @@ class TextClause(Executable, ClauseElement):
.. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
supersedes the ``typemap`` argument to :func:`.text`.
+ .. seealso::
+
+ :ref:`sqlexpression_text` - in the Core tutorial
+
+ :ref:`orm_tutorial_literal_sql` - in the ORM tutorial
+
"""
stmt = TextClause(text, bind=bind)
if bindparams:
@@ -1485,9 +1491,17 @@ class TextClause(Executable, ClauseElement):
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
- Above, we used untyped :func:`.column` elements. These can also have
- types specified, which will impact how the column behaves in
- expressions as well as determining result set behavior::
+ Above, we pass a series of :func:`.column` elements to the
+ :meth:`.TextClause.columns` method positionally. These :func:`.column`
+ elements now become first class elements upon the :attr:`.TextAsFrom.c`
+ column collection, just like any other selectable.
+
+ The column expressions we pass to :meth:`.TextClause.columns` may
+ also be typed; when we do so, these :class:`.TypeEngine` objects become
+ the effective return type of the column, so that SQLAlchemy's
+ result-set-processing systems may be used on the return values.
+ This is often needed for types such as date or boolean types, as well
+ as for unicode processing on some dialect configurations::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
@@ -1499,9 +1513,8 @@ class TextClause(Executable, ClauseElement):
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
- Keyword arguments allow just the names and types of columns to be
- specified, where the :func:`.column` elements will be generated
- automatically::
+ As a shortcut to the above syntax, keyword arguments referring to
+ types alone may be used, if only type conversion is needed::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
@@ -1513,6 +1526,31 @@ class TextClause(Executable, ClauseElement):
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
+ The positional form of :meth:`.TextClause.columns` also provides
+ the unique feature of **positional column targeting**, which is
+ particularly useful when using the ORM with complex textual queries.
+ If we specify the columns from our model to :meth:`.TextClause.columns`,
+ the result set will match to those columns positionally, meaning the
+ name or origin of the column in the textual SQL doesn't matter::
+
+ stmt = text("SELECT users.id, addresses.id, users.id, "
+ "users.name, addresses.email_address AS email "
+ "FROM users JOIN addresses ON users.id=addresses.user_id "
+ "WHERE users.id = 1").columns(
+ User.id,
+ Address.id,
+ Address.user_id,
+ User.name,
+ Address.email_address
+ )
+
+ query = session.query(User).from_statement(stmt).options(
+ contains_eager(User.addresses))
+
+ .. versionadded:: 1.1 the :meth:`.TextClause.columns` method now
+ offers positional column targeting in the result set when
+ the column expressions are passed purely positionally.
+
The :meth:`.TextClause.columns` method provides a direct
route to calling :meth:`.FromClause.alias` as well as
:meth:`.SelectBase.cte` against a textual SELECT statement::
@@ -1526,15 +1564,22 @@ class TextClause(Executable, ClauseElement):
:meth:`.TextClause.columns` method. This method supersedes the
``typemap`` argument to :func:`.text`.
+
"""
- input_cols = [
+ positional_input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
- ] + [ColumnClause(key, type_) for key, type_ in types.items()]
- return selectable.TextAsFrom(self, input_cols)
+ ]
+ keyed_input_cols = [
+ ColumnClause(key, type_) for key, type_ in types.items()]
+
+ return selectable.TextAsFrom(
+ self,
+ positional_input_cols + keyed_input_cols,
+ positional=bool(positional_input_cols) and not keyed_input_cols)
@property
def type(self):
@@ -1952,11 +1997,12 @@ class Tuple(ClauseList, ColumnElement):
def _select_iterable(self):
return (self, )
- def _bind_param(self, operator, obj):
+ def _bind_param(self, operator, obj, type_=None):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=type_, unique=True)
- for o, type_ in zip(obj, self._type_tuple)
+ _compared_to_type=compared_to_type, unique=True,
+ type_=type_)
+ for o, compared_to_type in zip(obj, self._type_tuple)
]).self_group()
@@ -3637,10 +3683,11 @@ class ColumnClause(Immutable, ColumnElement):
else:
return name
- def _bind_param(self, operator, obj):
+ def _bind_param(self, operator, obj, type_=None):
return BindParameter(self.key, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
+ type_=type_,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
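
The switch above from ``DefaultDialect`` to ``StrCompileDialect`` means
that stringifying an expression with no bound engine now goes through
``StrSQLCompiler``, which tolerates non-standard constructs such as
getitem. A short sketch, assuming the ``ARRAY`` type from this same
changeset::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.types import ARRAY

    t = Table("t", MetaData(), Column("data", ARRAY(Integer)))

    # with no dialect in play, str() now renders via StrSQLCompiler,
    # whose visit_getitem_binary emits plain bracket syntax
    print(t.c.data[5])  # roughly: t.data[:data_1]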
diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py
index 6cfbd12b3..3c654bf67 100644
--- a/lib/sqlalchemy/sql/functions.py
+++ b/lib/sqlalchemy/sql/functions.py
@@ -256,16 +256,18 @@ class FunctionElement(Executable, ColumnElement, FromClause):
"""
return self.select().execute()
- def _bind_param(self, operator, obj):
+ def _bind_param(self, operator, obj, type_=None):
return BindParameter(None, obj, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
+ _compared_to_type=self.type, unique=True,
+ type_=type_)
def self_group(self, against=None):
# for the moment, we are parenthesizing all array-returning
# expressions against getitem. This may need to be made
# more portable if in the future we support other DBs
# besides postgresql.
- if against is operators.getitem:
+ if against is operators.getitem and \
+ isinstance(self.type, sqltypes.ARRAY):
return Grouping(self)
else:
return super(FunctionElement, self).self_group(against=against)
@@ -423,10 +425,11 @@ class Function(FunctionElement):
FunctionElement.__init__(self, *clauses, **kw)
- def _bind_param(self, operator, obj):
+ def _bind_param(self, operator, obj, type_=None):
return BindParameter(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
+ type_=type_,
unique=True)
@@ -659,7 +662,7 @@ class array_agg(GenericFunction):
"""support for the ARRAY_AGG function.
The ``func.array_agg(expr)`` construct returns an expression of
- type :class:`.Array`.
+ type :class:`.types.ARRAY`.
e.g.::
@@ -670,11 +673,11 @@ class array_agg(GenericFunction):
.. seealso::
:func:`.postgresql.array_agg` - PostgreSQL-specific version that
- returns :class:`.ARRAY`, which has PG-specific operators added.
+ returns :class:`.postgresql.ARRAY`, which has PG-specific operators added.
"""
- type = sqltypes.Array
+ type = sqltypes.ARRAY
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
@@ -694,7 +697,7 @@ class OrderedSetAgg(GenericFunction):
func_clauses = self.clause_expr.element
order_by = sqlutil.unwrap_order_by(within_group.order_by)
if self.array_for_multi_clause and len(func_clauses.clauses) > 1:
- return sqltypes.Array(order_by[0].type)
+ return sqltypes.ARRAY(order_by[0].type)
else:
return order_by[0].type
@@ -719,7 +722,7 @@ class percentile_cont(OrderedSetAgg):
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
- or if the arguments are an array, an :class:`.Array` of the sort
+ or if the arguments are an array, an :class:`.types.ARRAY` of the sort
expression's type.
.. versionadded:: 1.1
@@ -736,7 +739,7 @@ class percentile_disc(OrderedSetAgg):
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
- or if the arguments are an array, an :class:`.Array` of the sort
+ or if the arguments are an array, an :class:`.types.ARRAY` of the sort
expression's type.
.. versionadded:: 1.1
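
Since ``array_agg`` is now typed as the generic ``types.ARRAY`` of its
argument's type, index access against the aggregate behaves like any
other ARRAY expression. A brief sketch::

    from sqlalchemy import Column, Integer, MetaData, Table, func

    t = Table("t", MetaData(), Column("x", Integer))

    agg = func.array_agg(t.c.x)  # expression of type ARRAY(Integer)

    # getitem against an ARRAY-typed function is parenthesized by
    # self_group(), and yields an Integer-typed element expression
    elem = agg[1]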
diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py
index da3576466..f4f90b664 100644
--- a/lib/sqlalchemy/sql/operators.py
+++ b/lib/sqlalchemy/sql/operators.py
@@ -12,7 +12,6 @@
from .. import util
-
from operator import (
and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg,
getitem, lshift, rshift
@@ -720,7 +719,6 @@ def istrue(a):
def isfalse(a):
raise NotImplementedError()
-
def is_(a, b):
return a.is_(b)
@@ -837,6 +835,14 @@ def nullslast_op(a):
return a.nullslast()
+def json_getitem_op(a, b):
+ raise NotImplementedError()
+
+
+def json_path_getitem_op(a, b):
+ raise NotImplementedError()
+
+
_commutative = set([eq, ne, add, mul])
_comparison = set([eq, ne, lt, gt, ge, le, between_op, like_op])
@@ -879,7 +885,8 @@ def mirror(op):
_associative = _commutative.union([concat_op, and_, or_])
-_natural_self_precedent = _associative.union([getitem])
+_natural_self_precedent = _associative.union([
+ getitem, json_getitem_op, json_path_getitem_op])
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
@@ -894,6 +901,8 @@ _PRECEDENCE = {
from_: 15,
any_op: 15,
all_op: 15,
+ json_getitem_op: 15,
+ json_path_getitem_op: 15,
getitem: 15,
mul: 8,
truediv: 8,
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index b244d746c..0626cb2b4 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -30,20 +30,19 @@ as components in SQL expressions.
"""
from __future__ import absolute_import
-import inspect
from .. import exc, util, event, inspection
from .base import SchemaEventTarget, DialectKWArgs
+import operator
from . import visitors
from . import type_api
from .base import _bind_or_error, ColumnCollection
-from .elements import ClauseElement, ColumnClause, _truncated_label, \
+from .elements import ClauseElement, ColumnClause, \
_as_truncated, TextClause, _literal_as_text,\
- ColumnElement, _find_columns, quoted_name
+ ColumnElement, quoted_name
from .selectable import TableClause
import collections
import sqlalchemy
from . import ddl
-import types
RETAIN_SCHEMA = util.symbol('retain_schema')
@@ -3862,3 +3861,52 @@ class ThreadLocalMetaData(MetaData):
for e in self.__engines.values():
if hasattr(e, 'dispose'):
e.dispose()
+
+
+class _SchemaTranslateMap(object):
+ """Provide translation of schema names based on a mapping.
+
+ Also provides helpers for producing cache keys and optimized
+ access when no mapping is present.
+
+ Used by the :paramref:`.Connection.execution_options.schema_translate_map`
+ feature.
+
+ .. versionadded:: 1.1
+
+
+ """
+ __slots__ = 'map_', '__call__', 'hash_key', 'is_default'
+
+ _default_schema_getter = operator.attrgetter("schema")
+
+ def __init__(self, map_):
+ self.map_ = map_
+ if map_ is not None:
+ def schema_for_object(obj):
+ effective_schema = self._default_schema_getter(obj)
+ effective_schema = map_.get(effective_schema, effective_schema)
+ return effective_schema
+ self.__call__ = schema_for_object
+ self.hash_key = ";".join(
+ "%s=%s" % (k, map_[k])
+ for k in sorted(map_, key=str)
+ )
+ self.is_default = False
+ else:
+ self.hash_key = 0
+ self.__call__ = self._default_schema_getter
+ self.is_default = True
+
+ @classmethod
+ def _schema_getter(cls, map_):
+ if map_ is None:
+ return _default_schema_map
+ elif isinstance(map_, _SchemaTranslateMap):
+ return map_
+ else:
+ return _SchemaTranslateMap(map_)
+
+_default_schema_map = _SchemaTranslateMap(None)
+_schema_getter = _SchemaTranslateMap._schema_getter
+
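
``_SchemaTranslateMap`` packages the mapping as a callable plus a
``hash_key`` usable for compiled-statement caching; with no map present,
the shared ``_default_schema_map`` simply reads the ``.schema``
attribute. A rough sketch of the internal behavior, using the private
``_schema_getter`` entrypoint defined above::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.sql.schema import _schema_getter

    t1 = Table("t1", MetaData(), Column("x", Integer), schema="foo")
    t2 = Table("t2", MetaData(), Column("x", Integer))

    getter = _schema_getter({"foo": "bar", None: "default_schema"})
    assert getter(t1) == "bar"             # mapped schema name
    assert getter(t2) == "default_schema"  # None is a mappable key too
    assert not getter.is_default
    # hash_key is a stable cache key, e.g. "None=default_schema;foo=bar"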
diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py
index 73341053d..1955fc934 100644
--- a/lib/sqlalchemy/sql/selectable.py
+++ b/lib/sqlalchemy/sql/selectable.py
@@ -3420,9 +3420,10 @@ class TextAsFrom(SelectBase):
_textual = True
- def __init__(self, text, columns):
+ def __init__(self, text, columns, positional=False):
self.element = text
self.column_args = columns
+ self.positional = positional
@property
def _bind(self):
diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py
index 4abb9b15a..84bfca026 100644
--- a/lib/sqlalchemy/sql/sqltypes.py
+++ b/lib/sqlalchemy/sql/sqltypes.py
@@ -11,9 +11,13 @@
import datetime as dt
import codecs
+import collections
+import json
+from . import elements
from .type_api import TypeEngine, TypeDecorator, to_instance
-from .elements import quoted_name, TypeCoerce as type_coerce, _defer_name
+from .elements import quoted_name, TypeCoerce as type_coerce, _defer_name, \
+ Slice, _literal_as_binds
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
@@ -85,20 +89,16 @@ class Indexable(object):
"""
- zero_indexes = False
- """if True, Python zero-based indexes should be interpreted as one-based
- on the SQL expression side."""
-
class Comparator(TypeEngine.Comparator):
def _setup_getitem(self, index):
raise NotImplementedError()
def __getitem__(self, index):
- operator, adjusted_right_expr, result_type = \
+ adjusted_op, adjusted_right_expr, result_type = \
self._setup_getitem(index)
return self.operate(
- operator,
+ adjusted_op,
adjusted_right_expr,
result_type=result_type
)
@@ -909,9 +909,9 @@ class LargeBinary(_Binary):
"""A type for large binary byte data.
- The Binary type generates BLOB or BYTEA when tables are created,
- and also converts incoming values using the ``Binary`` callable
- provided by each DB-API.
+ The :class:`.LargeBinary` type corresponds to a large and/or unlengthed
+ binary type for the target platform, such as BLOB on MySQL and BYTEA for
+    PostgreSQL. It also handles the necessary conversions for the DBAPI.
"""
@@ -922,13 +922,8 @@ class LargeBinary(_Binary):
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
- DDL statements, for those BLOB types that accept a length
- (i.e. MySQL). It does *not* produce a *lengthed* BINARY/VARBINARY
- type - use the BINARY/VARBINARY types specifically for those.
- May be safely omitted if no ``CREATE
- TABLE`` will be issued. Certain databases may require a
- *length* for use in DDL, and will raise an exception when
- the ``CREATE TABLE`` DDL is issued.
+ DDL statements, for those binary types that accept a length,
+ such as the MySQL BLOB type.
"""
_Binary.__init__(self, length=length)
@@ -1496,7 +1491,221 @@ class Interval(_DateAffinity, TypeDecorator):
return self.impl.coerce_compared_value(op, value)
-class Array(Indexable, Concatenable, TypeEngine):
+class JSON(Indexable, TypeEngine):
+ """Represent a SQL JSON type.
+
+ .. note:: :class:`.types.JSON` is provided as a facade for vendor-specific
+ JSON types. Since it supports JSON SQL operations, it only
+ works on backends that have an actual JSON type, currently
+    PostgreSQL as well as certain versions of MySQL.
+
+ :class:`.types.JSON` is part of the Core in support of the growing
+ popularity of native JSON datatypes.
+
+ The :class:`.types.JSON` type stores arbitrary JSON format data, e.g.::
+
+ data_table = Table('data_table', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', JSON)
+ )
+
+ with engine.connect() as conn:
+ conn.execute(
+ data_table.insert(),
+            data={"key1": "value1", "key2": "value2"}
+ )
+
+    The base :class:`.types.JSON` provides these three operations:
+
+ * Keyed index operations::
+
+ data_table.c.data['some key']
+
+ * Integer index operations::
+
+ data_table.c.data[3]
+
+ * Path index operations::
+
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+
+ Additional operations are available from the dialect-specific versions
+ of :class:`.types.JSON`, such as :class:`.postgresql.JSON` and
+ :class:`.postgresql.JSONB`, each of which offer more operators than
+ just the basic type.
+
+    Index operations return an expression object whose type defaults to
+    :class:`.JSON`, so that further JSON-oriented instructions
+    may be called upon the result type.
+
+ The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+ detect in-place mutations to the structure. In order to detect these, the
+ :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
+ allow "in-place" changes to the datastructure to produce events which
+ will be detected by the unit of work. See the example at :class:`.HSTORE`
+ for a simple example involving a dictionary.
+
+ When working with NULL values, the :class:`.JSON` type recommends the
+ use of two specific constants in order to differentiate between a column
+    that evaluates to SQL NULL, i.e. no value, vs. the JSON-encoded string
+    ``"null"``. To insert or select against a value that is SQL NULL,
+ use the constant :func:`.null`::
+
+ from sqlalchemy import null
+ conn.execute(table.insert(), json_value=null())
+
+ To insert or select against a value that is JSON ``"null"``, use the
+ constant :attr:`.JSON.NULL`::
+
+ conn.execute(table.insert(), json_value=JSON.NULL)
+
+ The :class:`.JSON` type supports a flag
+ :paramref:`.JSON.none_as_null` which when set to True will result
+ in the Python constant ``None`` evaluating to the value of SQL
+ NULL, and when set to False results in the Python constant
+ ``None`` evaluating to the value of JSON ``"null"``. The Python
+ value ``None`` may be used in conjunction with either
+ :attr:`.JSON.NULL` and :func:`.null` in order to indicate NULL
+ values, but care must be taken as to the value of the
+ :paramref:`.JSON.none_as_null` in these cases.
+
+ .. seealso::
+
+ :class:`.postgresql.JSON`
+
+ :class:`.postgresql.JSONB`
+
+ :class:`.mysql.JSON`
+
+ .. versionadded:: 1.1
+
+
+ """
+ __visit_name__ = 'JSON'
+
+ hashable = False
+ NULL = util.symbol('JSON_NULL')
+ """Describe the json value of NULL.
+
+ This value is used to force the JSON value of ``"null"`` to be
+ used as the value. A value of Python ``None`` will be recognized
+ either as SQL NULL or JSON ``"null"``, based on the setting
+ of the :paramref:`.JSON.none_as_null` flag; the :attr:`.JSON.NULL`
+ constant can be used to always resolve to JSON ``"null"`` regardless
+ of this setting. This is in contrast to the :func:`.sql.null` construct,
+ which always resolves to SQL NULL. E.g.::
+
+ from sqlalchemy import null
+ from sqlalchemy.dialects.postgresql import JSON
+
+        # will *always* insert SQL NULL
+        obj1 = MyObject(json_value=null())
+        # will *always* insert JSON string "null"
+        obj2 = MyObject(json_value=JSON.NULL)
+
+ session.add_all([obj1, obj2])
+ session.commit()
+
+ """
+
+ def __init__(self, none_as_null=False):
+ """Construct a :class:`.types.JSON` type.
+
+ :param none_as_null=False: if True, persist the value ``None`` as a
+ SQL NULL value, not the JSON encoding of ``null``. Note that
+ when this flag is False, the :func:`.null` construct can still
+ be used to persist a NULL value::
+
+ from sqlalchemy import null
+ conn.execute(table.insert(), data=null())
+
+ .. seealso::
+
+ :attr:`.types.JSON.NULL`
+
+ """
+ self.none_as_null = none_as_null
+
+ class JSONIndexType(TypeEngine):
+ """Placeholder for the datatype of a JSON index value.
+
+ This allows execution-time processing of JSON index values
+ for special syntaxes.
+
+ """
+
+ class JSONPathType(TypeEngine):
+ """Placeholder type for JSON path operations.
+
+ This allows execution-time processing of a path-based
+ index value into a specific SQL syntax.
+
+ """
+
+ class Comparator(Indexable.Comparator, Concatenable.Comparator):
+ """Define comparison operations for :class:`.types.JSON`."""
+
+ @util.dependencies('sqlalchemy.sql.default_comparator')
+ def _setup_getitem(self, default_comparator, index):
+ if not isinstance(index, util.string_types) and \
+ isinstance(index, collections.Sequence):
+ index = default_comparator._check_literal(
+ self.expr, operators.json_path_getitem_op,
+ index, bindparam_type=JSON.JSONPathType
+ )
+
+ operator = operators.json_path_getitem_op
+ else:
+ index = default_comparator._check_literal(
+ self.expr, operators.json_getitem_op,
+ index, bindparam_type=JSON.JSONIndexType
+ )
+ operator = operators.json_getitem_op
+
+ return operator, index, self.type
+
+ comparator_factory = Comparator
+
+ @property
+ def should_evaluate_none(self):
+ return not self.none_as_null
+
+ @util.memoized_property
+ def _str_impl(self):
+ return String(convert_unicode=True)
+
+ def bind_processor(self, dialect):
+ string_process = self._str_impl.bind_processor(dialect)
+
+ json_serializer = dialect._json_serializer or json.dumps
+
+ def process(value):
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, elements.Null) or (
+ value is None and self.none_as_null
+ ):
+ return None
+
+ serialized = json_serializer(value)
+ if string_process:
+ serialized = string_process(serialized)
+ return serialized
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ string_process = self._str_impl.result_processor(dialect, coltype)
+ json_deserializer = dialect._json_deserializer or json.loads
+
+ def process(value):
+ if value is None:
+ return None
+ if string_process:
+ value = string_process(value)
+ return json_deserializer(value)
+ return process
+
+
+class ARRAY(Indexable, Concatenable, TypeEngine):
"""Represent a SQL Array type.
.. note:: This type serves as the basis for all ARRAY operations.
@@ -1506,17 +1715,17 @@ class Array(Indexable, Concatenable, TypeEngine):
with PostgreSQL, as it provides additional operators specific
to that backend.
- :class:`.Array` is part of the Core in support of various SQL standard
+ :class:`.types.ARRAY` is part of the Core in support of various SQL standard
functions such as :class:`.array_agg` which explicitly involve arrays;
however, with the exception of the PostgreSQL backend and possibly
some third-party dialects, no other SQLAlchemy built-in dialect has
support for this type.
- An :class:`.Array` type is constructed given the "type"
+ An :class:`.types.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
- Column("data", Array(Integer))
+ Column("data", ARRAY(Integer))
)
The above type represents an N-dimensional array,
@@ -1529,11 +1738,11 @@ class Array(Indexable, Concatenable, TypeEngine):
data=[1,2,3]
)
- The :class:`.Array` type can be constructed given a fixed number
+ The :class:`.types.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
- Column("data", Array(Integer, dimensions=2))
+ Column("data", ARRAY(Integer, dimensions=2))
)
Sending a number of dimensions is optional, but recommended if the
@@ -1555,10 +1764,10 @@ class Array(Indexable, Concatenable, TypeEngine):
>>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
>>> expr = expr[6] # returns Integer
- For 1-dimensional arrays, an :class:`.Array` instance with no
+ For 1-dimensional arrays, an :class:`.types.ARRAY` instance with no
dimension parameter will generally assume single-dimensional behaviors.
- SQL expressions of type :class:`.Array` have support for "index" and
+ SQL expressions of type :class:`.types.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Arrays default to 1-based indexing.
The operator produces binary expression
@@ -1575,9 +1784,9 @@ class Array(Indexable, Concatenable, TypeEngine):
mytable.c.data[2:7]: [1, 2, 3]
})
- The :class:`.Array` type also provides for the operators
- :meth:`.Array.Comparator.any` and :meth:`.Array.Comparator.all`.
- The PostgreSQL-specific version of :class:`.Array` also provides additional
+    The :class:`.types.ARRAY` type also provides for the operators
+    :meth:`.types.ARRAY.Comparator.any` and
+    :meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific
+    version of :class:`.types.ARRAY` also provides additional
operators.
.. versionadded:: 1.1.0
@@ -1589,9 +1798,13 @@ class Array(Indexable, Concatenable, TypeEngine):
"""
__visit_name__ = 'ARRAY'
+ zero_indexes = False
+ """if True, Python zero-based indexes should be interpreted as one-based
+ on the SQL expression side."""
+
class Comparator(Indexable.Comparator, Concatenable.Comparator):
- """Define comparison operations for :class:`.Array`.
+ """Define comparison operations for :class:`.types.ARRAY`.
More operators are available on the dialect-specific form
of this type. See :class:`.postgresql.ARRAY.Comparator`.
@@ -1601,11 +1814,32 @@ class Array(Indexable, Concatenable, TypeEngine):
def _setup_getitem(self, index):
if isinstance(index, slice):
return_type = self.type
- elif self.type.dimensions is None or self.type.dimensions == 1:
- return_type = self.type.item_type
+ if self.type.zero_indexes:
+ index = slice(
+ index.start + 1,
+ index.stop + 1,
+ index.step
+ )
+ index = Slice(
+ _literal_as_binds(
+ index.start, name=self.expr.key,
+ type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ index.stop, name=self.expr.key,
+ type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ index.step, name=self.expr.key,
+ type_=type_api.INTEGERTYPE)
+ )
else:
- adapt_kw = {'dimensions': self.type.dimensions - 1}
- return_type = self.type.adapt(self.type.__class__, **adapt_kw)
+ if self.type.zero_indexes:
+ index += 1
+ if self.type.dimensions is None or self.type.dimensions == 1:
+ return_type = self.type.item_type
+ else:
+ adapt_kw = {'dimensions': self.type.dimensions - 1}
+ return_type = self.type.adapt(
+ self.type.__class__, **adapt_kw)
return operators.getitem, index, return_type
@@ -1635,7 +1869,7 @@ class Array(Indexable, Concatenable, TypeEngine):
:func:`.sql.expression.any_`
- :meth:`.Array.Comparator.all`
+ :meth:`.types.ARRAY.Comparator.all`
"""
operator = operator if operator else operators.eq
@@ -1670,7 +1904,7 @@ class Array(Indexable, Concatenable, TypeEngine):
:func:`.sql.expression.all_`
- :meth:`.Array.Comparator.any`
+ :meth:`.types.ARRAY.Comparator.any`
"""
operator = operator if operator else operators.eq
@@ -1683,18 +1917,18 @@ class Array(Indexable, Concatenable, TypeEngine):
def __init__(self, item_type, as_tuple=False, dimensions=None,
zero_indexes=False):
- """Construct an :class:`.Array`.
+ """Construct an :class:`.types.ARRAY`.
E.g.::
- Column('myarray', Array(Integer))
+ Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
- ``INTEGER[][]``, are constructed as ``Array(Integer)``, not as
- ``Array(Array(Integer))`` or such.
+ ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
+ ``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
@@ -1706,7 +1940,7 @@ class Array(Indexable, Concatenable, TypeEngine):
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
- :class:`.Array` for additional detail.
+ :class:`.types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
@@ -1714,7 +1948,7 @@ class Array(Indexable, Concatenable, TypeEngine):
to the database.
"""
- if isinstance(item_type, Array):
+ if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
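
The ``Comparator._setup_getitem`` logic added to :class:`.JSON` above
dispatches a plain key or integer to ``json_getitem_op`` and a tuple to
``json_path_getitem_op``, with the result typed as JSON again so that
access can chain. A brief sketch of the expressions it produces::

    from sqlalchemy import (
        JSON, Column, Integer, MetaData, String, Table, cast, select)

    t = Table("data", MetaData(),
              Column("id", Integer, primary_key=True),
              Column("payload", JSON))

    single = t.c.payload["key1"]              # json_getitem_op
    path = t.c.payload[("key1", 2, "inner")]  # json_path_getitem_op

    # the result type is JSON again, so index access can chain
    chained = t.c.payload["key1"][2]

    # compare against the JSON-encoded representation via CAST
    stmt = select([t.c.id]).where(cast(path, String) == '"value"')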
diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py
index bd6377eb7..d24f31321 100644
--- a/lib/sqlalchemy/testing/__init__.py
+++ b/lib/sqlalchemy/testing/__init__.py
@@ -22,7 +22,7 @@ from .assertions import emits_warning, emits_warning_on, uses_deprecated, \
eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \
assert_raises_message, AssertsCompiledSQL, ComparesTables, \
AssertsExecutionResults, expect_deprecated, expect_warnings, \
- in_, not_in_
+ in_, not_in_, eq_ignore_whitespace
from .util import run_as_contextmanager, rowset, fail, \
provide_metadata, adict, force_drop_names, \
diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py
index 63667654d..8c962d7a3 100644
--- a/lib/sqlalchemy/testing/assertions.py
+++ b/lib/sqlalchemy/testing/assertions.py
@@ -245,6 +245,15 @@ def startswith_(a, fragment, msg=None):
a, fragment)
+def eq_ignore_whitespace(a, b, msg=None):
+ a = re.sub(r'^\s+?|\n', "", a)
+ a = re.sub(r' {2,}', " ", a)
+ b = re.sub(r'^\s+?|\n', "", b)
+ b = re.sub(r' {2,}', " ", b)
+
+ assert a == b, msg or "%r != %r" % (a, b)
+
+
def assert_raises(except_cls, callable_, *args, **kw):
try:
callable_(*args, **kw)
@@ -273,7 +282,8 @@ class AssertsCompiledSQL(object):
check_prefetch=None,
use_default_dialect=False,
allow_dialect_select=False,
- literal_binds=False):
+ literal_binds=False,
+ schema_translate_map=None):
if use_default_dialect:
dialect = default.DefaultDialect()
elif allow_dialect_select:
@@ -292,6 +302,9 @@ class AssertsCompiledSQL(object):
kw = {}
compile_kwargs = {}
+ if schema_translate_map:
+ kw['schema_translate_map'] = schema_translate_map
+
if params is not None:
kw['column_keys'] = list(params)
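
``eq_ignore_whitespace()`` strips leading whitespace and newlines and
collapses internal runs of spaces before comparing, which suits
assertions against multi-line SQL strings. For example::

    from sqlalchemy.testing import eq_ignore_whitespace

    # passes: the newline and doubled space are normalized away
    eq_ignore_whitespace(
        "SELECT  a,\n b FROM t",
        "SELECT a, b FROM t"
    )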
diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py
index 39d078985..56c422cf1 100644
--- a/lib/sqlalchemy/testing/assertsql.py
+++ b/lib/sqlalchemy/testing/assertsql.py
@@ -87,13 +87,18 @@ class CompiledSQL(SQLMatchRule):
compare_dialect = self._compile_dialect(execute_observed)
if isinstance(context.compiled.statement, _DDLCompiles):
compiled = \
- context.compiled.statement.compile(dialect=compare_dialect)
+ context.compiled.statement.compile(
+ dialect=compare_dialect,
+ schema_translate_map=context.
+ execution_options.get('schema_translate_map'))
else:
compiled = (
context.compiled.statement.compile(
dialect=compare_dialect,
column_keys=context.compiled.column_keys,
- inline=context.compiled.inline)
+ inline=context.compiled.inline,
+ schema_translate_map=context.
+ execution_options.get('schema_translate_map'))
)
_received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled))
parameters = execute_observed.parameters
diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py
index 30d7aa73a..5bb6b966d 100644
--- a/lib/sqlalchemy/testing/plugin/pytestplugin.py
+++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py
@@ -55,7 +55,7 @@ def pytest_sessionstart(session):
plugin_base.post_begin()
if has_xdist:
- _follower_count = itertools.count(1)
+ import uuid
def pytest_configure_node(node):
# the master for each node fills slaveinput dictionary
@@ -63,7 +63,7 @@ if has_xdist:
plugin_base.memoize_important_follower_config(node.slaveinput)
- node.slaveinput["follower_ident"] = "test_%s" % next(_follower_count)
+ node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
from sqlalchemy.testing import provision
provision.create_follower_db(node.slaveinput["follower_ident"])
diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py
index 357735656..a152d5e93 100644
--- a/lib/sqlalchemy/testing/profiling.py
+++ b/lib/sqlalchemy/testing/profiling.py
@@ -75,6 +75,11 @@ class ProfileStatsFile(object):
platform_tokens.append("pypy")
if win32:
platform_tokens.append("win")
+ platform_tokens.append(
+ "nativeunicode"
+ if config.db.dialect.convert_unicode
+ else "dbapiunicode"
+ )
_has_cext = config.requirements._has_cextensions()
platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
return "_".join(platform_tokens)
diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py
index 15bfad831..1b5d6e883 100644
--- a/lib/sqlalchemy/testing/requirements.py
+++ b/lib/sqlalchemy/testing/requirements.py
@@ -487,6 +487,19 @@ class SuiteRequirements(Requirements):
return exclusions.open()
@property
+ def json_type(self):
+ """target platform implements a native JSON type."""
+
+ return exclusions.closed()
+
+ @property
+ def json_array_indexes(self):
+ """"target platform supports numeric array indexes
+ within a JSON structure"""
+
+ return self.json_type
+
+ @property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py
index 230aeb1e9..6231e0fb9 100644
--- a/lib/sqlalchemy/testing/suite/test_types.py
+++ b/lib/sqlalchemy/testing/suite/test_types.py
@@ -5,7 +5,7 @@ from ..assertions import eq_
from ..config import requirements
from sqlalchemy import Integer, Unicode, UnicodeText, select
from sqlalchemy import Date, DateTime, Time, MetaData, String, \
- Text, Numeric, Float, literal, Boolean
+ Text, Numeric, Float, literal, Boolean, cast, null, JSON, and_
from ..schema import Table, Column
from ... import testing
import decimal
@@ -586,7 +586,260 @@ class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest):
)
-__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest',
+class JSONTest(_LiteralRoundTripFixture, fixtures.TablesTest):
+ __requires__ = 'json_type',
+ __backend__ = True
+
+ datatype = JSON
+
+ data1 = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+
+ data2 = {
+ "Key 'One'": "value1",
+ "key two": "value2",
+ "key three": "value ' three '"
+ }
+
+ data3 = {
+ "key1": [1, 2, 3],
+ "key2": ["one", "two", "three"],
+ "key3": [{"four": "five"}, {"six": "seven"}]
+ }
+
+ data4 = ["one", "two", "three"]
+
+ data5 = {
+ "nested": {
+ "elem1": [
+ {"a": "b", "c": "d"},
+ {"e": "f", "g": "h"}
+ ],
+ "elem2": {
+ "elem3": {"elem4": "elem5"}
+ }
+ }
+ }
+
+ @classmethod
+ def define_tables(cls, metadata):
+ Table('data_table', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('name', String(30), nullable=False),
+ Column('data', cls.datatype),
+ Column('nulldata', cls.datatype(none_as_null=True))
+ )
+
+ def test_round_trip_data1(self):
+ self._test_round_trip(self.data1)
+
+ def _test_round_trip(self, data_element):
+ data_table = self.tables.data_table
+
+ config.db.execute(
+ data_table.insert(),
+ {'name': 'row1', 'data': data_element}
+ )
+
+ row = config.db.execute(
+ select([
+ data_table.c.data,
+ ])
+ ).first()
+
+ eq_(row, (data_element, ))
+
+ def test_round_trip_none_as_sql_null(self):
+ col = self.tables.data_table.c['nulldata']
+
+ with config.db.connect() as conn:
+ conn.execute(
+ self.tables.data_table.insert(),
+ {"name": "r1", "data": None}
+ )
+
+ eq_(
+ conn.scalar(
+ select([self.tables.data_table.c.name]).
+ where(col.is_(null()))
+ ),
+ "r1"
+ )
+
+ eq_(
+ conn.scalar(
+ select([col])
+ ),
+ None
+ )
+
+ def test_round_trip_json_null_as_json_null(self):
+ col = self.tables.data_table.c['data']
+
+ with config.db.connect() as conn:
+ conn.execute(
+ self.tables.data_table.insert(),
+ {"name": "r1", "data": JSON.NULL}
+ )
+
+ eq_(
+ conn.scalar(
+ select([self.tables.data_table.c.name]).
+ where(cast(col, String) == 'null')
+ ),
+ "r1"
+ )
+
+ eq_(
+ conn.scalar(
+ select([col])
+ ),
+ None
+ )
+
+ def test_round_trip_none_as_json_null(self):
+ col = self.tables.data_table.c['data']
+
+ with config.db.connect() as conn:
+ conn.execute(
+ self.tables.data_table.insert(),
+ {"name": "r1", "data": None}
+ )
+
+ eq_(
+ conn.scalar(
+ select([self.tables.data_table.c.name]).
+ where(cast(col, String) == 'null')
+ ),
+ "r1"
+ )
+
+ eq_(
+ conn.scalar(
+ select([col])
+ ),
+ None
+ )
+
+ def _criteria_fixture(self):
+ config.db.execute(
+ self.tables.data_table.insert(),
+ [{"name": "r1", "data": self.data1},
+ {"name": "r2", "data": self.data2},
+ {"name": "r3", "data": self.data3},
+ {"name": "r4", "data": self.data4},
+ {"name": "r5", "data": self.data5}]
+ )
+
+ def _test_index_criteria(self, crit, expected):
+ self._criteria_fixture()
+ with config.db.connect() as conn:
+ eq_(
+ conn.scalar(
+ select([self.tables.data_table.c.name]).
+ where(crit)
+ ),
+ expected
+ )
+
+ def test_crit_spaces_in_key(self):
+ name = self.tables.data_table.c.name
+ col = self.tables.data_table.c['data']
+
+ # limit the rows here to avoid PG error
+ # "cannot extract field from a non-object", which is
+ # fixed in 9.4 but may exist in 9.3
+ self._test_index_criteria(
+ and_(
+ name.in_(["r1", "r2", "r3"]),
+ cast(col["key two"], String) == '"value2"'
+ ),
+ "r2"
+ )
+
+ @config.requirements.json_array_indexes
+ def test_crit_simple_int(self):
+ name = self.tables.data_table.c.name
+ col = self.tables.data_table.c['data']
+
+ # limit the rows here to avoid PG error
+ # "cannot extract array element from a non-array", which is
+ # fixed in 9.4 but may exist in 9.3
+ self._test_index_criteria(
+ and_(name == 'r4', cast(col[1], String) == '"two"'),
+ "r4"
+ )
+
+ def test_crit_mixed_path(self):
+ col = self.tables.data_table.c['data']
+ self._test_index_criteria(
+ cast(col[("key3", 1, "six")], String) == '"seven"',
+ "r3"
+ )
+
+ def test_crit_string_path(self):
+ col = self.tables.data_table.c['data']
+ self._test_index_criteria(
+ cast(col[("nested", "elem2", "elem3", "elem4")], String)
+ == '"elem5"',
+ "r5"
+ )
+
+ def test_unicode_round_trip(self):
+ s = select([
+ cast(
+ {
+ util.u('réveillé'): util.u('réveillé'),
+ "data": {"k1": util.u('drôle')}
+ },
+ self.datatype
+ )
+ ])
+ eq_(
+ config.db.scalar(s),
+ {
+ util.u('réveillé'): util.u('réveillé'),
+ "data": {"k1": util.u('drôle')}
+ },
+ )
+
+ def test_eval_none_flag_orm(self):
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy.orm import Session
+
+ Base = declarative_base()
+
+ class Data(Base):
+ __table__ = self.tables.data_table
+
+ s = Session(testing.db)
+
+ d1 = Data(name='d1', data=None, nulldata=None)
+ s.add(d1)
+ s.commit()
+
+ s.bulk_insert_mappings(
+ Data, [{"name": "d2", "data": None, "nulldata": None}]
+ )
+ eq_(
+ s.query(
+ cast(self.tables.data_table.c.data, String),
+ cast(self.tables.data_table.c.nulldata, String)
+ ).filter(self.tables.data_table.c.name == 'd1').first(),
+ ("null", None)
+ )
+ eq_(
+ s.query(
+ cast(self.tables.data_table.c.data, String),
+ cast(self.tables.data_table.c.nulldata, String)
+ ).filter(self.tables.data_table.c.name == 'd2').first(),
+ ("null", None)
+ )
+
+
+__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', 'JSONTest',
'DateTest', 'DateTimeTest', 'TextTest',
'NumericTest', 'IntegerTest',
'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest',
diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py
index d82e683d9..ac6d3b439 100644
--- a/lib/sqlalchemy/types.py
+++ b/lib/sqlalchemy/types.py
@@ -17,7 +17,7 @@ __all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType',
'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime',
'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode',
'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum',
- 'Indexable', 'Array']
+ 'Indexable', 'ARRAY', 'JSON']
from .sql.type_api import (
adapt_type,
@@ -28,7 +28,7 @@ from .sql.type_api import (
UserDefinedType
)
from .sql.sqltypes import (
- Array,
+ ARRAY,
BIGINT,
BINARY,
BLOB,
@@ -53,6 +53,7 @@ from .sql.sqltypes import (
INTEGER,
Integer,
Interval,
+ JSON,
LargeBinary,
MatchType,
NCHAR,
diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py
index 25c88c662..737b8a087 100644
--- a/lib/sqlalchemy/util/compat.py
+++ b/lib/sqlalchemy/util/compat.py
@@ -177,27 +177,27 @@ from operator import attrgetter as dottedgetter
if py3k:
def reraise(tp, value, tb=None, cause=None):
if cause is not None:
+ assert cause is not value, "Same cause emitted"
value.__cause__ = cause
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
- def raise_from_cause(exception, exc_info=None):
- if exc_info is None:
- exc_info = sys.exc_info()
- exc_type, exc_value, exc_tb = exc_info
- reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
else:
+ # not as nice as that of Py3K, but at least preserves
+ # the code line where the issue occurred
exec("def reraise(tp, value, tb=None, cause=None):\n"
+ " if cause is not None:\n"
+ " assert cause is not value, 'Same cause emitted'\n"
" raise tp, value, tb\n")
- def raise_from_cause(exception, exc_info=None):
- # not as nice as that of Py3K, but at least preserves
- # the code line where the issue occurred
- if exc_info is None:
- exc_info = sys.exc_info()
- exc_type, exc_value, exc_tb = exc_info
- reraise(type(exception), exception, tb=exc_tb)
+
+def raise_from_cause(exception, exc_info=None):
+ if exc_info is None:
+ exc_info = sys.exc_info()
+ exc_type, exc_value, exc_tb = exc_info
+ cause = exc_value if exc_value is not exception else None
+ reraise(type(exception), exception, tb=exc_tb, cause=cause)
if py3k:
exec_ = getattr(builtins, 'exec')
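
``raise_from_cause()`` is now version-agnostic: it re-raises the given
exception with the active exception chained as ``__cause__`` on Python 3,
while the Python 2 shim keeps only the original traceback. A minimal
sketch::

    from sqlalchemy.util.compat import raise_from_cause

    try:
        raise KeyError("missing")
    except KeyError:
        # on py3k the active KeyError becomes ValueError.__cause__;
        # on py2 the original code line is still preserved in the tb
        raise_from_cause(ValueError("bad input"))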