author     Mike Bayer <mike_mp@zzzcomputing.com>  2011-12-06 16:36:36 -0500
committer  Mike Bayer <mike_mp@zzzcomputing.com>  2011-12-06 16:36:36 -0500
commit     0ff337f29e3f1060a98338300fcecfc550936558 (patch)
tree       826eb38c15b21ab7bc2a4666efc74347a04c4cbe
parent     0a561ac656eea4ad2464ca8dfef5806191da3805 (diff)
download   sqlalchemy-0ff337f29e3f1060a98338300fcecfc550936558.tar.gz
backport [ticket:2269] fix from 0.7
-rw-r--r--  CHANGES                                   5
-rw-r--r--  lib/sqlalchemy/dialects/mssql/base.py    11
-rw-r--r--  test/engine/test_reflection.py          136
3 files changed, 113 insertions, 39 deletions
diff --git a/CHANGES b/CHANGES
index 94c56f5fe..c5dc7ab83 100644
--- a/CHANGES
+++ b/CHANGES
@@ -111,6 +111,11 @@ CHANGES
MySQL's arbitrary rules regarding if it will actually
work or not. [ticket:2225]

+- mssql
+ - [bug] Decode incoming values when retrieving
+ list of index names and the names of columns
+ within those indexes. [ticket:2269]
+
- oracle
- Added ORA-00028 to disconnect codes, use
cx_oracle _Error.code to get at the code,
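
The symptom behind the mssql changelog entry above: some MSSQL DBAPIs deliver
index and column names as raw bytestrings, which never compare equal to the
unicode names callers hold. A minimal sketch of that mismatch, independent of
any database and in the Python 2 idiom of this era of the codebase (the
literal name is illustrative only):

    # utf-8 bytes as a pyodbc-style driver might deliver an index name
    raw = 'ix_Unit\xc3\xa9ble'
    # the unicode name that reflection callers expect
    wanted = u'ix_Unit\u00e9ble'

    # only the decoded value compares equal; before this fix,
    # get_indexes() handed back the raw bytestring form
    assert raw.decode('utf-8') == wanted
    assert isinstance(raw.decode('utf-8'), unicode)
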
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 4c589a2a9..76786bb81 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -1197,7 +1197,10 @@ class MSDialect(default.DefaultDialect):
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
- ]
+ ],
+ typemap = {
+ 'name':sqltypes.Unicode()
+ }
)
)
indexes = {}
@@ -1223,7 +1226,11 @@ class MSDialect(default.DefaultDialect):
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
- ]),
+ ],
+ typemap = {
+ 'name':sqltypes.Unicode()
+ }
+ ),
)
for row in rp:
if row['index_id'] in indexes:
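
The mechanism of the fix above is the typemap argument to sql.text(): it maps
result-column names to SQLAlchemy types, so every value in that column is run
through the type's result processor (here Unicode, whose processor decodes
incoming bytestrings) before reflection code sees the row. A standalone
sketch of the same pattern, assuming an MSSQL engine; the connection URL and
query are illustrative only:

    from sqlalchemy import create_engine, sql
    from sqlalchemy import types as sqltypes

    # hypothetical MSSQL URL, for illustration only
    engine = create_engine('mssql+pyodbc://scott:tiger@mydsn')

    stmt = sql.text(
        "select ind.name as name from sysindexes as ind "
        "where ind.name is not null",
        # typemap applies the Unicode result processor to the
        # 'name' column, decoding any bytestrings the driver returns
        typemap={'name': sqltypes.Unicode()}
    )

    for row in engine.execute(stmt):
        assert isinstance(row['name'], unicode)
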
diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py
index 84a07bf4d..feeea3705 100644
--- a/test/engine/test_reflection.py
+++ b/test/engine/test_reflection.py
@@ -1,7 +1,7 @@
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
import StringIO, unicodedata
from sqlalchemy import types as sql_types
-from sqlalchemy import schema
+from sqlalchemy import schema, Integer
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy import MetaData
from sqlalchemy.test.schema import Table, Column
@@ -906,53 +906,115 @@ class SchemaManipulationTest(TestBase):
assert addresses.constraints == set([addresses.primary_key, fk])
class UnicodeReflectionTest(TestBase):
+ @classmethod
+ def setup_class(cls):
+ # trigger mysql _server_casing check...
+ testing.db.connect().close()
+
+ cls.bind = bind = engines.utf8_engine(
+ options={'convert_unicode' : True})
+
+ cls.metadata = metadata = MetaData()
+
+ no_multibyte_period = set([
+ (u'plain', u'col_plain', u'ix_plain')
+ ])
+ no_has_table = [
+ (u'no_has_table_1', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'),
+ (u'no_has_table_2', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+ ]
+ no_case_sensitivity = [
+ (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+ (u'unit\u00e9ble', u'col_unit\u00e9ble', u'ix_unit\u00e9ble'),
+ ]
+ full = [
+ (u'Unit\u00e9ble', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'),
+ (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+ ]
+
+ # as you can see, our options for this kind of thing
+ # are really limited unless you're on PG or SQLite
+
+ # forget about it on these backends
+ if testing.against('sybase', 'maxdb', 'oracle'):
+ names = no_multibyte_period
+ # mysql can't handle casing usually,
+ # note only 0.7 has a better "case sensitive" requires here
+ elif testing.against("mysql"):
+ names = no_multibyte_period.union(no_case_sensitivity)
+ # mssql + pyodbc + freetds can't compare multibyte names to
+ # information_schema.tables.table_name
+ elif testing.against("mssql"):
+ names = no_multibyte_period.union(no_has_table)
+ else:
+ names = no_multibyte_period.union(full)
+
+ for tname, cname, ixname in names:
+ t = Table(tname, metadata, Column('id', sa.Integer,
+ sa.Sequence(cname + '_id_seq'), primary_key=True),
+ Column(cname, Integer)
+ )
+ schema.Index(ixname, t.c[cname])
+
+ metadata.create_all(bind)
+ cls.names = names
+
+ @classmethod
+ def teardown_class(cls):
+ cls.metadata.drop_all(cls.bind, checkfirst=False)
+ cls.bind.dispose()
+
+ @testing.requires.unicode_connections
+ def test_has_table(self):
+ for tname, cname, ixname in self.names:
+ assert self.bind.has_table(tname), "Can't detect name %s" % tname
@testing.requires.unicode_connections
def test_basic(self):
- try:
+ # the 'convert_unicode' should not get in the way of the
+ # reflection process. reflecttable for oracle, postgresql
+ # (others?) expect non-unicode strings in result sets/bind
+ # params
- # the 'convert_unicode' should not get in the way of the
- # reflection process. reflecttable for oracle, postgresql
- # (others?) expect non-unicode strings in result sets/bind
- # params
+ bind = self.bind
+ names = set([rec[0] for rec in self.names])
- bind = engines.utf8_engine(options={'convert_unicode'
- : True})
- metadata = MetaData(bind)
- if testing.against('sybase', 'maxdb', 'oracle', 'mssql'):
- names = set([u'plain'])
- else:
- names = set([u'plain', u'Unit\u00e9ble', u'\u6e2c\u8a66'
- ])
- for name in names:
- Table(name, metadata, Column('id', sa.Integer,
- sa.Sequence(name + '_id_seq'), primary_key=True))
- metadata.create_all()
- reflected = set(bind.table_names())
+ reflected = set(bind.table_names())
- # Jython 2.5 on Java 5 lacks unicodedata.normalize
+ # Jython 2.5 on Java 5 lacks unicodedata.normalize
- if not names.issubset(reflected) and hasattr(unicodedata,
- 'normalize'):
+ if not names.issubset(reflected) and hasattr(unicodedata,'normalize'):
- # Python source files in the utf-8 coding seem to
- # normalize literals as NFC (and the above are
- # explicitly NFC). Maybe this database normalizes NFD
- # on reflection.
+ # Python source files in the utf-8 coding seem to
+ # normalize literals as NFC (and the above are
+ # explicitly NFC). Maybe this database normalizes NFD
+ # on reflection.
- nfc = set([unicodedata.normalize('NFC', n) for n in
- names])
- self.assert_(nfc == names)
+ nfc = set([unicodedata.normalize('NFC', n) for n in names])
+ self.assert_(nfc == names)
- # Yep. But still ensure that bulk reflection and
- # create/drop work with either normalization.
+ # Yep. But still ensure that bulk reflection and
+ # create/drop work with either normalization.
- r = MetaData(bind, reflect=True)
- r.drop_all()
- r.create_all()
- finally:
- metadata.drop_all()
- bind.dispose()
+ r = MetaData(bind, reflect=True)
+ r.drop_all(checkfirst=False)
+ r.create_all(checkfirst=False)
+
+ @testing.requires.unicode_connections
+ def test_get_names(self):
+ inspector = Inspector.from_engine(self.bind)
+ names = dict(
+ (tname, (cname, ixname)) for tname, cname, ixname in self.names
+ )
+ for tname in inspector.get_table_names():
+ assert tname in names
+ eq_(
+ [
+ (rec['name'], rec['column_names'][0])
+ for rec in inspector.get_indexes(tname)
+ ],
+ [(names[tname][1], names[tname][0])]
+ )
class SchemaTest(TestBase):
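
One detail of the rewritten test_basic above deserves a note: it tolerates
databases that hand table names back in a different Unicode normalization
form than the NFC literals in the test source. A short worked example of why
that fallback comparison is needed:

    import unicodedata

    nfc = u'Unit\u00e9ble'                   # e-acute as one codepoint (NFC)
    nfd = unicodedata.normalize('NFD', nfc)  # 'e' plus a combining accent (NFD)

    # the two spellings render identically but are unequal as strings
    assert nfc != nfd
    # normalizing back to NFC reconciles them, which is the check
    # test_basic falls back to when reflected names don't match
    assert unicodedata.normalize('NFC', nfd) == nfc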