summaryrefslogtreecommitdiff
path: root/test/sql
diff options
context:
space:
mode:
authorJason Kirtland <jek@discorporate.us>2007-12-13 09:59:14 +0000
committerJason Kirtland <jek@discorporate.us>2007-12-13 09:59:14 +0000
commit8128a6378affeff76b573b1b4ca1e05e7d00b021 (patch)
treeb0d20234152eb56026d509ea4b205ed086bc742a /test/sql
parent2522534311452325513606d765ae398ce8514e2c (diff)
downloadsqlalchemy-8128a6378affeff76b573b1b4ca1e05e7d00b021.tar.gz
- Removed @testing.supported. Dialects in development or maintained outside
the tree can now run the full suite of tests out of the box.
- Migrated most @supported to @fails_on, @fails_on_everything_but, or (last
resort) @unsupported. @fails_on revealed a slew of bogus test skippage,
which was corrected.
- Added @fails_on_everything_but. Yes, the first usage *was*
"fails_on_everything_but('postgres')". How did you guess!
- Migrated @supported in dialect/* to the new test-class attribute __only_on__.
- Test classes can also have __unsupported_on__ and __excluded_on__.
Diffstat (limited to 'test/sql')
-rw-r--r--test/sql/defaults.py129
-rw-r--r--test/sql/functions.py29
-rw-r--r--test/sql/query.py68
3 files changed, 86 insertions, 140 deletions
diff --git a/test/sql/defaults.py b/test/sql/defaults.py
index a50250e9b..bce499686 100644
--- a/test/sql/defaults.py
+++ b/test/sql/defaults.py
@@ -5,6 +5,7 @@ from sqlalchemy import exceptions, schema, util
from sqlalchemy.orm import mapper, create_session
from testlib import *
+
class DefaultTest(PersistTest):
def setUpAll(self):
@@ -13,7 +14,7 @@ class DefaultTest(PersistTest):
db = testbase.db
metadata = MetaData(db)
default_generator = {'x':50}
-
+
def mydefault():
default_generator['x'] += 1
return default_generator['x']
@@ -21,7 +22,7 @@ class DefaultTest(PersistTest):
def myupdate_with_ctx(ctx):
conn = ctx.connection
return conn.execute(select([text('13')])).scalar()
-
+
def mydefault_using_connection(ctx):
conn = ctx.connection
try:
@@ -30,10 +31,10 @@ class DefaultTest(PersistTest):
# ensure a "close()" on this connection does nothing,
# since its a "branched" connection
conn.close()
-
+
use_function_defaults = testing.against('postgres', 'oracle')
is_oracle = testing.against('oracle')
-
+
# select "count(1)" returns different results on different DBs
# also correct for "current_date" compatible as column default, value differences
currenttime = func.current_date(type_=Date, bind=db)
@@ -63,32 +64,32 @@ class DefaultTest(PersistTest):
def1 = def2 = "3"
ts = 3
deftype = Integer
-
+
t = Table('default_test1', metadata,
# python function
Column('col1', Integer, primary_key=True, default=mydefault),
-
+
# python literal
Column('col2', String(20), default="imthedefault", onupdate="im the update"),
-
+
# preexecute expression
Column('col3', Integer, default=func.length('abcdef'), onupdate=func.length('abcdefghijk')),
-
+
# SQL-side default from sql expression
Column('col4', deftype, PassiveDefault(def1)),
-
+
# SQL-side default from literal expression
Column('col5', deftype, PassiveDefault(def2)),
-
+
# preexecute + update timestamp
Column('col6', Date, default=currenttime, onupdate=currenttime),
-
+
Column('boolcol1', Boolean, default=True),
Column('boolcol2', Boolean, default=False),
-
+
# python function which uses ExecutionContext
Column('col7', Integer, default=mydefault_using_connection, onupdate=myupdate_with_ctx),
-
+
# python builtin
Column('col8', Date, default=datetime.date.today, onupdate=datetime.date.today)
)
@@ -96,11 +97,11 @@ class DefaultTest(PersistTest):
def tearDownAll(self):
t.drop()
-
+
def tearDown(self):
default_generator['x'] = 50
t.delete().execute()
-
+
def testargsignature(self):
ex_msg = \
"ColumnDefault Python function takes zero or one positional arguments"
@@ -122,7 +123,7 @@ class DefaultTest(PersistTest):
for fn in fn3, fn4, fn5, fn6, fn7:
c = ColumnDefault(fn)
-
+
def teststandalone(self):
c = testbase.db.engine.contextual_connect()
x = c.execute(t.c.col1.default)
@@ -132,7 +133,7 @@ class DefaultTest(PersistTest):
self.assert_(y == 'imthedefault')
self.assert_(z == f)
self.assert_(f2==11)
-
+
def testinsert(self):
r = t.insert().execute()
assert r.lastrow_has_defaults()
@@ -141,7 +142,7 @@ class DefaultTest(PersistTest):
r = t.insert(inline=True).execute()
assert r.lastrow_has_defaults()
assert util.Set(r.context.postfetch_cols) == util.Set([t.c.col3, t.c.col5, t.c.col4, t.c.col6])
-
+
t.insert().execute()
t.insert().execute()
@@ -149,8 +150,8 @@ class DefaultTest(PersistTest):
l = t.select().execute()
today = datetime.date.today()
self.assert_(l.fetchall() == [
- (51, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
- (52, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
+ (51, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
+ (52, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
(53, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
(54, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today),
])
@@ -172,7 +173,7 @@ class DefaultTest(PersistTest):
t.insert(values={'col3':50}).execute()
l = t.select().execute()
self.assert_(l.fetchone()['col3'] == 50)
-
+
def testupdatemany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql') and
@@ -184,8 +185,7 @@ class DefaultTest(PersistTest):
t.update(t.c.col1==bindparam('pkval')).execute(
{'pkval':51,'col7':None, 'col8':None, 'boolcol1':False},
)
-
-
+
t.update(t.c.col1==bindparam('pkval')).execute(
{'pkval':51,},
{'pkval':52,},
@@ -196,8 +196,7 @@ class DefaultTest(PersistTest):
ctexec = currenttime.scalar()
today = datetime.date.today()
self.assert_(l.fetchall() == [(51, 'im the update', f2, ts, ts, ctexec, False, False, 13, today), (52, 'im the update', f2, ts, ts, ctexec, True, False, 13, today), (53, 'im the update', f2, ts, ts, ctexec, True, False, 13, today)])
-
-
+
def testupdate(self):
r = t.insert().execute()
pk = r.last_inserted_ids()[0]
@@ -207,7 +206,7 @@ class DefaultTest(PersistTest):
l = l.fetchone()
self.assert_(l == (pk, 'im the update', f2, None, None, ctexec, True, False, 13, datetime.date.today()))
self.assert_(f2==11)
-
+
def testupdatevalues(self):
r = t.insert().execute()
pk = r.last_inserted_ids()[0]
@@ -216,11 +215,11 @@ class DefaultTest(PersistTest):
l = l.fetchone()
self.assert_(l['col3'] == 55)
- @testing.supported('postgres')
+ @testing.fails_on_everything_except('postgres')
def testpassiveoverride(self):
- """primarily for postgres, tests that when we get a primary key column back
+ """primarily for postgres, tests that when we get a primary key column back
from reflecting a table which has a default value on it, we pre-execute
- that PassiveDefault upon insert, even though PassiveDefault says
+ that PassiveDefault upon insert, even though PassiveDefault says
"let the database execute this", because in postgres we must have all the primary
key values in memory before insert; otherwise we cant locate the just inserted row."""
@@ -246,21 +245,21 @@ class DefaultTest(PersistTest):
class PKDefaultTest(PersistTest):
def setUpAll(self):
global metadata, t1, t2
-
+
metadata = MetaData(testbase.db)
-
- t2 = Table('t2', metadata,
+
+ t2 = Table('t2', metadata,
Column('nextid', Integer))
-
+
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True, default=select([func.max(t2.c.nextid)]).as_scalar()),
Column('data', String(30)))
-
+
metadata.create_all()
-
+
def tearDownAll(self):
metadata.drop_all()
-
+
@testing.unsupported('mssql')
def test_basic(self):
t2.insert().execute(nextid=1)
@@ -270,14 +269,14 @@ class PKDefaultTest(PersistTest):
t2.insert().execute(nextid=2)
r = t1.insert().execute(data='there')
assert r.last_inserted_ids() == [2]
-
-
+
+
class AutoIncrementTest(PersistTest):
def setUp(self):
global aitable, aimeta
-
+
aimeta = MetaData(testbase.db)
- aitable = Table("aitest", aimeta,
+ aitable = Table("aitest", aimeta,
Column('id', Integer, Sequence('ai_id_seq', optional=True),
primary_key=True),
Column('int1', Integer),
@@ -287,16 +286,19 @@ class AutoIncrementTest(PersistTest):
def tearDown(self):
aimeta.drop_all()
- @testing.supported('postgres', 'mysql', 'maxdb')
+ # should fail everywhere... was: @supported('postgres', 'mysql', 'maxdb')
+ @testing.fails_on('sqlite')
def testnonautoincrement(self):
+ # sqlite INT primary keys can be non-unique! (only for ints)
meta = MetaData(testbase.db)
- nonai_table = Table("nonaitest", meta,
+ nonai_table = Table("nonaitest", meta,
Column('id', Integer, autoincrement=False, primary_key=True),
Column('data', String(20)))
nonai_table.create(checkfirst=True)
try:
try:
- # postgres will fail on first row, mysql fails on second row
+ # postgres + mysql strict will fail on first row,
+ # mysql in legacy mode fails on second row
nonai_table.insert().execute(data='row 1')
nonai_table.insert().execute(data='row 2')
assert False
@@ -306,7 +308,7 @@ class AutoIncrementTest(PersistTest):
nonai_table.insert().execute(id=1, data='row 1')
finally:
- nonai_table.drop()
+ nonai_table.drop()
# TODO: add coverage for increment on a secondary column in a key
def _test_autoincrement(self, bind):
@@ -362,7 +364,7 @@ class AutoIncrementTest(PersistTest):
def test_autoincrement_fk(self):
if not testbase.db.dialect.supports_pk_autoincrement:
return True
-
+
metadata = MetaData(testbase.db)
# No optional sequence here.
@@ -379,13 +381,14 @@ class AutoIncrementTest(PersistTest):
metadata.drop_all()
-
class SequenceTest(PersistTest):
- @testing.supported('postgres', 'oracle', 'maxdb')
+ __unsupported_on__ = ('sqlite', 'mysql', 'mssql', 'firebird',
+ 'sybase', 'access')
+
def setUpAll(self):
global cartitems, sometable, metadata
metadata = MetaData(testbase.db)
- cartitems = Table("cartitems", metadata,
+ cartitems = Table("cartitems", metadata,
Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
Column("description", String(40)),
Column("createdate", DateTime())
@@ -393,12 +396,12 @@ class SequenceTest(PersistTest):
sometable = Table( 'Manager', metadata,
Column('obj_id', Integer, Sequence('obj_id_seq'), ),
Column('name', String, ),
- Column('id', Integer, Sequence('Manager_id_seq', optional=True), primary_key=True),
+ Column('id', Integer, Sequence('Manager_id_seq', optional=True),
+ primary_key=True),
)
-
+
metadata.create_all()
-
- @testing.supported('postgres', 'oracle', 'maxdb')
+
def testseqnonpk(self):
"""test sequences fire off as defaults on non-pk columns"""
@@ -415,7 +418,6 @@ class SequenceTest(PersistTest):
(4, "name4", 4),
]
- @testing.supported('postgres', 'oracle', 'maxdb')
def testsequence(self):
cartitems.insert().execute(description='hi')
cartitems.insert().execute(description='there')
@@ -427,11 +429,11 @@ class SequenceTest(PersistTest):
assert select([func.count(cartitems.c.cart_id)],
and_(cartitems.c.description == 'lala',
cartitems.c.cart_id == id_)).scalar() == 1
-
+
cartitems.select().execute().fetchall()
-
- @testing.supported('postgres', 'oracle')
+
+ @testing.fails_on('maxdb')
# maxdb db-api seems to double-execute NEXTVAL internally somewhere,
# throwing off the numbers for these tests...
def test_implicit_sequence_exec(self):
@@ -443,7 +445,7 @@ class SequenceTest(PersistTest):
finally:
s.drop()
- @testing.supported('postgres', 'oracle')
+ @testing.fails_on('maxdb')
def teststandalone_explicit(self):
s = Sequence("my_sequence")
s.create(bind=testbase.db)
@@ -452,23 +454,22 @@ class SequenceTest(PersistTest):
self.assert_(x == 1)
finally:
s.drop(testbase.db)
-
- @testing.supported('postgres', 'oracle', 'maxdb')
+
def test_checkfirst(self):
s = Sequence("my_sequence")
s.create(testbase.db, checkfirst=False)
s.create(testbase.db, checkfirst=True)
s.drop(testbase.db, checkfirst=False)
s.drop(testbase.db, checkfirst=True)
-
- @testing.supported('postgres', 'oracle')
+
+ @testing.fails_on('maxdb')
def teststandalone2(self):
x = cartitems.c.cart_id.sequence.execute()
self.assert_(1 <= x <= 4)
-
- @testing.supported('postgres', 'oracle', 'maxdb')
- def tearDownAll(self):
+
+ def tearDownAll(self):
metadata.drop_all()
+
if __name__ == "__main__":
testbase.main()
diff --git a/test/sql/functions.py b/test/sql/functions.py
index 177a308b4..1103245ea 100644
--- a/test/sql/functions.py
+++ b/test/sql/functions.py
@@ -1,15 +1,24 @@
import testbase
import datetime
from sqlalchemy import *
-from sqlalchemy import exceptions, sql
+from sqlalchemy import databases, exceptions, sql
from sqlalchemy.sql.compiler import BIND_TEMPLATES
from sqlalchemy.engine import default
from sqlalchemy import types as sqltypes
from testlib import *
-# TODO: add a helper function to testlib for this
-from sqlalchemy.databases import sqlite, postgres, mysql, oracle, firebird, mssql
-dialects = [x.dialect() for x in [sqlite, postgres, mysql, oracle, firebird, mssql]]
+from sqlalchemy.databases import *
+# every dialect in databases.__all__ is expected to pass these tests.
+dialects = [getattr(databases, mod).dialect()
+ for mod in databases.__all__
+ # fixme!
+ if mod not in ('access',)]
+
+# if the configured dialect is out-of-tree or not yet in __all__, include it
+# too.
+if testbase.db.name not in databases.__all__:
+ dialects.append(testbase.db.dialect)
+
class CompileTest(SQLCompileTest):
def test_compile(self):
@@ -22,7 +31,7 @@ class CompileTest(SQLCompileTest):
else:
self.assert_compile(func.nosuchfunction(), "nosuchfunction()", dialect=dialect)
self.assert_compile(func.char_length('foo'), "char_length(%s)" % bindtemplate % {'name':'param_1', 'position':1}, dialect=dialect)
-
+
def test_constructor(self):
try:
func.current_timestamp('somearg')
@@ -41,14 +50,14 @@ class CompileTest(SQLCompileTest):
assert False
except TypeError:
assert True
-
+
def test_typing(self):
assert isinstance(func.coalesce(datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)).type, sqltypes.Date)
assert isinstance(func.coalesce(None, datetime.date(2005, 10, 15)).type, sqltypes.Date)
-
+
assert isinstance(func.concat("foo", "bar").type, sqltypes.String)
-
+
class ExecuteTest(PersistTest):
def test_standalone_execute(self):
@@ -123,11 +132,10 @@ class ExecuteTest(PersistTest):
t2.update(values={t2.c.value:func.length("asfdaasdf"), t2.c.stuff:"foo"}).execute()
print "HI", select([t2.c.value, t2.c.stuff]).execute().fetchone()
assert select([t2.c.value, t2.c.stuff]).execute().fetchone() == (9, "foo")
-
finally:
meta.drop_all()
- @testing.supported('postgres')
+ @testing.fails_on_everything_except('postgres')
def test_as_from(self):
# TODO: shouldnt this work on oracle too ?
x = testbase.db.func.current_date().execute().scalar()
@@ -150,4 +158,3 @@ def exec_sorted(statement, *args, **kw):
if __name__ == '__main__':
testbase.main()
- \ No newline at end of file
diff --git a/test/sql/query.py b/test/sql/query.py
index 4979fecd7..d0b24a9cc 100644
--- a/test/sql/query.py
+++ b/test/sql/query.py
@@ -264,19 +264,11 @@ class QueryTest(PersistTest):
r = users.select(offset=5, order_by=[users.c.user_id]).execute().fetchall()
self.assert_(r==[(6, 'ralph'), (7, 'fido')])
- @testing.supported('mssql')
- @testing.fails_on('maxdb')
- def test_select_limit_nooffset(self):
- try:
- r = users.select(limit=3, offset=2, order_by=[users.c.user_id]).execute().fetchall()
- assert False # InvalidRequestError should have been raised
- except exceptions.InvalidRequestError:
- pass
-
- @testing.unsupported('mysql')
+ @testing.exclude('mysql', '<', (5, 0, 0))
def test_scalar_select(self):
"""test that scalar subqueries with labels get their type propigated to the result set."""
- # mysql and/or mysqldb has a bug here, type isnt propigated for scalar subquery.
+ # mysql and/or mysqldb has a bug here, type isn't propagated for scalar
+ # subquery.
datetable = Table('datetable', metadata,
Column('id', Integer, primary_key=True),
Column('today', DateTime))
@@ -482,60 +474,6 @@ class QueryTest(PersistTest):
finally:
shadowed.drop(checkfirst=True)
- @testing.supported('mssql')
- def test_fetchid_trigger(self):
- meta = MetaData(testbase.db)
- t1 = Table('t1', meta,
- Column('id', Integer, Sequence('fred', 100, 1), primary_key=True),
- Column('descr', String(200)))
- t2 = Table('t2', meta,
- Column('id', Integer, Sequence('fred', 200, 1), primary_key=True),
- Column('descr', String(200)))
- meta.create_all()
- con = testbase.db.connect()
- con.execute("""create trigger paj on t1 for insert as
- insert into t2 (descr) select descr from inserted""")
-
- try:
- tr = con.begin()
- r = con.execute(t2.insert(), descr='hello')
- self.assert_(r.last_inserted_ids() == [200])
- r = con.execute(t1.insert(), descr='hello')
- self.assert_(r.last_inserted_ids() == [100])
-
- finally:
- tr.commit()
- con.execute("""drop trigger paj""")
- meta.drop_all()
-
- @testing.supported('mssql')
- def test_insertid_schema(self):
- meta = MetaData(testbase.db)
- con = testbase.db.connect()
- con.execute('create schema paj')
- tbl = Table('test', meta, Column('id', Integer, primary_key=True), schema='paj')
- tbl.create()
- try:
- tbl.insert().execute({'id':1})
- finally:
- tbl.drop()
- con.execute('drop schema paj')
-
- @testing.supported('mssql')
- def test_insertid_reserved(self):
- meta = MetaData(testbase.db)
- table = Table(
- 'select', meta,
- Column('col', Integer, primary_key=True)
- )
- table.create()
-
- meta2 = MetaData(testbase.db)
- try:
- table.insert().execute(col=7)
- finally:
- table.drop()
-
@testing.fails_on('maxdb')
def test_in_filtering(self):
"""test the behavior of the in_() function."""