| author | Andi Albrecht <albrecht.andi@gmail.com> | 2015-10-26 20:03:58 +0100 |
|---|---|---|
| committer | Andi Albrecht <albrecht.andi@gmail.com> | 2015-10-26 20:03:58 +0100 |
| commit | f7e07b7b61be4befd5eaafce93aeb0238c884315 | |
| tree | d2e43fddb59e18ead4d4672ccddeca11d77b76fa | |
| parent | c6f210e65a9b9e91683e62134eb654d8c00a92d8 | |
| download | sqlparse-f7e07b7b61be4befd5eaafce93aeb0238c884315.tar.gz | |
Code cleanup in tests.
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | tests/test_filters.py | 2 |
| -rw-r--r-- | tests/test_format.py | 9 |
| -rw-r--r-- | tests/test_functions.py | 1 |
| -rw-r--r-- | tests/test_grouping.py | 6 |
| -rw-r--r-- | tests/test_parse.py | 2 |
| -rw-r--r-- | tests/test_tokenize.py | 54 |
6 files changed, 38 insertions, 36 deletions
```diff
diff --git a/tests/test_filters.py b/tests/test_filters.py
index eb61604..925b0b6 100644
--- a/tests/test_filters.py
+++ b/tests/test_filters.py
@@ -5,7 +5,6 @@ Created on 24/03/2012
 '''
 import unittest
 
-from sqlparse.compat import u
 from sqlparse.filters import StripWhitespace, Tokens2Unicode
 from sqlparse.lexer import tokenize
 
@@ -75,5 +74,4 @@ LIMIT 1"""
 
 
 if __name__ == "__main__":
-    #import sys;sys.argv = ['', 'Test.testName']
     unittest.main()
diff --git a/tests/test_format.py b/tests/test_format.py
index a105b1c..e8875dd 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -93,9 +93,12 @@ class TestFormat(TestCaseBase):
 
         # Because of the use of
         self.ndiffAssertEqual(f(s1), "SELECT some_column LIKE 'value\r'")
-        self.ndiffAssertEqual(f(s2), "SELECT some_column LIKE 'value\r'\nWHERE id = 1\n")
-        self.ndiffAssertEqual(f(s3), "SELECT some_column LIKE 'value\\'\r' WHERE id = 1\n")
-        self.ndiffAssertEqual(f(s4), "SELECT some_column LIKE 'value\\\\\\'\r' WHERE id = 1\n")
+        self.ndiffAssertEqual(
+            f(s2), "SELECT some_column LIKE 'value\r'\nWHERE id = 1\n")
+        self.ndiffAssertEqual(
+            f(s3), "SELECT some_column LIKE 'value\\'\r' WHERE id = 1\n")
+        self.ndiffAssertEqual(
+            f(s4), "SELECT some_column LIKE 'value\\\\\\'\r' WHERE id = 1\n")
 
     def test_outputformat(self):
         sql = 'select * from foo;'
diff --git a/tests/test_functions.py b/tests/test_functions.py
index 52e2ce7..425ab7f 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -160,5 +160,4 @@ class Test_IsType(Test_SQL):
 
 
 if __name__ == "__main__":
-    #import sys;sys.argv = ['', 'Test.testName']
     main()
diff --git a/tests/test_grouping.py b/tests/test_grouping.py
index fa68ab2..e846176 100644
--- a/tests/test_grouping.py
+++ b/tests/test_grouping.py
@@ -21,7 +21,8 @@ class TestGrouping(TestCaseBase):
         self.assert_(isinstance(parsed.tokens[-1], sql.Identifier))
         self.assertEqual(len(parsed.tokens[2].tokens), 5)
         self.assert_(isinstance(parsed.tokens[2].tokens[3], sql.Identifier))
-        self.assert_(isinstance(parsed.tokens[2].tokens[3].tokens[0], sql.Parenthesis))
+        self.assert_(isinstance(parsed.tokens[2].tokens[3].tokens[0],
+                                sql.Parenthesis))
         self.assertEqual(len(parsed.tokens[2].tokens[3].tokens), 3)
 
     def test_comments(self):
@@ -147,7 +148,8 @@ class TestGrouping(TestCaseBase):
         s = 'select x from (select y from foo where bar = 1) z'
         p = sqlparse.parse(s)[0]
         self.ndiffAssertEqual(s, u(p))
-        self.assertTrue(isinstance(p.tokens[-1].tokens[0].tokens[-2], sql.Where))
+        self.assertTrue(isinstance(p.tokens[-1].tokens[0].tokens[-2],
+                                   sql.Where))
 
     def test_typecast(self):
         s = 'select foo::integer from bar'
diff --git a/tests/test_parse.py b/tests/test_parse.py
index fb7b24b..2ea0f40 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -221,7 +221,7 @@ def test_sqlite_identifiers():
     assert (len(p) == 1
             and isinstance(p[0], sqlparse.sql.IdentifierList)
             and [id.get_name() for id in p[0].get_identifiers()]
-                == ['[col1]', '[col2]'])
+            == ['[col1]', '[col2]'])
 
     p = sqlparse.parse('[col1]+[col2]')[0]
     types = [tok.ttype for tok in p.flatten()]
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 619aefd..02c3f25 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 
-import sys
 import types
 import unittest
 
@@ -9,8 +8,8 @@ import pytest
 import sqlparse
 from sqlparse import lexer
 from sqlparse import sql
+from sqlparse import tokens as T
 from sqlparse.compat import StringIO
-from sqlparse.tokens import *
 
 
 class TestTokenize(unittest.TestCase):
@@ -22,14 +21,14 @@ class TestTokenize(unittest.TestCase):
         tokens = list(stream)
         self.assertEqual(len(tokens), 8)
         self.assertEqual(len(tokens[0]), 2)
-        self.assertEqual(tokens[0], (Keyword.DML, u'select'))
-        self.assertEqual(tokens[-1], (Punctuation, u';'))
+        self.assertEqual(tokens[0], (T.Keyword.DML, u'select'))
+        self.assertEqual(tokens[-1], (T.Punctuation, u';'))
 
     def test_backticks(self):
         s = '`foo`.`bar`'
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[0], (Name, u'`foo`'))
+        self.assertEqual(tokens[0], (T.Name, u'`foo`'))
 
     def test_linebreaks(self):  # issue1
         s = 'foo\nbar\n'
@@ -49,28 +48,28 @@ class TestTokenize(unittest.TestCase):
         s = "create created_foo"
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[0][0], Keyword.DDL)
-        self.assertEqual(tokens[2][0], Name)
+        self.assertEqual(tokens[0][0], T.Keyword.DDL)
+        self.assertEqual(tokens[2][0], T.Name)
         self.assertEqual(tokens[2][1], u'created_foo')
         s = "enddate"
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 1)
-        self.assertEqual(tokens[0][0], Name)
+        self.assertEqual(tokens[0][0], T.Name)
         s = "join_col"
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 1)
-        self.assertEqual(tokens[0][0], Name)
+        self.assertEqual(tokens[0][0], T.Name)
         s = "left join_col"
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[2][0], Name)
+        self.assertEqual(tokens[2][0], T.Name)
         self.assertEqual(tokens[2][1], 'join_col')
 
     def test_negative_numbers(self):
         s = "values(-1)"
         tokens = list(lexer.tokenize(s))
         self.assertEqual(len(tokens), 4)
-        self.assertEqual(tokens[2][0], Number.Integer)
+        self.assertEqual(tokens[2][0], T.Number.Integer)
         self.assertEqual(tokens[2][1], '-1')
 
     def test_tab_expansion(self):
@@ -88,15 +87,15 @@ class TestToken(unittest.TestCase):
         self.assertEqual(str(token), 'FoO')
 
     def test_repr(self):
-        token = sql.Token(Keyword, 'foo')
+        token = sql.Token(T.Keyword, 'foo')
         tst = "<Keyword 'foo' at 0x"
         self.assertEqual(repr(token)[:len(tst)], tst)
-        token = sql.Token(Keyword, '1234567890')
+        token = sql.Token(T.Keyword, '1234567890')
         tst = "<Keyword '123456...' at 0x"
         self.assertEqual(repr(token)[:len(tst)], tst)
 
     def test_flatten(self):
-        token = sql.Token(Keyword, 'foo')
+        token = sql.Token(T.Keyword, 'foo')
         gen = token.flatten()
         self.assertEqual(type(gen), types.GeneratorType)
         lgen = list(gen)
@@ -118,15 +117,16 @@ class TestTokenList(unittest.TestCase):
         self.assertEqual(sql.TokenList([]).token_first(), None)
 
     def test_token_matching(self):
-        t1 = sql.Token(Keyword, 'foo')
-        t2 = sql.Token(Punctuation, ',')
+        t1 = sql.Token(T.Keyword, 'foo')
+        t2 = sql.Token(T.Punctuation, ',')
         x = sql.TokenList([t1, t2])
-        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
+        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is T.Keyword]),
                          t1)
-        self.assertEqual(x.token_matching(0,
-                                          [lambda t: t.ttype is Punctuation]),
-                         t2)
-        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
+        self.assertEqual(x.token_matching(
+            0,
+            [lambda t: t.ttype is T.Punctuation]),
+            t2)
+        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is T.Keyword]),
                          None)
 
 
@@ -155,7 +155,7 @@ class TestStream(unittest.TestCase):
         lex.bufsize = 4
         tokens = list(lex.get_tokens(stream))
         self.assertEqual(len(tokens), 2)
-        self.assertEqual(tokens[1][0], Error)
+        self.assertEqual(tokens[1][0], T.Error)
 
 
 @pytest.mark.parametrize('expr', ['JOIN', 'LEFT JOIN', 'LEFT OUTER JOIN',
@@ -165,21 +165,21 @@ class TestStream(unittest.TestCase):
 def test_parse_join(expr):
     p = sqlparse.parse('%s foo' % expr)[0]
     assert len(p.tokens) == 3
-    assert p.tokens[0].ttype is Keyword
+    assert p.tokens[0].ttype is T.Keyword
 
 
 def test_parse_endifloop():
     p = sqlparse.parse('END IF')[0]
     assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
+    assert p.tokens[0].ttype is T.Keyword
     p = sqlparse.parse('END  IF')[0]
     assert len(p.tokens) == 1
     p = sqlparse.parse('END\t\nIF')[0]
     assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
+    assert p.tokens[0].ttype is T.Keyword
     p = sqlparse.parse('END LOOP')[0]
     assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
+    assert p.tokens[0].ttype is T.Keyword
     p = sqlparse.parse('END  LOOP')[0]
     assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
+    assert p.tokens[0].ttype is T.Keyword
```
