author     Andi Albrecht <albrecht.andi@gmail.com>  2012-11-12 14:40:07 +0100
committer  Andi Albrecht <albrecht.andi@gmail.com>  2012-11-12 14:40:07 +0100
commit     dcab30c6387265dcbcbedfa2d515917ffc88d036 (patch)
tree       e3c03e7c8014d3770c3b68952c2c912367bf8235
parent     9ef2a8e6080cfbe699aba0cad968c6e0d2554aa7 (diff)
parent     f4876322e7fe36923d4eb89757a844b3d90be306 (diff)
Merge branch 'work'
-rw-r--r--  pytest.ini                   |  6
-rw-r--r--  sqlparse/__init__.py         |  3
-rw-r--r--  sqlparse/engine/grouping.py  |  3
-rw-r--r--  sqlparse/exceptions.py       |  3
-rw-r--r--  sqlparse/filters.py          | 10
-rw-r--r--  sqlparse/functions.py        |  4
-rw-r--r--  sqlparse/keywords.py         | 12
-rw-r--r--  sqlparse/lexer.py            |  3
-rw-r--r--  tests/test_filters.py        | 13
-rw-r--r--  tests/test_format.py         |  2
-rw-r--r--  tests/test_functions.py      | 20
-rw-r--r--  tests/test_parse.py          |  2
-rw-r--r--  tests/test_pipeline.py       | 47
-rw-r--r--  tests/test_split.py          |  3
-rw-r--r--  tests/test_tokenize.py       | 61
15 files changed, 106 insertions(+), 86 deletions(-)
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..a2cbd90
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+pep8ignore =
+ extras/* ALL
+ examples/* ALL
+ docs/* ALL
+ * E125 E127
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index a64c80f..defca37 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -61,7 +61,10 @@ def split(sql):
stack.split_statements = True
return [unicode(stmt) for stmt in stack.run(sql)]
+
from sqlparse.engine.filter import StatementFilter
+
+
def split2(stream):
splitter = StatementFilter()
return list(splitter.process(None, stream))
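Not part of the diff — a minimal usage sketch of the split2() helper touched above. Unlike split(), it expects an already tokenized stream, so the caller tokenizes first (module name and sample SQL are illustrative only):

    # Sketch only: split2() consumes a token stream, not a raw SQL string.
    from sqlparse import lexer, split2

    stream = lexer.tokenize(u'select * from foo; select * from bar;')
    statements = split2(stream)   # one entry per statement in the stream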
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index a98e787..0b64d21 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -155,7 +155,8 @@ def group_identifier(tlist):
def _next_token(tl, i):
# chooses the next token. if two tokens are found then the
# first is returned.
- t1 = tl.token_next_by_type(i, (T.String.Symbol, T.String.Single, T.Name))
+ t1 = tl.token_next_by_type(
+ i, (T.String.Symbol, T.String.Single, T.Name))
t2 = tl.token_next_by_instance(i, sql.Function)
if t1 and t2:
i1 = tl.token_index(t1)
diff --git a/sqlparse/exceptions.py b/sqlparse/exceptions.py
index 2a8e571..ec25afa 100644
--- a/sqlparse/exceptions.py
+++ b/sqlparse/exceptions.py
@@ -5,7 +5,6 @@
"""Exceptions used in this package."""
+
class SQLParseError(Exception):
"""Base class for exceptions in this module."""
-
-
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index c5165be..2f1c825 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -329,8 +329,8 @@ class ReindentFilter:
self.indent += 1
tlist.tokens.insert(0, self.nl())
indented = True
- num_offset = self._get_offset(tlist.token_next_match(0,
- T.Punctuation, '('))
+ num_offset = self._get_offset(
+ tlist.token_next_match(0, T.Punctuation, '('))
self.offset += num_offset
self._process_default(tlist, stmts=not indented)
if indented:
@@ -397,8 +397,8 @@ class ReindentFilter:
nl = '\n'
else:
nl = '\n\n'
- stmt.tokens.insert(0,
- sql.Token(T.Whitespace, nl))
+ stmt.tokens.insert(
+ 0, sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
@@ -407,7 +407,7 @@ class ReindentFilter:
class RightMarginFilter:
keep_together = (
-# sql.TypeCast, sql.Identifier, sql.Alias,
+ # sql.TypeCast, sql.Identifier, sql.Alias,
)
def __init__(self, width=79):
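For context, and not part of the diff: the ReindentFilter hunks above only re-wrap long calls, so formatting behaviour is unchanged. ReindentFilter is what the public API uses when reindent=True; a minimal sketch:

    # Sketch: reindenting through the public API, which wires up
    # ReindentFilter internally.
    import sqlparse

    print sqlparse.format('select a, b from foo where a = 1', reindent=True)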
diff --git a/sqlparse/functions.py b/sqlparse/functions.py
index aaf6fb8..fbfcc0b 100644
--- a/sqlparse/functions.py
+++ b/sqlparse/functions.py
@@ -6,9 +6,9 @@ Created on 17/05/2012
Several utility functions to extract info from the SQL sentences
'''
-from sqlparse.filters import ColumnsSelect, Limit
+from sqlparse.filters import ColumnsSelect, Limit
from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import Keyword, Whitespace
+from sqlparse.tokens import Keyword, Whitespace
def getlimit(stream):
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index 9c59ee9..c11a3a6 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -36,7 +36,7 @@ KEYWORDS = {
'BOTH': tokens.Keyword,
'BREADTH': tokens.Keyword,
-# 'C': tokens.Keyword, # most likely this is an alias
+ # 'C': tokens.Keyword, # most likely this is an alias
'CACHE': tokens.Keyword,
'CALL': tokens.Keyword,
'CALLED': tokens.Keyword,
@@ -172,7 +172,7 @@ KEYWORDS = {
'FULL': tokens.Keyword,
'FUNCTION': tokens.Keyword,
-# 'G': tokens.Keyword,
+ # 'G': tokens.Keyword,
'GENERAL': tokens.Keyword,
'GENERATED': tokens.Keyword,
'GET': tokens.Keyword,
@@ -219,7 +219,7 @@ KEYWORDS = {
'ISOLATION': tokens.Keyword,
'ITERATE': tokens.Keyword,
-# 'K': tokens.Keyword,
+ # 'K': tokens.Keyword,
'KEY': tokens.Keyword,
'KEY_MEMBER': tokens.Keyword,
'KEY_TYPE': tokens.Keyword,
@@ -244,7 +244,7 @@ KEYWORDS = {
'LOCK': tokens.Keyword,
'LOWER': tokens.Keyword,
-# 'M': tokens.Keyword,
+ # 'M': tokens.Keyword,
'MAP': tokens.Keyword,
'MATCH': tokens.Keyword,
'MAXVALUE': tokens.Keyword,
@@ -519,7 +519,7 @@ KEYWORDS = {
'INT8': tokens.Name.Builtin,
'SERIAL8': tokens.Name.Builtin,
'TEXT': tokens.Name.Builtin,
- }
+}
KEYWORDS_COMMON = {
@@ -562,4 +562,4 @@ KEYWORDS_COMMON = {
'MIN': tokens.Keyword,
'MAX': tokens.Keyword,
'DISTINCT': tokens.Keyword,
- }
+}
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 432eb1e..09631da 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -18,6 +18,7 @@ from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from cStringIO import StringIO
+
class include(str):
pass
@@ -158,7 +159,7 @@ class Lexer(object):
stripall = False
stripnl = False
tabsize = 0
- flags = re.IGNORECASE|re.UNICODE
+ flags = re.IGNORECASE | re.UNICODE
bufsize = 4096
tokens = {
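As an aside (not in the diff): the flags combined above make keyword matching case-insensitive, and tokenize() yields (tokentype, value) pairs, as the tokenizer tests further down rely on. A minimal sketch:

    # Sketch: iterate over the (tokentype, value) pairs produced by the lexer.
    from sqlparse import lexer

    pairs = list(lexer.tokenize('SELECT 1;'))
    # e.g. pairs[-1] == (Punctuation, u';'), per tests/test_tokenize.py below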
diff --git a/tests/test_filters.py b/tests/test_filters.py
index 26f0b52..d827454 100644
--- a/tests/test_filters.py
+++ b/tests/test_filters.py
@@ -6,7 +6,7 @@ Created on 24/03/2012
import unittest
from sqlparse.filters import StripWhitespace, Tokens2Unicode
-from sqlparse.lexer import tokenize
+from sqlparse.lexer import tokenize
class Test__StripWhitespace(unittest.TestCase):
@@ -49,17 +49,20 @@ GROUP BY dir_entries.inode
LIMIT 1"""
def test_StripWhitespace1(self):
- self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql))),
+ self.assertEqual(
+ Tokens2Unicode(StripWhitespace(tokenize(self.sql))),
'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
'directories(inode)VALUES(:inode)LIMIT 1')
def test_StripWhitespace2(self):
- self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql2))),
+ self.assertEqual(
+ Tokens2Unicode(StripWhitespace(tokenize(self.sql2))),
'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
'parent_dir==:parent_dir AND name==:name LIMIT 1')
def test_StripWhitespace3(self):
- self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql3))),
+ self.assertEqual(
+ Tokens2Unicode(StripWhitespace(tokenize(self.sql3))),
'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
@@ -72,4 +75,4 @@ LIMIT 1"""
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
- unittest.main() \ No newline at end of file
+ unittest.main()
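Outside the diff, the pattern these tests exercise boils down to the following sketch (mirroring the assertions above):

    # Tokenize, drop whitespace tokens, then join what is left back into
    # a single unicode string.
    from sqlparse.filters import StripWhitespace, Tokens2Unicode
    from sqlparse.lexer import tokenize

    compacted = Tokens2Unicode(StripWhitespace(tokenize('SELECT  1 ;')))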
diff --git a/tests/test_format.py b/tests/test_format.py
index 9c764d7..c33ac93 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -91,7 +91,7 @@ class TestFormatReindent(TestCaseBase):
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
reindent=True, indent_width='foo')
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
- reindent=True, indent_width= -12)
+ reindent=True, indent_width=-12)
def test_stmts(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
diff --git a/tests/test_functions.py b/tests/test_functions.py
index aa382ce..52e2ce7 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -6,12 +6,12 @@ Created on 13/02/2012
from unittest import main, TestCase
from sqlparse.filters import IncludeStatement, Tokens2Unicode
-from sqlparse.lexer import tokenize
+from sqlparse.lexer import tokenize
import sys
sys.path.insert(0, '..')
-from sqlparse.filters import compact
+from sqlparse.filters import compact
from sqlparse.functions import getcolumns, getlimit, IsType
@@ -27,15 +27,17 @@ class Test_IncludeStatement(TestCase):
def test_includeStatement(self):
stream = tokenize(self.sql)
- includeStatement = IncludeStatement('tests/files', raiseexceptions=True)
+ includeStatement = IncludeStatement('tests/files',
+ raiseexceptions=True)
stream = includeStatement.process(None, stream)
stream = compact(stream)
result = Tokens2Unicode(stream)
- self.assertEqual(result,
- 'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
- 'directories(inode)VALUES(:inode)LIMIT 1')
+ self.assertEqual(
+ result, (
+ 'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
+ 'directories(inode)VALUES(:inode)LIMIT 1'))
class Test_SQL(TestCase):
@@ -96,7 +98,8 @@ class Test_Compact(Test_SQL):
result = compact(stream)
- self.assertEqual(Tokens2Unicode(result),
+ self.assertEqual(
+ Tokens2Unicode(result),
'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
'parent_dir==:parent_dir AND name==:name LIMIT 1')
@@ -105,7 +108,8 @@ class Test_Compact(Test_SQL):
result = compact(stream)
- self.assertEqual(Tokens2Unicode(result),
+ self.assertEqual(
+ Tokens2Unicode(result),
'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 05141a4..59b8e72 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -20,7 +20,7 @@ class SQLParseTest(TestCaseBase):
def test_multistatement(self):
sql1 = 'select * from foo;'
sql2 = 'select * from bar;'
- stmts = sqlparse.parse(sql1+sql2)
+ stmts = sqlparse.parse(sql1 + sql2)
self.assertEqual(len(stmts), 2)
self.assertEqual(str(stmts[0]), sql1)
self.assertEqual(str(stmts[1]), sql2)
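Restated outside the diff, the behaviour this test pins down:

    # Sketch: parse() returns one Statement per ';'-terminated statement,
    # and str() round-trips the original text.
    import sqlparse

    stmts = sqlparse.parse('select * from foo;select * from bar;')
    assert len(stmts) == 2 and str(stmts[0]) == 'select * from foo;'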
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 56eed4a..3442a5b 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -4,6 +4,7 @@ from sqlparse.filters import ColumnsSelect
from sqlparse.lexer import tokenize
from sqlparse.pipeline import Pipeline
+
class Test(unittest.TestCase):
def setUp(self):
@@ -34,35 +35,35 @@ class Test(unittest.TestCase):
def test_3(self):
sql = """
- SELECT
- 0 AS st_dev,
- 0 AS st_uid,
- 0 AS st_gid,
+SELECT
+0 AS st_dev,
+0 AS st_uid,
+0 AS st_gid,
- dir_entries.type AS st_mode,
- dir_entries.inode AS st_ino,
- COUNT(links.child_entry) AS st_nlink,
+dir_entries.type AS st_mode,
+dir_entries.inode AS st_ino,
+COUNT(links.child_entry) AS st_nlink,
- :creation AS st_ctime,
- dir_entries.access AS st_atime,
- dir_entries.modification AS st_mtime,
- -- :creation AS st_ctime,
- -- CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
- -- CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
+:creation AS st_ctime,
+dir_entries.access AS st_atime,
+dir_entries.modification AS st_mtime,
+-- :creation AS st_ctime,
+-- CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
+-- CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
- COALESCE(files.size,0) AS st_size, -- Python-FUSE
- COALESCE(files.size,0) AS size -- PyFilesystem
+COALESCE(files.size,0) AS st_size, -- Python-FUSE
+COALESCE(files.size,0) AS size -- PyFilesystem
- FROM dir_entries
- LEFT JOIN files
- ON dir_entries.inode == files.inode
- LEFT JOIN links
- ON dir_entries.inode == links.child_entry
+FROM dir_entries
+LEFT JOIN files
+ON dir_entries.inode == files.inode
+LEFT JOIN links
+ON dir_entries.inode == links.child_entry
- WHERE dir_entries.inode == :inode
+WHERE dir_entries.inode == :inode
- GROUP BY dir_entries.inode
- LIMIT 1"""
+GROUP BY dir_entries.inode
+LIMIT 1"""
self.assertEqual([u'st_dev', u'st_uid', u'st_gid', u'st_mode',
u'st_ino', u'st_nlink', u'st_ctime',
u'st_atime', u'st_mtime', u'st_size', u'size'],
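For orientation only (not in the diff): judging from the imports at the top of this test module, the pipeline being driven here is assembled roughly as below; the exact setUp is not shown in this hunk, so treat the wiring as an assumption.

    # Sketch, assuming a Pipeline fed by tokenize and a ColumnsSelect filter;
    # calling the pipeline returns the selected column names.
    from sqlparse.filters import ColumnsSelect
    from sqlparse.lexer import tokenize
    from sqlparse.pipeline import Pipeline

    pipe = Pipeline()
    pipe.append(tokenize)
    pipe.append(ColumnsSelect())
    columns = pipe('SELECT a, b FROM foo')   # roughly [u'a', u'b']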
diff --git a/tests/test_split.py b/tests/test_split.py
index 321fca2..f0b6dda 100644
--- a/tests/test_split.py
+++ b/tests/test_split.py
@@ -111,7 +111,8 @@ class SQLSplitTest(TestCaseBase):
def test_if_function(self): # see issue 33
# don't let IF as a function confuse the splitter
- sql = ('CREATE TEMPORARY TABLE tmp SELECT IF(a=1, a, b) AS o FROM one; '
+ sql = ('CREATE TEMPORARY TABLE tmp '
+ 'SELECT IF(a=1, a, b) AS o FROM one; '
'SELECT t FROM two')
stmts = sqlparse.split(sql)
self.assertEqual(len(stmts), 2)
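Outside the diff, the regression this test guards (issue 33) in a couple of lines:

    # Sketch: IF(...) used as a function must not be treated as a
    # statement boundary by split().
    import sqlparse

    stmts = sqlparse.split(
        'SELECT IF(a=1, a, b) AS o FROM one; SELECT t FROM two')
    assert len(stmts) == 2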
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 5b403f9..417aef6 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -12,9 +12,9 @@ from sqlparse.tokens import *
class TestTokenize(unittest.TestCase):
def test_simple(self):
- sql = 'select * from foo;'
- stream = lexer.tokenize(sql)
- self.assert_(type(stream) is types.GeneratorType)
+ s = 'select * from foo;'
+ stream = lexer.tokenize(s)
+ self.assert_(isinstance(stream, types.GeneratorType))
tokens = list(stream)
self.assertEqual(len(tokens), 8)
self.assertEqual(len(tokens[0]), 2)
@@ -22,60 +22,61 @@ class TestTokenize(unittest.TestCase):
self.assertEqual(tokens[-1], (Punctuation, u';'))
def test_backticks(self):
- sql = '`foo`.`bar`'
- tokens = list(lexer.tokenize(sql))
+ s = '`foo`.`bar`'
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 3)
self.assertEqual(tokens[0], (Name, u'`foo`'))
def test_linebreaks(self): # issue1
- sql = 'foo\nbar\n'
- tokens = lexer.tokenize(sql)
- self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
- sql = 'foo\rbar\r'
- tokens = lexer.tokenize(sql)
- self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
- sql = 'foo\r\nbar\r\n'
- tokens = lexer.tokenize(sql)
- self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
- sql = 'foo\r\nbar\n'
- tokens = lexer.tokenize(sql)
- self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
+ s = 'foo\nbar\n'
+ tokens = lexer.tokenize(s)
+ self.assertEqual(''.join(str(x[1]) for x in tokens), s)
+ s = 'foo\rbar\r'
+ tokens = lexer.tokenize(s)
+ self.assertEqual(''.join(str(x[1]) for x in tokens), s)
+ s = 'foo\r\nbar\r\n'
+ tokens = lexer.tokenize(s)
+ self.assertEqual(''.join(str(x[1]) for x in tokens), s)
+ s = 'foo\r\nbar\n'
+ tokens = lexer.tokenize(s)
+ self.assertEqual(''.join(str(x[1]) for x in tokens), s)
def test_inline_keywords(self): # issue 7
- sql = "create created_foo"
- tokens = list(lexer.tokenize(sql))
+ s = "create created_foo"
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 3)
self.assertEqual(tokens[0][0], Keyword.DDL)
self.assertEqual(tokens[2][0], Name)
self.assertEqual(tokens[2][1], u'created_foo')
- sql = "enddate"
- tokens = list(lexer.tokenize(sql))
+ s = "enddate"
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 1)
self.assertEqual(tokens[0][0], Name)
- sql = "join_col"
- tokens = list(lexer.tokenize(sql))
+ s = "join_col"
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 1)
self.assertEqual(tokens[0][0], Name)
- sql = "left join_col"
- tokens = list(lexer.tokenize(sql))
+ s = "left join_col"
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 3)
self.assertEqual(tokens[2][0], Name)
self.assertEqual(tokens[2][1], 'join_col')
def test_negative_numbers(self):
- sql = "values(-1)"
- tokens = list(lexer.tokenize(sql))
+ s = "values(-1)"
+ tokens = list(lexer.tokenize(s))
self.assertEqual(len(tokens), 4)
self.assertEqual(tokens[2][0], Number.Integer)
self.assertEqual(tokens[2][1], '-1')
def test_tab_expansion(self):
- sql = "\t"
+ s = "\t"
lex = lexer.Lexer()
lex.tabsize = 5
- tokens = list(lex.get_tokens(sql))
+ tokens = list(lex.get_tokens(s))
self.assertEqual(tokens[0][1], " " * 5)
+
class TestToken(unittest.TestCase):
def test_str(self):
@@ -124,9 +125,9 @@ class TestTokenList(unittest.TestCase):
self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
None)
+
class TestStream(unittest.TestCase):
def test_simple(self):
- import types
from cStringIO import StringIO
stream = StringIO("SELECT 1; SELECT 2;")