author     Andi Albrecht <albrecht.andi@gmail.com>   2012-11-12 14:40:07 +0100
committer  Andi Albrecht <albrecht.andi@gmail.com>   2012-11-12 14:40:07 +0100
commit     dcab30c6387265dcbcbedfa2d515917ffc88d036 (patch)
tree       e3c03e7c8014d3770c3b68952c2c912367bf8235 /sqlparse
parent     9ef2a8e6080cfbe699aba0cad968c6e0d2554aa7 (diff)
parent     f4876322e7fe36923d4eb89757a844b3d90be306 (diff)
download   sqlparse-dcab30c6387265dcbcbedfa2d515917ffc88d036.tar.gz
Merge branch 'work'
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/__init__.py         |  3
-rw-r--r--  sqlparse/engine/grouping.py  |  3
-rw-r--r--  sqlparse/exceptions.py       |  3
-rw-r--r--  sqlparse/filters.py          | 10
-rw-r--r--  sqlparse/functions.py        |  4
-rw-r--r--  sqlparse/keywords.py         | 12
-rw-r--r--  sqlparse/lexer.py            |  3
7 files changed, 21 insertions, 17 deletions
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index a64c80f..defca37 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -61,7 +61,10 @@ def split(sql):
stack.split_statements = True
return [unicode(stmt) for stmt in stack.run(sql)]
+
from sqlparse.engine.filter import StatementFilter
+
+
def split2(stream):
splitter = StatementFilter()
return list(splitter.process(None, stream))
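The hunk above only adds blank lines around the StatementFilter import and split2(); behaviour is unchanged. For orientation, a minimal usage sketch of the public split() entry point (the sample SQL is made up for illustration):

    import sqlparse

    # split() cuts a string holding several statements into one string per
    # statement; split2() does the same for an already tokenized stream.
    raw = "select * from foo; select * from bar;"
    for statement in sqlparse.split(raw):
        print(statement)
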
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index a98e787..0b64d21 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -155,7 +155,8 @@ def group_identifier(tlist):
def _next_token(tl, i):
# chooses the next token. if two tokens are found then the
# first is returned.
- t1 = tl.token_next_by_type(i, (T.String.Symbol, T.String.Single, T.Name))
+ t1 = tl.token_next_by_type(
+ i, (T.String.Symbol, T.String.Single, T.Name))
t2 = tl.token_next_by_instance(i, sql.Function)
if t1 and t2:
i1 = tl.token_index(t1)
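The wrapped call above belongs to _next_token, which looks up two kinds of candidate tokens and keeps whichever occurs first in the list. A hypothetical, self-contained sketch of that earliest-match-wins rule, using a plain Python list instead of sqlparse token objects (all names below are illustrative, not sqlparse API):

    def earliest(seq, cand_a, cand_b):
        # Return whichever candidate appears first in seq; a missing
        # candidate loses automatically.
        if cand_a is None:
            return cand_b
        if cand_b is None:
            return cand_a
        return min(cand_a, cand_b, key=seq.index)

    toks = ["name", "(", "arg", ")"]
    assert earliest(toks, "arg", "(") == "("
    assert earliest(toks, "arg", None) == "arg"
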
diff --git a/sqlparse/exceptions.py b/sqlparse/exceptions.py
index 2a8e571..ec25afa 100644
--- a/sqlparse/exceptions.py
+++ b/sqlparse/exceptions.py
@@ -5,7 +5,6 @@
"""Exceptions used in this package."""
+
class SQLParseError(Exception):
"""Base class for exceptions in this module."""
-
-
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index c5165be..2f1c825 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -329,8 +329,8 @@ class ReindentFilter:
self.indent += 1
tlist.tokens.insert(0, self.nl())
indented = True
- num_offset = self._get_offset(tlist.token_next_match(0,
- T.Punctuation, '('))
+ num_offset = self._get_offset(
+ tlist.token_next_match(0, T.Punctuation, '('))
self.offset += num_offset
self._process_default(tlist, stmts=not indented)
if indented:
@@ -397,8 +397,8 @@ class ReindentFilter:
nl = '\n'
else:
nl = '\n\n'
- stmt.tokens.insert(0,
- sql.Token(T.Whitespace, nl))
+ stmt.tokens.insert(
+ 0, sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
@@ -407,7 +407,7 @@ class ReindentFilter:
class RightMarginFilter:
keep_together = (
-# sql.TypeCast, sql.Identifier, sql.Alias,
+ # sql.TypeCast, sql.Identifier, sql.Alias,
)
def __init__(self, width=79):
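The filters.py hunks only reflow call arguments and comment indentation inside ReindentFilter and RightMarginFilter. ReindentFilter is normally reached through the public format() helper, roughly as in this usage sketch (the sample SQL is invented; the exact output layout depends on the sqlparse version):

    import sqlparse

    # reindent=True enables the reindenting filter behind format();
    # keyword_case='upper' additionally upper-cases keywords.
    sql = "select id, name from users where id in (select user_id from orders)"
    print(sqlparse.format(sql, reindent=True, keyword_case='upper'))
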
diff --git a/sqlparse/functions.py b/sqlparse/functions.py
index aaf6fb8..fbfcc0b 100644
--- a/sqlparse/functions.py
+++ b/sqlparse/functions.py
@@ -6,9 +6,9 @@ Created on 17/05/2012
Several utility functions to extract info from the SQL sentences
'''
-from sqlparse.filters import ColumnsSelect, Limit
+from sqlparse.filters import ColumnsSelect, Limit
from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import Keyword, Whitespace
+from sqlparse.tokens import Keyword, Whitespace
def getlimit(stream):
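The import hunk in functions.py is a whitespace-only change; the removed and re-added lines differ only in spacing that this plain-text view cannot show. As a rough idea of what a LIMIT extractor such as getlimit() has to do with a token stream, here is a hypothetical stand-alone sketch over (ttype, value) pairs; it is not the real implementation, and the hand-written stream below exists only for illustration:

    from sqlparse.tokens import Keyword, Number, Whitespace

    def limit_of(stream):
        # Return the integer following a LIMIT keyword, or None if absent.
        seen_limit = False
        for ttype, value in stream:
            if seen_limit and ttype is not Whitespace:
                return int(value) if ttype is Number.Integer else None
            if ttype is Keyword and value.upper() == 'LIMIT':
                seen_limit = True
        return None

    stream = [(Keyword.DML, 'SELECT'), (Whitespace, ' '), (Keyword, 'LIMIT'),
              (Whitespace, ' '), (Number.Integer, '10')]
    assert limit_of(stream) == 10
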
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index 9c59ee9..c11a3a6 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -36,7 +36,7 @@ KEYWORDS = {
'BOTH': tokens.Keyword,
'BREADTH': tokens.Keyword,
-# 'C': tokens.Keyword, # most likely this is an alias
+ # 'C': tokens.Keyword, # most likely this is an alias
'CACHE': tokens.Keyword,
'CALL': tokens.Keyword,
'CALLED': tokens.Keyword,
@@ -172,7 +172,7 @@ KEYWORDS = {
'FULL': tokens.Keyword,
'FUNCTION': tokens.Keyword,
-# 'G': tokens.Keyword,
+ # 'G': tokens.Keyword,
'GENERAL': tokens.Keyword,
'GENERATED': tokens.Keyword,
'GET': tokens.Keyword,
@@ -219,7 +219,7 @@ KEYWORDS = {
'ISOLATION': tokens.Keyword,
'ITERATE': tokens.Keyword,
-# 'K': tokens.Keyword,
+ # 'K': tokens.Keyword,
'KEY': tokens.Keyword,
'KEY_MEMBER': tokens.Keyword,
'KEY_TYPE': tokens.Keyword,
@@ -244,7 +244,7 @@ KEYWORDS = {
'LOCK': tokens.Keyword,
'LOWER': tokens.Keyword,
-# 'M': tokens.Keyword,
+ # 'M': tokens.Keyword,
'MAP': tokens.Keyword,
'MATCH': tokens.Keyword,
'MAXVALUE': tokens.Keyword,
@@ -519,7 +519,7 @@ KEYWORDS = {
'INT8': tokens.Name.Builtin,
'SERIAL8': tokens.Name.Builtin,
'TEXT': tokens.Name.Builtin,
- }
+}
KEYWORDS_COMMON = {
@@ -562,4 +562,4 @@ KEYWORDS_COMMON = {
'MIN': tokens.Keyword,
'MAX': tokens.Keyword,
'DISTINCT': tokens.Keyword,
- }
+}
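Only comment indentation and the closing braces of the two keyword tables change here; the entries themselves are untouched. Roughly how the lexer consults these tables, as a simplified sketch rather than the exact lookup callback: KEYWORDS_COMMON takes precedence over KEYWORDS, and unknown words fall back to a plain name token.

    from sqlparse import tokens
    from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON

    def classify(word):
        # Simplified lookup: common keywords first, then the big table,
        # otherwise treat the word as an identifier.
        upper = word.upper()
        return KEYWORDS_COMMON.get(upper, KEYWORDS.get(upper, tokens.Name))

    assert classify("distinct") is tokens.Keyword    # listed in KEYWORDS_COMMON
    assert classify("cache") is tokens.Keyword       # listed in KEYWORDS
    assert classify("some_column") is tokens.Name    # unknown words stay names
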
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 432eb1e..09631da 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -18,6 +18,7 @@ from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from cStringIO import StringIO
+
class include(str):
pass
@@ -158,7 +159,7 @@ class Lexer(object):
stripall = False
stripnl = False
tabsize = 0
- flags = re.IGNORECASE|re.UNICODE
+ flags = re.IGNORECASE | re.UNICODE
bufsize = 4096
tokens = {
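
The flags change above only adds spaces around the | operator; the combined flags behave exactly as before. A minimal, stdlib-only illustration of why the lexer compiles its token patterns with re.IGNORECASE | re.UNICODE (the pattern here is invented, not taken from Lexer.tokens):

    import re

    # IGNORECASE lets keyword patterns match any casing; UNICODE makes \w and
    # \b cover non-ASCII word characters on Python 2.
    pattern = re.compile(r'\bselect\b', re.IGNORECASE | re.UNICODE)
    assert pattern.match('SELECT')
    assert pattern.match('Select')
    assert not pattern.match('selection')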