author     Andi Albrecht <albrecht.andi@gmail.com>  2011-08-12 14:34:58 +0200
committer  Andi Albrecht <albrecht.andi@gmail.com>  2011-08-12 14:34:58 +0200
commit     99af50cf179539f8d82e57ea9a0530adad238a96 (patch)
tree       58e7626221d0f7c1dba7c0d21055b42fcb3b7f46 /sqlparse
parent     0c88f8506acaa74b970f463e06e134671a6db1c8 (diff)
download   sqlparse-99af50cf179539f8d82e57ea9a0530adad238a96.tar.gz
Code cleanup.
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/filters.py  19
1 file changed, 11 insertions, 8 deletions
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index ce2fb80..2d715a8 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -7,7 +7,10 @@ from os.path import abspath, join
from sqlparse import sql
from sqlparse import tokens as T
from sqlparse.engine import FilterStack
-from sqlparse.tokens import Comment, Keyword, Name, Punctuation, String, Whitespace
+from sqlparse.tokens import (
+ Comment, Keyword, Name,
+ Punctuation, String, Whitespace,
+)
class Filter(object):
@@ -56,7 +59,7 @@ class IdentifierCaseFilter(_CaseFilter):
yield ttype, value
-class Get_Comments(Filter):
+class GetComments(Filter):
"""Get the comments from a stack"""
def process(self, stack, stream):
for token_type, value in stream:
@@ -81,7 +84,6 @@ class IncludeStatement(Filter):
self.detected = False
-
def process(self, stack, stream):
# Run over all tokens in the stream
for token_type, value in stream:
@@ -107,10 +109,9 @@ class IncludeStatement(Filter):
if path:
try:
with open(path) as f:
- sql = f.read()
+ raw_sql = f.read()
except IOError, err:
- logging.error(err)
yield Comment, u'-- IOError: %s\n' % err
else:
@@ -120,7 +121,7 @@ class IncludeStatement(Filter):
stack = FilterStack()
stack.preprocess.append(IncludeStatement(self.dirpath))
- for tv in stack.run(sql):
+ for tv in stack.run(raw_sql):
yield tv
# Set normal mode
@@ -227,6 +228,7 @@ class ReindentFilter(Filter):
split_words = ('FROM', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
'SET', 'BETWEEN')
+
def _next_token(i):
t = tlist.token_next_match(i, T.Keyword, split_words,
regex=True)
@@ -426,7 +428,8 @@ class ColumnsSelect(Filter):
oldValue = ""
mode = 2
- elif token_type == Punctuation and value == ',' and not parenthesis:
+ elif (token_type == Punctuation
+ and value == ',' and not parenthesis):
if oldValue:
yield Name, oldValue
oldValue = ""
@@ -573,4 +576,4 @@ class Limit(Filter):
if index and token_type in Keyword and value == 'LIMIT':
return stream[4 - index][1]
- return -1 \ No newline at end of file
+ return -1
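
For orientation, the filters touched in this commit (the case filters and ReindentFilter) are the ones normally exercised through sqlparse's public format() options. A minimal usage sketch follows; the sample SQL string is illustrative, not part of the commit:

import sqlparse

# ReindentFilter and the keyword case filter edited above are applied
# when the corresponding format() options are enabled.
raw = "select id, name from users where id = 1"
print(sqlparse.format(raw, reindent=True, keyword_case='upper'))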