summaryrefslogtreecommitdiff
path: root/sqlparse
diff options
context:
space:
mode:
authorVictor Uriarte <victor.m.uriarte@intel.com>2016-06-04 21:57:43 -0700
committerVictor Uriarte <victor.m.uriarte@intel.com>2016-06-06 06:31:35 -0700
commita7c7d9586208516de372cb01203b48a53f7095fb (patch)
tree95e887de388e5f6f1516b1ea862e9d48a4d174eb /sqlparse
parent035ffb8e61b2cda516ae448246df36451ff6d14b (diff)
downloadsqlparse-a7c7d9586208516de372cb01203b48a53f7095fb.tar.gz
Format `pr` to pass flake8 and update functions used
Diffstat (limited to 'sqlparse')
-rw-r--r--sqlparse/filters.py95
-rw-r--r--sqlparse/formatter.py6
2 files changed, 56 insertions, 45 deletions
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index 464a570..20f61a0 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -139,24 +139,23 @@ class StripWhitespaceFilter(object):
stmt.tokens.pop(-1)
-class SpacesAroundOperatorsFilter:
+class SpacesAroundOperatorsFilter(object):
whitelist = (sql.Identifier, sql.Comparison, sql.Where)
def _process(self, tlist):
def next_token(idx):
- # HACK: distinguish between real wildcard from multiplication operator
- return tlist.token_next_by_type(idx, (T.Operator, T.Comparison, T.Wildcard))
+ return tlist.token_next_by(t=(T.Operator, T.Comparison), idx=idx)
+
idx = 0
token = next_token(idx)
while token:
idx = tlist.token_index(token)
if idx > 0 and tlist.tokens[idx - 1].ttype != T.Whitespace:
- tlist.tokens.insert(idx, sql.Token(T.Whitespace, ' ')) # insert before
+ # insert before
+ tlist.tokens.insert(idx, sql.Token(T.Whitespace, ' '))
idx += 1
if idx < len(tlist.tokens) - 1:
- if token.ttype == T.Wildcard and tlist.tokens[idx + 1].match(T.Punctuation, ','):
- pass # this must have been a real wildcard, not multiplication
- elif tlist.tokens[idx + 1].ttype != T.Whitespace:
+ if tlist.tokens[idx + 1].ttype != T.Whitespace:
tlist.tokens.insert(idx + 1, sql.Token(T.Whitespace, ' '))
idx += 1
@@ -165,7 +164,7 @@ class SpacesAroundOperatorsFilter:
for sgroup in tlist.get_sublists():
self._process(sgroup)
- def process(self, stack, stmt):
+ def process(self, stmt):
self._process(stmt)
@@ -365,16 +364,16 @@ class ReindentFilter(object):
self._last_stmt = stmt
-class AlignedIndentFilter:
- join_words = r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN\b'
- split_words = (
- 'FROM',
- join_words, 'ON',
- 'WHERE', 'AND', 'OR',
- 'GROUP', 'HAVING', 'LIMIT',
- 'ORDER', 'UNION', 'VALUES',
- 'SET', 'BETWEEN', 'EXCEPT',
- )
+class AlignedIndentFilter(object):
+ join_words = (r'((LEFT\s+|RIGHT\s+|FULL\s+)?'
+ r'(INNER\s+|OUTER\s+|STRAIGHT\s+)?|'
+ r'(CROSS\s+|NATURAL\s+)?)?JOIN\b')
+ split_words = ('FROM',
+ join_words, 'ON',
+ 'WHERE', 'AND', 'OR',
+ 'GROUP', 'HAVING', 'LIMIT',
+ 'ORDER', 'UNION', 'VALUES',
+ 'SET', 'BETWEEN', 'EXCEPT')
def __init__(self, char=' ', line_width=None):
self.char = char
@@ -384,43 +383,51 @@ class AlignedIndentFilter:
return sql.Token(T.Newline, '\n')
def whitespace(self, chars=0, newline_before=False, newline_after=False):
- return sql.Token(
- T.Whitespace,
- (str(self.newline()) if newline_before else '') + self.char * chars + (str(self.newline()) if newline_after else ''))
+ return sql.Token(T.Whitespace, ('\n' if newline_before else '') +
+ self.char * chars + ('\n' if newline_after else ''))
def _process_statement(self, tlist, base_indent=0):
if tlist.tokens[0].is_whitespace() and base_indent == 0:
tlist.tokens.pop(0)
# process the main query body
- return self._process(sql.TokenList(tlist.tokens), base_indent=base_indent)
+ return self._process(sql.TokenList(tlist.tokens),
+ base_indent=base_indent)
def _process_parenthesis(self, tlist, base_indent=0):
- if not tlist.token_next_match(0, T.DML, 'SELECT'):
+ if not tlist.token_next_by(m=(T.DML, 'SELECT')):
# if this isn't a subquery, don't re-indent
return tlist
- sub_indent = base_indent + self._max_kwd_len + 2 # add two for the space and parens
- tlist.insert_after(tlist.tokens[0], self.whitespace(sub_indent, newline_before=True))
+ # add two for the space and parens
+ sub_indent = base_indent + self._max_kwd_len + 2
+ tlist.insert_after(tlist.tokens[0],
+ self.whitespace(sub_indent, newline_before=True))
# de-indent the last parenthesis
- tlist.insert_before(tlist.tokens[-1], self.whitespace(sub_indent - 1, newline_before=True))
+ tlist.insert_before(tlist.tokens[-1],
+ self.whitespace(sub_indent - 1,
+ newline_before=True))
# process the inside of the parentheses
tlist.tokens = (
[tlist.tokens[0]] +
- self._process(sql.TokenList(tlist._groupable_tokens), base_indent=sub_indent).tokens +
+ self._process(sql.TokenList(tlist._groupable_tokens),
+ base_indent=sub_indent).tokens +
[tlist.tokens[-1]]
- )
+ )
return tlist
def _process_identifierlist(self, tlist, base_indent=0):
# columns being selected
new_tokens = []
- identifiers = filter(lambda t: t.ttype not in (T.Punctuation, T.Whitespace, T.Newline), tlist.tokens)
+ identifiers = list(filter(
+ lambda t: t.ttype not in (T.Punctuation, T.Whitespace, T.Newline),
+ tlist.tokens))
for i, token in enumerate(identifiers):
if i > 0:
new_tokens.append(self.newline())
- new_tokens.append(self.whitespace(self._max_kwd_len + base_indent + 1))
+ new_tokens.append(
+ self.whitespace(self._max_kwd_len + base_indent + 1))
new_tokens.append(token)
if i < len(identifiers) - 1:
# if not last column in select, add a comma separator
@@ -437,10 +444,11 @@ class AlignedIndentFilter:
case_offset = len('when ')
cases = tlist.get_cases(skip_ws=True)
# align the end as well
- end_token = tlist.token_next_match(0, T.Keyword, 'END')
+ end_token = tlist.token_next_by(m=(T.Keyword, 'END'))
cases.append((None, [end_token]))
- condition_width = max(len(' '.join(map(str, cond))) for cond, value in cases if cond)
+ condition_width = max(
+ len(' '.join(map(str, cond))) for cond, value in cases if cond)
for i, (cond, value) in enumerate(cases):
if cond is None: # else or end
stmt = value[0]
@@ -449,9 +457,11 @@ class AlignedIndentFilter:
stmt = cond[0]
line = cond + value
if i > 0:
- tlist.insert_before(stmt, self.whitespace(base_offset + case_offset - len(str(stmt))))
+ tlist.insert_before(stmt, self.whitespace(
+ base_offset + case_offset - len(str(stmt))))
if cond:
- tlist.insert_after(cond[-1], self.whitespace(condition_width - len(' '.join(map(str, cond)))))
+ tlist.insert_after(cond[-1], self.whitespace(
+ condition_width - len(' '.join(map(str, cond)))))
if i < len(cases) - 1:
# if not the END add a newline
@@ -459,7 +469,8 @@ class AlignedIndentFilter:
def _process_substatement(self, tlist, base_indent=0):
def _next_token(i):
- t = tlist.token_next_match(i, T.Keyword, self.split_words, regex=True)
+ t = tlist.token_next_by(m=(T.Keyword, self.split_words, True),
+ idx=i)
# treat "BETWEEN x and y" as a single statement
if t and t.value.upper() == 'BETWEEN':
t = _next_token(tlist.token_index(t) + 1)
@@ -470,12 +481,14 @@ class AlignedIndentFilter:
idx = 0
token = _next_token(idx)
while token:
+ # joins are special case. only consider the first word as aligner
if token.match(T.Keyword, self.join_words, regex=True):
- # joins are a special case. we only consider the first word of the join as the aligner
token_indent = len(token.value.split()[0])
else:
token_indent = len(str(token))
- tlist.insert_before(token, self.whitespace(self._max_kwd_len - token_indent + base_indent, newline_before=True))
+ tlist.insert_before(token, self.whitespace(
+ self._max_kwd_len - token_indent + base_indent,
+ newline_before=True))
next_idx = tlist.token_index(token) + 1
token = _next_token(next_idx)
@@ -483,22 +496,20 @@ class AlignedIndentFilter:
for sgroup in tlist.get_sublists():
prev_token = tlist.token_prev(tlist.token_index(sgroup))
indent_offset = 0
+ # HACK: make "group/order by" work. Longer than _max_kwd_len.
if prev_token and prev_token.match(T.Keyword, 'BY'):
- # HACK: make "group by" and "order by" indents work. these are longer than _max_kwd_len.
# TODO: generalize this
indent_offset = 3
self._process(sgroup, base_indent=base_indent + indent_offset)
return tlist
- def _process(self, tlist, base_indent=0, verbose=False):
+ def _process(self, tlist, base_indent=0):
token_name = tlist.__class__.__name__.lower()
func_name = '_process_%s' % token_name
func = getattr(self, func_name, self._process_substatement)
- if verbose:
- print func.__name__, token_name, str(tlist)
return func(tlist, base_indent=base_indent)
- def process(self, stack, stmt):
+ def process(self, stmt):
self._process(stmt)
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index 0fa563c..069109b 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -30,10 +30,10 @@ def validate_options(options):
raise SQLParseError('Invalid value for strip_comments: %r'
% strip_comments)
- use_space_around_operators = options.get('use_space_around_operators', False)
- if use_space_around_operators not in [True, False]:
+ space_around_operators = options.get('use_space_around_operators', False)
+ if space_around_operators not in [True, False]:
raise SQLParseError('Invalid value for use_space_around_operators: %r'
- % use_space_around_operators)
+ % space_around_operators)
strip_ws = options.get('strip_whitespace', False)
if strip_ws not in [True, False]: