author    Andi Albrecht <albrecht.andi@gmail.com>  2016-08-24 14:59:28 +0200
committer GitHub <noreply@github.com>              2016-08-24 14:59:28 +0200
commit    791a3312a247670cdeed61e52e8ca449dbb27afa (patch)
tree      bc386cace78549766f46475f037a760335eb447b /sqlparse
parent    31830af6355557ec159d2173b12ad1437f49b447 (diff)
parent    a36008a235e31bc24b9d42a3a69b479031f024f9 (diff)
download  sqlparse-791a3312a247670cdeed61e52e8ca449dbb27afa.tar.gz
Merge pull request #285 from vmuriart/unify_naming_schema
Unify_naming_schema. Closes #283
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/grouping.py         | 10
-rw-r--r--  sqlparse/filters/aligned_indent.py  |  2
-rw-r--r--  sqlparse/filters/others.py          | 18
-rw-r--r--  sqlparse/filters/output.py          |  4
-rw-r--r--  sqlparse/filters/reindent.py        |  6
-rw-r--r--  sqlparse/filters/right_margin.py    |  4
-rw-r--r--  sqlparse/sql.py                     | 31
7 files changed, 34 insertions(+), 41 deletions(-)
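
The change is mechanical: the Token.is_group() and Token.is_whitespace()
predicate methods become plain instance attributes, assigned once in
Token.__init__ (with TokenList.__init__ overriding is_group to True), so
every call site simply drops the parentheses. A minimal before/after
sketch of the call pattern (the `token` variable is illustrative):

    # Before this commit: predicates were methods.
    if token.is_whitespace() and not token.is_group():
        pass

    # After this commit: plain attributes, computed once at construction.
    if token.is_whitespace and not token.is_group:
        pass
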
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 7cbcc6b..258abc8 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -21,13 +21,13 @@ def _group_matching(tlist, cls):
     for idx, token in enumerate(list(tlist)):
         tidx = idx - tidx_offset

-        if token.is_whitespace():
+        if token.is_whitespace:
             # ~50% of tokens will be whitespace. Checking early for them
             # avoids 3 comparisons, but then adds 1 more comparison for
             # the other ~50% of tokens...
             continue

-        if token.is_group() and not isinstance(token, cls):
+        if token.is_group and not isinstance(token, cls):
             # Check inside previously grouped (i.e. parenthesis) if a group
             # of a different type is inside (i.e. case), though ideally it
             # should check for all open/close tokens at once to avoid recursion
@@ -246,7 +246,7 @@ def group_comments(tlist):
     tidx, token = tlist.token_next_by(t=T.Comment)
     while token:
         eidx, end = tlist.token_not_matching(
-            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx)
+            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace, idx=tidx)
         if end is not None:
             eidx, end = tlist.token_prev(eidx, skip_ws=False)
             tlist.group_tokens(sql.Comment, tidx, eidx)
@@ -372,10 +372,10 @@ def _group(tlist, cls, match,
     for idx, token in enumerate(list(tlist)):
         tidx = idx - tidx_offset

-        if token.is_whitespace():
+        if token.is_whitespace:
             continue

-        if recurse and token.is_group() and not isinstance(token, cls):
+        if recurse and token.is_group and not isinstance(token, cls):
             _group(token, cls, match, valid_prev, valid_next, post, extend)

         if match(token):
diff --git a/sqlparse/filters/aligned_indent.py b/sqlparse/filters/aligned_indent.py
index 68a2d12..ad2d014 100644
--- a/sqlparse/filters/aligned_indent.py
+++ b/sqlparse/filters/aligned_indent.py
@@ -38,7 +38,7 @@ class AlignedIndentFilter(object):
             self._max_kwd_len + offset + indent + self.offset))

     def _process_statement(self, tlist):
-        if tlist.tokens[0].is_whitespace() and self.indent == 0:
+        if tlist.tokens[0].is_whitespace and self.indent == 0:
             tlist.tokens.pop(0)

         # process the main query body
diff --git a/sqlparse/filters/others.py b/sqlparse/filters/others.py
index 9d4a1d1..9c2f1ce 100644
--- a/sqlparse/filters/others.py
+++ b/sqlparse/filters/others.py
@@ -23,8 +23,8 @@ class StripCommentsFilter(object):
             # Replace by whitespace if prev and next exist and if they're not
             # whitespaces. This doesn't apply if prev or next is a parenthesis.
             if (prev_ is None or next_ is None or
-                    prev_.is_whitespace() or prev_.match(T.Punctuation, '(') or
-                    next_.is_whitespace() or next_.match(T.Punctuation, ')')):
+                    prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
+                    next_.is_whitespace or next_.match(T.Punctuation, ')')):
                 tlist.tokens.remove(token)
             else:
                 tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
@@ -48,9 +48,9 @@ class StripWhitespaceFilter(object):
         last_was_ws = False
         is_first_char = True
         for token in tlist.tokens:
-            if token.is_whitespace():
+            if token.is_whitespace:
                 token.value = '' if last_was_ws or is_first_char else ' '
-            last_was_ws = token.is_whitespace()
+            last_was_ws = token.is_whitespace
             is_first_char = False

     def _stripws_identifierlist(self, tlist):
@@ -59,25 +59,25 @@ class StripWhitespaceFilter(object):
         for token in list(tlist.tokens):
             if last_nl and token.ttype is T.Punctuation and token.value == ',':
                 tlist.tokens.remove(last_nl)
-            last_nl = token if token.is_whitespace() else None
+            last_nl = token if token.is_whitespace else None

             # next_ = tlist.token_next(token, skip_ws=False)
-            # if (next_ and not next_.is_whitespace() and
+            # if (next_ and not next_.is_whitespace and
             #         token.ttype is T.Punctuation and token.value == ','):
             #     tlist.insert_after(token, sql.Token(T.Whitespace, ' '))

         return self._stripws_default(tlist)

     def _stripws_parenthesis(self, tlist):
-        if tlist.tokens[1].is_whitespace():
+        if tlist.tokens[1].is_whitespace:
             tlist.tokens.pop(1)
-        if tlist.tokens[-2].is_whitespace():
+        if tlist.tokens[-2].is_whitespace:
             tlist.tokens.pop(-2)
         self._stripws_default(tlist)

     def process(self, stmt, depth=0):
         [self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
         self._stripws(stmt)
-        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace():
+        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
             stmt.tokens.pop(-1)
         return stmt
diff --git a/sqlparse/filters/output.py b/sqlparse/filters/output.py
index bbc5076..77a7ac8 100644
--- a/sqlparse/filters/output.py
+++ b/sqlparse/filters/output.py
@@ -47,7 +47,7 @@ class OutputPythonFilter(OutputFilter):
         # Print the tokens on the quote
         for token in stream:
             # Token is a new line separator
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 # Close quote and add a new line
                 yield sql.Token(T.Text, " '")
                 yield sql.Token(T.Whitespace, '\n')
@@ -93,7 +93,7 @@ class OutputPHPFilter(OutputFilter):
         # Print the tokens on the quote
         for token in stream:
             # Token is a new line separator
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 # Close quote and add a new line
                 yield sql.Token(T.Text, ' ";')
                 yield sql.Token(T.Whitespace, '\n')
diff --git a/sqlparse/filters/reindent.py b/sqlparse/filters/reindent.py
index 68595a5..3d93441 100644
--- a/sqlparse/filters/reindent.py
+++ b/sqlparse/filters/reindent.py
@@ -23,7 +23,7 @@ class ReindentFilter(object):

     def _flatten_up_to_token(self, token):
         """Yields all tokens up to token but excluding current."""
-        if token.is_group():
+        if token.is_group:
             token = next(token.flatten())

         for t in self._curr_stmt.flatten():
@@ -65,7 +65,7 @@ class ReindentFilter(object):
             pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
             uprev = text_type(prev_)

-            if prev_ and prev_.is_whitespace():
+            if prev_ and prev_.is_whitespace:
                 del tlist.tokens[pidx]
                 tidx -= 1
@@ -80,7 +80,7 @@ class ReindentFilter(object):
         tidx, token = tlist.token_next_by(t=ttypes)
         while token:
             pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
-            if prev_ and prev_.is_whitespace():
+            if prev_ and prev_.is_whitespace:
                 del tlist.tokens[pidx]
                 tidx -= 1
             # only break if it's not the first token
diff --git a/sqlparse/filters/right_margin.py b/sqlparse/filters/right_margin.py
index b3f905d..86cf5fd 100644
--- a/sqlparse/filters/right_margin.py
+++ b/sqlparse/filters/right_margin.py
@@ -23,12 +23,12 @@ class RightMarginFilter(object):

     def _process(self, group, stream):
         for token in stream:
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 if token.value.endswith('\n'):
                     self.line = ''
                 else:
                     self.line = token.value.splitlines()[-1]
-            elif token.is_group() and type(token) not in self.keep_together:
+            elif token.is_group and type(token) not in self.keep_together:
                 token.tokens = self._process(token, token.tokens)
             else:
                 val = text_type(token)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 50952bc..f780090 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -24,14 +24,17 @@ class Token(object):
     the type of the token.
     """

-    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
+    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword',
+                 'is_group', 'is_whitespace')

     def __init__(self, ttype, value):
         value = text_type(value)
         self.value = value
         self.ttype = ttype
         self.parent = None
+        self.is_group = False
         self.is_keyword = ttype in T.Keyword
+        self.is_whitespace = self.ttype in T.Whitespace
         self.normalized = value.upper() if self.is_keyword else value

     def __str__(self):
@@ -96,14 +99,6 @@ class Token(object):
         return self.normalized in values

-    def is_group(self):
-        """Returns ``True`` if this object has children."""
-        return False
-
-    def is_whitespace(self):
-        """Return ``True`` if this token is a whitespace token."""
-        return self.ttype in T.Whitespace
-
     def within(self, group_cls):
         """Returns ``True`` if this token is within *group_cls*.
@@ -145,6 +140,7 @@ class TokenList(Token):
         self.tokens = tokens or []
         [setattr(token, 'parent', self) for token in tokens]
         super(TokenList, self).__init__(None, text_type(self))
+        self.is_group = True

     def __str__(self):
         return ''.join(token.value for token in self.flatten())
@@ -173,7 +169,7 @@ class TokenList(Token):
             print("{indent}{idx:2d} {cls} {q}{value}{q}"
                   .format(**locals()), file=f)

-            if token.is_group() and (max_depth is None or depth < max_depth):
+            if token.is_group and (max_depth is None or depth < max_depth):
                 token._pprint_tree(max_depth, depth + 1, f)

     def get_token_at_offset(self, offset):
@@ -191,18 +187,15 @@ class TokenList(Token):
         This method is recursively called for all child tokens.
         """
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 for item in token.flatten():
                     yield item
             else:
                 yield token

-    def is_group(self):
-        return True
-
     def get_sublists(self):
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 yield token

     @property
@@ -241,7 +234,7 @@ class TokenList(Token):
         ignored too.
         """
         # this one is inconsistent, using Comment instead of T.Comment...
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs)[1]
@@ -278,7 +271,7 @@ class TokenList(Token):
         if idx is None:
             return None, None
         idx += 1  # a lot of code usage currently pre-compensates for this
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs, idx, reverse=_reverse)
@@ -296,7 +289,7 @@ class TokenList(Token):
         end_idx = end + include_end

         # will be needed later for new group_clauses
-        # while skip_ws and tokens and tokens[-1].is_whitespace():
+        # while skip_ws and tokens and tokens[-1].is_whitespace:
         #     tokens = tokens[:-1]

         if extend and isinstance(start, grp_cls):
@@ -471,7 +464,7 @@ class IdentifierList(TokenList):
         Whitespaces and punctuations are not included in this generator.
         """
         for token in self.tokens:
-            if not (token.is_whitespace() or token.match(T.Punctuation, ',')):
+            if not (token.is_whitespace or token.match(T.Punctuation, ',')):
                 yield token
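
For reference, a short usage sketch against the post-merge API (the query
string is illustrative, and this assumes a sqlparse version that includes
this commit):

    import sqlparse

    stmt = sqlparse.parse('SELECT a, b FROM t')[0]
    for token in stmt.flatten():
        # Attribute access, not a method call, after this change.
        if token.is_whitespace:
            continue
        print(token.ttype, repr(token.value))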