| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-08-22 19:29:45 -0700 |
|---|---|---|
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-08-22 19:29:45 -0700 |
| commit | a36008a235e31bc24b9d42a3a69b479031f024f9 (patch) | |
| tree | 39fe3c9b45e0e4c085458116ee641af982f1b945 /sqlparse/sql.py | |
| parent | ae0532678b0fdc859cae021ee135579d875a24a8 (diff) | |
| download | sqlparse-a36008a235e31bc24b9d42a3a69b479031f024f9.tar.gz | |
Unify naming schema. Closes #283
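In practice, the commit promotes `Token.is_group()` and `Token.is_whitespace()` from methods that return fixed values to plain boolean attributes set in `__init__`, so call sites read them without parentheses. A minimal sketch of the resulting call-site style, assuming a sqlparse build that includes this commit (the SQL string and variable names are illustrative):

```python
import sqlparse

stmt = sqlparse.parse("SELECT a, b FROM t WHERE a > 1")[0]

for token in stmt.flatten():
    # is_whitespace is now an attribute, not a method call.
    if token.is_whitespace:
        continue
    print(token.ttype, repr(token.value))

for token in stmt.tokens:
    # is_group is likewise a plain attribute on every token.
    if token.is_group:
        print("group:", type(token).__name__, repr(token.value))
```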
Diffstat (limited to 'sqlparse/sql.py')
| -rw-r--r-- | sqlparse/sql.py | 31 |
1 file changed, 12 insertions, 19 deletions
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 50952bc..f780090 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -24,14 +24,17 @@ class Token(object):
     the type of the token.
     """

-    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
+    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword',
+                 'is_group', 'is_whitespace')

     def __init__(self, ttype, value):
         value = text_type(value)
         self.value = value
         self.ttype = ttype
         self.parent = None
+        self.is_group = False
         self.is_keyword = ttype in T.Keyword
+        self.is_whitespace = self.ttype in T.Whitespace
         self.normalized = value.upper() if self.is_keyword else value

     def __str__(self):
@@ -96,14 +99,6 @@ class Token(object):

         return self.normalized in values

-    def is_group(self):
-        """Returns ``True`` if this object has children."""
-        return False
-
-    def is_whitespace(self):
-        """Return ``True`` if this token is a whitespace token."""
-        return self.ttype in T.Whitespace
-
     def within(self, group_cls):
         """Returns ``True`` if this token is within *group_cls*.

@@ -145,6 +140,7 @@ class TokenList(Token):
         self.tokens = tokens or []
         [setattr(token, 'parent', self) for token in tokens]
         super(TokenList, self).__init__(None, text_type(self))
+        self.is_group = True

     def __str__(self):
         return ''.join(token.value for token in self.flatten())
@@ -173,7 +169,7 @@ class TokenList(Token):
             print("{indent}{idx:2d} {cls} {q}{value}{q}"
                   .format(**locals()), file=f)

-            if token.is_group() and (max_depth is None or depth < max_depth):
+            if token.is_group and (max_depth is None or depth < max_depth):
                 token._pprint_tree(max_depth, depth + 1, f)

     def get_token_at_offset(self, offset):
@@ -191,18 +187,15 @@ class TokenList(Token):
         This method is recursively called for all child tokens.
         """
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 for item in token.flatten():
                     yield item
             else:
                 yield token

-    def is_group(self):
-        return True
-
     def get_sublists(self):
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 yield token

     @property
@@ -241,7 +234,7 @@ class TokenList(Token):
         ignored too.
         """
         # this on is inconsistent, using Comment instead of T.Comment...
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs)[1]

@@ -278,7 +271,7 @@ class TokenList(Token):
         if idx is None:
             return None, None
         idx += 1  # alot of code usage current pre-compensates for this
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs, idx, reverse=_reverse)

@@ -296,7 +289,7 @@ class TokenList(Token):
         end_idx = end + include_end

         # will be needed later for new group_clauses
-        # while skip_ws and tokens and tokens[-1].is_whitespace():
+        # while skip_ws and tokens and tokens[-1].is_whitespace:
         #     tokens = tokens[:-1]

         if extend and isinstance(start, grp_cls):
@@ -471,7 +464,7 @@ class IdentifierList(TokenList):

         Whitespaces and punctuations are not included in this generator.
         """
         for token in self.tokens:
-            if not (token.is_whitespace() or token.match(T.Punctuation, ',')):
+            if not (token.is_whitespace or token.match(T.Punctuation, ',')):
                 yield token
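The underlying pattern is to store a flag that never varies per call as data: the base class sets it once in `__init__` and the container subclass simply overwrites it, which is what the diff does with `is_group` and `is_whitespace`. The sketch below shows that pattern in isolation; `Node` and `NodeList` are illustrative names, not sqlparse classes.

```python
class Node:
    """Leaf token: flags are computed once at construction, not exposed as methods."""

    def __init__(self, value):
        self.value = value
        self.is_group = False                 # constant for leaves
        self.is_whitespace = value.isspace()  # decided once, up front


class NodeList(Node):
    """Container node: overwrites the flag instead of overriding a method."""

    def __init__(self, children):
        super().__init__("".join(c.value for c in children))
        self.children = children
        self.is_group = True


def flatten(node):
    # Attribute access replaces the former method call:
    # node.is_group, not node.is_group().
    if node.is_group:
        for child in node.children:
            yield from flatten(child)
    else:
        yield node


# Usage: flatten a tiny tree and drop whitespace leaves.
tree = NodeList([Node("SELECT"), Node(" "), Node("1")])
print([n.value for n in flatten(tree) if not n.is_whitespace])  # ['SELECT', '1']
```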