| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-05-10 19:42:08 -0700 |
|---|---|---|
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-05-10 19:42:08 -0700 |
| commit | 5a1830554f9c1d6b626f57fd88c19c6f7063b434 | |
| tree | 3ae4df5cb8ba7ea9314a20f9917e5c0b8ea771d9 | |
| parent | dbf8a624e091e1da24a7a90c4ff59d88ce816b8f | |
| download | sqlparse-5a1830554f9c1d6b626f57fd88c19c6f7063b434.tar.gz | |
generalize group_tokens for more use cases
Diffstat (limited to 'sqlparse')
| -rw-r--r-- | sqlparse/engine/grouping.py | 14 |
| -rw-r--r-- | sqlparse/sql.py | 34 |
2 files changed, 29 insertions, 19 deletions
```diff
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 982488b..ab519f0 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -422,19 +422,13 @@ def group_order(tlist):
 
 def align_comments(tlist):
     [align_comments(sgroup) for sgroup in tlist.get_sublists()]
-    idx = 0
-    token = tlist.token_next_by_instance(idx, sql.Comment)
+    token = tlist.token_next_by(i=sql.Comment)
     while token:
         before = tlist.token_prev(tlist.token_index(token))
         if isinstance(before, sql.TokenList):
-            grp = tlist.tokens_between(before, token)[1:]
-            before.tokens.extend(grp)
-            for t in grp:
-                tlist.tokens.remove(t)
-            idx = tlist.token_index(before) + 1
-        else:
-            idx = tlist.token_index(token) + 1
-        token = tlist.token_next_by_instance(idx, sql.Comment)
+            tokens = tlist.tokens_between(before, token)
+            token = tlist.group_tokens(sql.TokenList, tokens, extend=True)
+        token = tlist.token_next_by(i=sql.Comment, idx=token)
 
 
 def group(tlist):
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index ccb6924..a9884a5 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -388,20 +388,36 @@ class TokenList(Token):
         start_idx = self.token_index(start)
         return self.tokens[start_idx:end_idx]
 
-    def group_tokens(self, grp_cls, tokens, ignore_ws=False):
+    def group_tokens(self, grp_cls, tokens, ignore_ws=False, extend=False):
         """Replace tokens by an instance of *grp_cls*."""
-        idx = self.token_index(tokens[0])
         if ignore_ws:
             while tokens and tokens[-1].is_whitespace():
                 tokens = tokens[:-1]
-        for t in tokens:
-            self.tokens.remove(t)
-        grp = grp_cls(tokens)
+
+        left = tokens[0]
+        idx = self.token_index(left)
+
+        if extend:
+            if not isinstance(left, grp_cls):
+                grp = grp_cls([left])
+                self.tokens.remove(left)
+                self.tokens.insert(idx, grp)
+                left = grp
+                left.parent = self
+            tokens = tokens[1:]
+            left.tokens.extend(tokens)
+            left.value = left.__str__()
+
+        else:
+            left = grp_cls(tokens)
+            left.parent = self
+            self.tokens.insert(idx, left)
+
         for token in tokens:
-            token.parent = grp
-        grp.parent = self
-        self.tokens.insert(idx, grp)
-        return grp
+            token.parent = left
+            self.tokens.remove(token)
+
+        return left
 
     def insert_before(self, where, token):
         """Inserts *token* before *where*."""
```
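To illustrate the generalization, below is a minimal sketch of what the new `extend=True` flag does, using only the `TokenList` methods that appear in this diff (`group_tokens`, `tokens_between`, `token_index`). It assumes the API exactly as it stands at this revision (`group_tokens` is reworked again in later sqlparse releases), and the token values are made up for illustration.

```python
from sqlparse import sql
from sqlparse import tokens as T

# Build a small, flat token list by hand so the sketch does not depend
# on the full parsing pipeline (which already runs align_comments).
parent = sql.TokenList([
    sql.Token(T.Keyword, 'SELECT'),
    sql.Token(T.Whitespace, ' '),
    sql.Token(T.Number, '1'),
])

# First call: the leading token is not a TokenList yet, so group_tokens
# wraps it in a new sql.TokenList group and inserts it in place.
grp = parent.group_tokens(sql.TokenList, [parent.tokens[0]], extend=True)

# Second call: `grp` already is a TokenList, so extend=True folds the
# remaining tokens into that existing group instead of nesting another
# group around it.
rest = parent.tokens_between(grp, parent.tokens[-1])
grp = parent.group_tokens(sql.TokenList, rest, extend=True)

assert grp is parent.tokens[0]
print(grp.value)  # 'SELECT 1'
```

This reuse of an existing group is what lets `align_comments` above replace its manual extend-and-remove loop with a single `group_tokens(..., extend=True)` call.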
