summaryrefslogtreecommitdiff
path: root/sqlparse
diff options
context:
space:
mode:
authorVictor Uriarte <victor.m.uriarte@intel.com>2016-06-13 22:20:29 -0700
committerVictor Uriarte <victor.m.uriarte@intel.com>2016-06-15 13:29:15 -0700
commit5002bfa36c4fa2ee72eff18648b6ddc616b718f0 (patch)
tree54aa68750cc0c03289d29bdbbe852fb6f769e34b /sqlparse
parent4f922d9b6fb68b8281c6b3d93a57a4c84860e06a (diff)
downloadsqlparse-5002bfa36c4fa2ee72eff18648b6ddc616b718f0.tar.gz
Normalize behavior between token_next and token_next_by
both will now return the "next" token, and not the token itself, when passed their own index
Diffstat (limited to 'sqlparse')
-rw-r--r--sqlparse/engine/grouping.py25
-rw-r--r--sqlparse/filters/aligned_indent.py8
-rw-r--r--sqlparse/filters/others.py2
-rw-r--r--sqlparse/filters/reindent.py14
-rw-r--r--sqlparse/sql.py3
5 files changed, 26 insertions, 26 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index a229e3d..e7072d0 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -118,7 +118,7 @@ def group_identifier(tlist):
tidx, token = tlist.token_next_by(t=T_IDENT)
while token:
tlist.group_tokens(sql.Identifier, tidx, tidx)
- tidx, token = tlist.token_next_by(t=T_IDENT, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=T_IDENT, idx=tidx)
def group_period(tlist):
@@ -140,7 +140,7 @@ def group_arrays(tlist):
t=(T.Name, T.String.Symbol,)):
tlist.group_tokens(sql.Identifier, pidx, tidx, extend=True)
tidx = pidx
- tidx, token = tlist.token_next_by(i=sql.SquareBrackets, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(i=sql.SquareBrackets, idx=tidx)
@recurse(sql.Identifier)
@@ -161,8 +161,7 @@ def group_operator(tlist):
tlist.group_tokens(sql.Operation, pidx, nidx)
tidx = pidx
- tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard),
- idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard), idx=tidx)
@recurse(sql.IdentifierList)
@@ -182,7 +181,7 @@ def group_identifier_list(tlist):
if func(prev_) and func(next_):
tlist.group_tokens(sql.IdentifierList, pidx, nidx, extend=True)
tidx = pidx
- tidx, token = tlist.token_next_by(m=M_COMMA, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(m=M_COMMA, idx=tidx)
def group_brackets(tlist):
@@ -198,20 +197,20 @@ def group_comments(tlist):
tidx, token = tlist.token_next_by(t=T.Comment)
while token:
end = tlist.token_not_matching(
- lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx + 1)
+ lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx)
if end is not None:
eidx = tlist.token_index(end)
eidx, end = tlist.token_prev(eidx, skip_ws=False)
tlist.group_tokens(sql.Comment, tidx, eidx)
- tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx)
@recurse(sql.Where)
def group_where(tlist):
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN)
while token:
- eidx, end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tidx + 1)
+ eidx, end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tidx)
if end is None:
end = tlist._groupable_tokens[-1]
@@ -220,7 +219,7 @@ def group_where(tlist):
# TODO: convert this to eidx instead of end token.
# i think above values are len(tlist) and eidx-1
tlist.group_tokens(sql.Where, tidx, end)
- tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tidx)
@recurse()
@@ -233,7 +232,7 @@ def group_aliased(tlist):
nidx, next_ = tlist.token_next(tidx)
if imt(next_, i=sql.Identifier):
tlist.group_tokens(sql.Identifier, tidx, nidx, extend=True)
- tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx)
def group_typecasts(tlist):
@@ -257,7 +256,7 @@ def group_functions(tlist):
nidx, next_ = tlist.token_next(tidx)
if isinstance(next_, sql.Parenthesis):
tlist.group_tokens(sql.Function, tidx, nidx)
- tidx, token = tlist.token_next_by(t=T.Name, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=T.Name, idx=tidx)
def group_order(tlist):
@@ -268,7 +267,7 @@ def group_order(tlist):
if imt(prev_, i=sql.Identifier, t=T.Number):
tlist.group_tokens(sql.Identifier, pidx, tidx)
tidx = pidx
- tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx)
@recurse()
@@ -279,7 +278,7 @@ def align_comments(tlist):
if isinstance(prev_, sql.TokenList):
tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
tidx = pidx
- tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx)
def group(stmt):
diff --git a/sqlparse/filters/aligned_indent.py b/sqlparse/filters/aligned_indent.py
index ed5e15e..2fea4d2 100644
--- a/sqlparse/filters/aligned_indent.py
+++ b/sqlparse/filters/aligned_indent.py
@@ -86,14 +86,14 @@ class AlignedIndentFilter(object):
max_cond_width - condition_width[i]))
tlist.insert_after(cond[-1], ws)
- def _next_token(self, tlist, idx=0):
+ def _next_token(self, tlist, idx=-1):
split_words = T.Keyword, self.split_words, True
tidx, token = tlist.token_next_by(m=split_words, idx=idx)
# treat "BETWEEN x and y" as a single statement
if token and token.normalized == 'BETWEEN':
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
if token and token.normalized == 'AND':
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
return tidx, token
def _split_kwds(self, tlist):
@@ -106,7 +106,7 @@ class AlignedIndentFilter(object):
token_indent = text_type(token)
tlist.insert_before(token, self.nl(token_indent))
tidx += 1
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
def _process_default(self, tlist):
self._split_kwds(tlist)
diff --git a/sqlparse/filters/others.py b/sqlparse/filters/others.py
index a23a6c6..9d4a1d1 100644
--- a/sqlparse/filters/others.py
+++ b/sqlparse/filters/others.py
@@ -99,7 +99,7 @@ class SpacesAroundOperatorsFilter(object):
tidx += 1 # has to shift since token inserted before it
# assert tlist.token_index(token) == tidx
- tidx, token = tlist.token_next_by(t=ttypes, idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
def process(self, stmt):
[self.process(sgroup) for sgroup in stmt.get_sublists()]
diff --git a/sqlparse/filters/reindent.py b/sqlparse/filters/reindent.py
index d23a8d5..1b539c6 100644
--- a/sqlparse/filters/reindent.py
+++ b/sqlparse/filters/reindent.py
@@ -44,7 +44,7 @@ class ReindentFilter(object):
def nl(self):
return sql.Token(T.Whitespace, self.n + self.char * self.leading_ws)
- def _next_token(self, tlist, idx=0):
+ def _next_token(self, tlist, idx=-1):
split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
@@ -52,10 +52,10 @@ class ReindentFilter(object):
tidx, token = tlist.token_next_by(m=m_split, idx=idx)
if token and token.normalized == 'BETWEEN':
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
if token and token.normalized == 'AND':
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
return tidx, token
@@ -74,10 +74,11 @@ class ReindentFilter(object):
tlist.insert_before(tidx, self.nl())
tidx += 1
- tidx, token = self._next_token(tlist, tidx + 1)
+ tidx, token = self._next_token(tlist, tidx)
def _split_statements(self, tlist):
- tidx, token = tlist.token_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
+ ttypes = T.Keyword.DML, T.Keyword.DDL
+ tidx, token = tlist.token_next_by(t=ttypes)
while token:
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
if prev_ and prev_.is_whitespace():
@@ -87,8 +88,7 @@ class ReindentFilter(object):
if prev_:
tlist.insert_before(tidx, self.nl())
tidx += 1
- tidx, token = tlist.token_next_by(
- t=(T.Keyword.DDL, T.Keyword.DML), idx=tidx + 1)
+ tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
def _process(self, tlist):
func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index ed56793..4b6abf1 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -240,8 +240,9 @@ class TokenList(Token):
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
return self._token_matching(funcs)[1]
- def token_next_by(self, i=None, m=None, t=None, idx=0, end=None):
+ def token_next_by(self, i=None, m=None, t=None, idx=-1, end=None):
funcs = lambda tk: imt(tk, i, m, t)
+ idx += 1
return self._token_matching(funcs, idx, end)
def token_not_matching(self, funcs, idx):