summaryrefslogtreecommitdiff
path: root/sqlparse
diff options
context:
space:
mode:
authorSjoerd Job Postmus <sjoerdjob@sjec.nl>2016-06-02 11:58:19 +0200
committerVictor Uriarte <victor.m.uriarte@intel.com>2016-06-12 17:38:26 -0700
commit89d4f68ba5bbe78a9dd89257cbe4a9f3cfa76433 (patch)
treea7df0e698a209edd3dbbca3c8601e2b1bcb961ff /sqlparse
parent8f7968ed5c649e5227e605ee272f59dd5ca75adb (diff)
downloadsqlparse-89d4f68ba5bbe78a9dd89257cbe4a9f3cfa76433.tar.gz
Use a specialized token_idx_next.
Prevent calling token_index.
Diffstat (limited to 'sqlparse')
-rw-r--r--sqlparse/engine/grouping.py8
-rw-r--r--sqlparse/sql.py20
2 files changed, 24 insertions, 4 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 6bdba2f..0169830 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -167,11 +167,11 @@ def group_identifier_list(tlist):
tidx, token = tlist.token_idx_next_by(m=M_COMMA)
while token:
before_idx, before = tlist.token_idx_prev(tidx)
- after = tlist.token_next(tidx)
+ after_idx, after = tlist.token_idx_next(tidx)
if func(before) and func(after):
tidx = before_idx
- token = tlist.group_tokens_between(sql.IdentifierList, tidx, after, extend=True)
+ token = tlist.group_tokens_between(sql.IdentifierList, tidx, after_idx, extend=True)
tidx, token = tlist.token_idx_next_by(m=M_COMMA, idx=tidx + 1)
@@ -219,9 +219,9 @@ def group_aliased(tlist):
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number)
while token:
- next_ = tlist.token_next(tidx)
+ next_index_, next_ = tlist.token_idx_next(tidx)
if imt(next_, i=sql.Identifier):
- token = tlist.group_tokens_between(sql.Identifier, tidx, next_, extend=True)
+ token = tlist.group_tokens_between(sql.Identifier, tidx, next_index_, extend=True)
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 9782c33..f3ef642 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -350,6 +350,26 @@ class TokenList(Token):
funcs = lambda tk: not (tk.is_whitespace() and skip_ws)
return self._token_matching(funcs, idx)
+ def token_idx_next(self, idx, skip_ws=True):
+ """Returns the next token relative to *idx*.
+
+ If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
+ ``None`` is returned if there's no next token.
+ """
+ if isinstance(idx, int):
+ idx += 1 # a lot of code usage currently pre-compensates for this
+ try:
+ if not skip_ws:
+ return idx, self.tokens[idx]
+ else:
+ while True:
+ token = self.tokens[idx]
+ if not token.is_whitespace():
+ return idx, token
+ idx += 1
+ except IndexError:
+ return None, None
+
def token_index(self, token, start=0):
"""Return list index of token."""
start = self.token_index(start) if not isinstance(start, int) else start