summaryrefslogtreecommitdiff
path: root/sqlparse/engine
diff options
context:
space:
mode:
authorSjoerd Job Postmus <sjoerdjob@sjec.nl>2016-06-02 10:08:00 +0200
committerVictor Uriarte <victor.m.uriarte@intel.com>2016-06-12 17:38:13 -0700
commit67dc823e1174eee9ea2159674c8eb016b2f95b54 (patch)
tree72338f09570c72f56934f7618f1b74eef8ff30f1 /sqlparse/engine
parent237575ef726e4232b60a5043177c43a72f370238 (diff)
downloadsqlparse-67dc823e1174eee9ea2159674c8eb016b2f95b54.tar.gz
Use specialized token_idx_next_by in group_aliased.
The method group_aliased was making a lot of calls to token_index. By specializing token_next_by to token_idx_next_by, the calls to token_index became superfluous. Also use token_idx_next_by in group_identifier_list. It was making a lot of calls, which is now reduced by more than half.
Diffstat (limited to 'sqlparse/engine')
-rw-r--r--sqlparse/engine/grouping.py10
1 file changed, 4 insertions, 6 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 77a53ad..fddee0f 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -163,17 +163,16 @@ def group_identifier_list(tlist):
(T.Keyword, T.Comment, T.Wildcard))
func = lambda t: imt(t, i=I_IDENT_LIST, m=M_ROLE, t=T_IDENT_LIST)
- token = tlist.token_next_by(m=M_COMMA)
+ tidx, token = tlist.token_idx_next_by(m=M_COMMA)
while token:
- tidx = tlist.token_index(token)
before, after = tlist.token_prev(tidx), tlist.token_next(tidx)
if func(before) and func(after):
tidx = tlist.token_index(before)
token = tlist.group_tokens_between(sql.IdentifierList, tidx, after, extend=True)
- token = tlist.token_next_by(m=M_COMMA, idx=tidx + 1)
+ tidx, token = tlist.token_idx_next_by(m=M_COMMA, idx=tidx + 1)
def group_brackets(tlist):
@@ -217,13 +216,12 @@ def group_aliased(tlist):
I_ALIAS = (sql.Parenthesis, sql.Function, sql.Case, sql.Identifier,
) # sql.Operation)
- token = tlist.token_next_by(i=I_ALIAS, t=T.Number)
+ tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number)
while token:
- tidx = tlist.token_index(token)
next_ = tlist.token_next(tidx)
if imt(next_, i=sql.Identifier):
token = tlist.group_tokens_between(sql.Identifier, tidx, next_, extend=True)
- token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
+ tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
def group_typecasts(tlist):