author     Victor Uriarte <victor.m.uriarte@intel.com>  2016-06-13 13:21:20 -0700
committer  Victor Uriarte <victor.m.uriarte@intel.com>  2016-06-15 13:28:22 -0700
commit     a795be1a70a241e177227b742269fb2df88af962 (patch)
tree       6c5c86555d1bd0168280a38bd8afac7939f3811a /sqlparse
parent     997f95b8b6ec5129362dcfe5deedaf50800e3afc (diff)
download   sqlparse-a795be1a70a241e177227b742269fb2df88af962.tar.gz
Change token_ funcs to token_idx funcs
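
The token_* navigation helpers returned bare tokens, so callers had to
recompute positions with token_index() on every loop iteration. The
token_idx_* variants return an (index, token) pair instead, letting
callers advance by index directly. A minimal sketch of the old and new
calling conventions, given a TokenList `tlist` (loop bodies elided):

    # before: re-scan the list to find the token's position each pass
    token = tlist.token_next_by(t=T.Name)
    while token:
        token = tlist.token_next_by(t=T.Name, idx=tlist.token_index(token) + 1)

    # after: carry the index alongside the token
    tidx, token = tlist.token_idx_next_by(t=T.Name)
    while token:
        tidx, token = tlist.token_idx_next_by(t=T.Name, idx=tidx + 1)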
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/grouping.py         111
-rw-r--r--  sqlparse/filters/aligned_indent.py   25
-rw-r--r--  sqlparse/filters/others.py           36
-rw-r--r--  sqlparse/filters/reindent.py         54
-rw-r--r--  sqlparse/sql.py                      64
5 files changed, 160 insertions(+), 130 deletions(-)
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 86c4bf2..88064cb 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -32,15 +32,16 @@ def _group_left_right(tlist, m, cls,
continue
tidx = tlist.token_index(token)
- left, right = tlist.token_prev(tidx), tlist.token_next(tidx)
+ pidx, prev_ = tlist.token_idx_prev(tidx)
+ nidx, next_ = tlist.token_idx_next(tidx)
- if valid_left(left) and valid_right(right):
+ if valid_left(prev_) and valid_right(next_):
if semicolon:
# only overwrite if a semicolon is present.
- sright = tlist.token_next_by(m=M_SEMICOLON, idx=tidx + 1)
- right = sright or right
+ snidx, _ = tlist.token_idx_next_by(m=M_SEMICOLON, idx=nidx)
+ nidx = snidx or nidx
# Luckily, this leaves the position of `token` intact.
- tlist.group_tokens_between(cls, left, right, extend=True)
+ tlist.group_tokens_between(cls, pidx, nidx, extend=True)
def _group_matching(tlist, cls):
@@ -114,11 +115,10 @@ def group_case(tlist):
def group_identifier(tlist):
T_IDENT = (T.String.Symbol, T.Name)
- token = tlist.token_next_by(t=T_IDENT)
+ tidx, token = tlist.token_idx_next_by(t=T_IDENT)
while token:
- tidx = tlist.token_index(token)
- token = tlist.group_tokens_between(sql.Identifier, tidx, tidx)
- token = tlist.token_next_by(t=T_IDENT, idx=tidx + 1)
+ tlist.group_tokens_between(sql.Identifier, tidx, tidx)
+ tidx, token = tlist.token_idx_next_by(t=T_IDENT, idx=tidx + 1)
def group_period(tlist):
@@ -133,13 +133,14 @@ def group_period(tlist):
def group_arrays(tlist):
- token = tlist.token_next_by(i=sql.SquareBrackets)
+ tidx, token = tlist.token_idx_next_by(i=sql.SquareBrackets)
while token:
- prev = tlist.token_prev(tlist.token_index(token))
+ pidx, prev = tlist.token_idx_prev(tidx)
if imt(prev, i=(sql.SquareBrackets, sql.Identifier, sql.Function),
t=(T.Name, T.String.Symbol,)):
- token = tlist.group_tokens_between(sql.Identifier, prev, token, extend=True)
- token = tlist.token_next_by(i=sql.SquareBrackets, idx=tlist.token_index(token) + 1)
+ tlist.group_tokens_between(sql.Identifier, pidx, tidx, extend=True)
+ tidx = pidx
+ tidx, token = tlist.token_idx_next_by(i=sql.SquareBrackets, idx=tidx + 1)
@recurse(sql.Identifier)
@@ -150,15 +151,18 @@ def group_operator(tlist):
T_CYCLE = T_NUMERICAL + T_STRING + T_NAME
func = lambda tk: imt(tk, i=I_CYCLE, t=T_CYCLE)
- token = tlist.token_next_by(t=(T.Operator, T.Wildcard))
+ tidx, token = tlist.token_idx_next_by(t=(T.Operator, T.Wildcard))
while token:
- left, right = tlist.token_prev(tlist.token_index(token)), tlist.token_next(tlist.token_index(token))
+ pidx, prev_ = tlist.token_idx_prev(tidx)
+ nidx, next_ = tlist.token_idx_next(tidx)
- if func(left) and func(right):
+ if func(prev_) and func(next_):
token.ttype = T.Operator
- token = tlist.group_tokens_between(sql.Operation, left, right)
+ tlist.group_tokens_between(sql.Operation, pidx, nidx)
+ tidx = pidx
- token = tlist.token_next_by(t=(T.Operator, T.Wildcard), idx=tlist.token_index(token) + 1)
+ tidx, token = tlist.token_idx_next_by(t=(T.Operator, T.Wildcard),
+ idx=tidx + 1)
@recurse(sql.IdentifierList)
@@ -172,13 +176,12 @@ def group_identifier_list(tlist):
tidx, token = tlist.token_idx_next_by(m=M_COMMA)
while token:
- before_idx, before = tlist.token_idx_prev(tidx)
- after_idx, after = tlist.token_idx_next(tidx)
-
- if func(before) and func(after):
- tidx = before_idx
- token = tlist.group_tokens_between(sql.IdentifierList, tidx, after_idx, extend=True)
+ pidx, prev_ = tlist.token_idx_prev(tidx)
+ nidx, next_ = tlist.token_idx_next(tidx)
+ if func(prev_) and func(next_):
+ tlist.group_tokens_between(sql.IdentifierList, pidx, nidx, extend=True)
+ tidx = pidx
tidx, token = tlist.token_idx_next_by(m=M_COMMA, idx=tidx + 1)
@@ -192,31 +195,32 @@ def group_parenthesis(tlist):
@recurse(sql.Comment)
def group_comments(tlist):
- token = tlist.token_next_by(t=T.Comment)
+ tidx, token = tlist.token_idx_next_by(t=T.Comment)
while token:
end = tlist.token_not_matching(
- lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(),
- idx=tlist.token_index(token) + 1)
+ lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx + 1)
if end is not None:
- end = tlist.token_prev(tlist.token_index(end), False)
- token = tlist.group_tokens_between(sql.Comment, token, end)
+ eidx = tlist.token_index(end)
+ eidx, end = tlist.token_idx_prev(eidx, skip_ws=False)
+ tlist.group_tokens_between(sql.Comment, tidx, eidx)
- token = tlist.token_next_by(t=T.Comment, idx=tlist.token_index(token) + 1)
+ tidx, token = tlist.token_idx_next_by(t=T.Comment, idx=tidx + 1)
@recurse(sql.Where)
def group_where(tlist):
- token = tlist.token_next_by(m=sql.Where.M_OPEN)
+ tidx, token = tlist.token_idx_next_by(m=sql.Where.M_OPEN)
while token:
- end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tlist.token_index(token) + 1)
+ eidx, end = tlist.token_idx_next_by(m=sql.Where.M_CLOSE, idx=tidx + 1)
if end is None:
end = tlist._groupable_tokens[-1]
else:
- end = tlist.tokens[tlist.token_index(end) - 1]
-
- token = tlist.group_tokens_between(sql.Where, token, end)
- token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tlist.token_index(token) + 1)
+ end = tlist.tokens[eidx - 1]
+ # TODO: convert this to eidx instead of end token.
+ # I think the above values are len(tlist) and eidx - 1
+ tlist.group_tokens_between(sql.Where, tidx, end)
+ tidx, token = tlist.token_idx_next_by(m=sql.Where.M_OPEN, idx=tidx + 1)
@recurse()
@@ -226,9 +230,9 @@ def group_aliased(tlist):
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number)
while token:
- next_index_, next_ = tlist.token_idx_next(tidx)
+ nidx, next_ = tlist.token_idx_next(tidx)
if imt(next_, i=sql.Identifier):
- token = tlist.group_tokens_between(sql.Identifier, tidx, next_index_, extend=True)
+ tlist.group_tokens_between(sql.Identifier, tidx, nidx, extend=True)
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
@@ -247,32 +251,35 @@ def group_functions(tlist):
has_table = True
if has_create and has_table:
return
- token = tlist.token_next_by(t=T.Name)
+
+ tidx, token = tlist.token_idx_next_by(t=T.Name)
while token:
- next_ = tlist.token_next(tlist.token_index(token))
- if imt(next_, i=sql.Parenthesis):
- token = tlist.group_tokens_between(sql.Function, token, next_)
- token = tlist.token_next_by(t=T.Name, idx=tlist.token_index(token) + 1)
+ nidx, next_ = tlist.token_idx_next(tidx)
+ if isinstance(next_, sql.Parenthesis):
+ tlist.group_tokens_between(sql.Function, tidx, nidx)
+ tidx, token = tlist.token_idx_next_by(t=T.Name, idx=tidx + 1)
def group_order(tlist):
"""Group together Identifier and Asc/Desc token"""
- token = tlist.token_next_by(t=T.Keyword.Order)
+ tidx, token = tlist.token_idx_next_by(t=T.Keyword.Order)
while token:
- prev = tlist.token_prev(tlist.token_index(token))
+ pidx, prev = tlist.token_idx_prev(tidx)
if imt(prev, i=sql.Identifier, t=T.Number):
- token = tlist.group_tokens_between(sql.Identifier, prev, token)
- token = tlist.token_next_by(t=T.Keyword.Order, idx=tlist.token_index(token) + 1)
+ tlist.group_tokens_between(sql.Identifier, pidx, tidx)
+ tidx = pidx
+ tidx, token = tlist.token_idx_next_by(t=T.Keyword.Order, idx=tidx + 1)
@recurse()
def align_comments(tlist):
- token = tlist.token_next_by(i=sql.Comment)
+ tidx, token = tlist.token_idx_next_by(i=sql.Comment)
while token:
- before = tlist.token_prev(tlist.token_index(token))
- if isinstance(before, sql.TokenList):
- token = tlist.group_tokens_between(sql.TokenList, before, token, extend=True)
- token = tlist.token_next_by(i=sql.Comment, idx=tlist.token_index(token) + 1)
+ pidx, prev = tlist.token_idx_prev(tidx)
+ if isinstance(prev, sql.TokenList):
+ tlist.group_tokens_between(sql.TokenList, pidx, tidx, extend=True)
+ tidx = pidx
+ tidx, token = tlist.token_idx_next_by(i=sql.Comment, idx=tidx + 1)
def group(stmt):
diff --git a/sqlparse/filters/aligned_indent.py b/sqlparse/filters/aligned_indent.py
index ea749e9..719b450 100644
--- a/sqlparse/filters/aligned_indent.py
+++ b/sqlparse/filters/aligned_indent.py
@@ -46,7 +46,8 @@ class AlignedIndentFilter(object):
def _process_parenthesis(self, tlist):
# if this isn't a subquery, don't re-indent
- if tlist.token_next_by(m=(T.DML, 'SELECT')):
+ _, token = tlist.token_idx_next_by(m=(T.DML, 'SELECT'))
+ if token is not None:
with indent(self):
tlist.insert_after(tlist[0], self.nl('SELECT'))
# process the inside of the parentheses
@@ -66,7 +67,7 @@ class AlignedIndentFilter(object):
offset_ = len('case ') + len('when ')
cases = tlist.get_cases(skip_ws=True)
# align the end as well
- end_token = tlist.token_next_by(m=(T.Keyword, 'END'))
+ _, end_token = tlist.token_idx_next_by(m=(T.Keyword, 'END'))
cases.append((None, [end_token]))
condition_width = [len(' '.join(map(text_type, cond))) if cond else 0
@@ -87,16 +88,16 @@ class AlignedIndentFilter(object):
def _next_token(self, tlist, idx=0):
split_words = T.Keyword, self.split_words, True
- token = tlist.token_next_by(m=split_words, idx=idx)
+ tidx, token = tlist.token_idx_next_by(m=split_words, idx=idx)
# treat "BETWEEN x and y" as a single statement
- if token and token.value.upper() == 'BETWEEN':
- token = self._next_token(tlist, token)
- if token and token.value.upper() == 'AND':
- token = self._next_token(tlist, token)
- return token
+ if token and token.normalized == 'BETWEEN':
+ tidx, token = self._next_token(tlist, tidx + 1)
+ if token and token.normalized == 'AND':
+ tidx, token = self._next_token(tlist, tidx + 1)
+ return tidx, token
def _split_kwds(self, tlist):
- token = self._next_token(tlist)
+ tidx, token = self._next_token(tlist)
while token:
# joins are a special case; only consider the first word as the aligner
if token.match(T.Keyword, self.join_words, regex=True):
@@ -104,13 +105,15 @@ class AlignedIndentFilter(object):
else:
token_indent = text_type(token)
tlist.insert_before(token, self.nl(token_indent))
- token = self._next_token(tlist, token)
+ tidx += 1
+ tidx, token = self._next_token(tlist, tidx + 1)
def _process_default(self, tlist):
self._split_kwds(tlist)
# process any sub-sub statements
for sgroup in tlist.get_sublists():
- prev = tlist.token_prev(sgroup)
+ idx = tlist.token_index(sgroup)
+ pidx, prev = tlist.token_idx_prev(idx)
# HACK: make "group/order by" work. Longer than max_len.
offset_ = 3 if (prev and prev.match(T.Keyword, 'BY')) else 0
with offset(self, offset_):
diff --git a/sqlparse/filters/others.py b/sqlparse/filters/others.py
index 71b1f8e..ecde2fe 100644
--- a/sqlparse/filters/others.py
+++ b/sqlparse/filters/others.py
@@ -14,23 +14,22 @@ class StripCommentsFilter(object):
def _process(tlist):
def get_next_comment():
# TODO(andi) Comment types should be unified, see related issue38
- return tlist.token_next_by(i=sql.Comment, t=T.Comment)
+ return tlist.token_idx_next_by(i=sql.Comment, t=T.Comment)
- token = get_next_comment()
+ tidx, token = get_next_comment()
while token:
- prev = tlist.token_prev(token, skip_ws=False)
- next_ = tlist.token_next(token, skip_ws=False)
+ pidx, prev_ = tlist.token_idx_prev(tidx, skip_ws=False)
+ nidx, next_ = tlist.token_idx_next(tidx, skip_ws=False)
# Replace with whitespace if prev and next exist and are not
# whitespace. This doesn't apply if prev or next is a parenthesis.
- if (prev is None or next_ is None or
- prev.is_whitespace() or prev.match(T.Punctuation, '(') or
+ if (prev_ is None or next_ is None or
+ prev_.is_whitespace() or prev_.match(T.Punctuation, '(') or
next_.is_whitespace() or next_.match(T.Punctuation, ')')):
tlist.tokens.remove(token)
else:
- tidx = tlist.token_index(token)
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
- token = get_next_comment()
+ tidx, token = get_next_comment()
def process(self, stmt):
[self.process(sgroup) for sgroup in stmt.get_sublists()]
@@ -86,20 +85,21 @@ class StripWhitespaceFilter(object):
class SpacesAroundOperatorsFilter(object):
@staticmethod
def _process(tlist):
- def next_token(idx=0):
- return tlist.token_next_by(t=(T.Operator, T.Comparison), idx=idx)
- token = next_token()
+ ttypes = (T.Operator, T.Comparison)
+ tidx, token = tlist.token_idx_next_by(t=ttypes)
while token:
- prev_ = tlist.token_prev(token, skip_ws=False)
- if prev_ and prev_.ttype != T.Whitespace:
- tlist.insert_before(token, sql.Token(T.Whitespace, ' '))
-
- next_ = tlist.token_next(token, skip_ws=False)
+ nidx, next_ = tlist.token_idx_next(tidx, skip_ws=False)
if next_ and next_.ttype != T.Whitespace:
- tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
+ tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))
+
+ pidx, prev_ = tlist.token_idx_prev(tidx, skip_ws=False)
+ if prev_ and prev_.ttype != T.Whitespace:
+ tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
+ tidx += 1  # has to shift since a token was inserted before it
- token = next_token(idx=token)
+ # assert tlist.token_index(token) == tidx
+ tidx, token = tlist.token_idx_next_by(t=ttypes, idx=tidx + 1)
def process(self, stmt):
[self.process(sgroup) for sgroup in stmt.get_sublists()]
diff --git a/sqlparse/filters/reindent.py b/sqlparse/filters/reindent.py
index b490631..d13fdf3 100644
--- a/sqlparse/filters/reindent.py
+++ b/sqlparse/filters/reindent.py
@@ -48,40 +48,47 @@ class ReindentFilter(object):
split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
- token = tlist.token_next_by(m=(T.Keyword, split_words, True), idx=idx)
+ m_split = T.Keyword, split_words, True
+ tidx, token = tlist.token_idx_next_by(m=m_split, idx=idx)
- if token and token.value.upper() == 'BETWEEN':
- token = self._next_token(tlist, token)
+ if token and token.normalized == 'BETWEEN':
+ tidx, token = self._next_token(tlist, tidx + 1)
- if token and token.value.upper() == 'AND':
- token = self._next_token(tlist, token)
+ if token and token.normalized == 'AND':
+ tidx, token = self._next_token(tlist, tidx + 1)
- return token
+ return tidx, token
def _split_kwds(self, tlist):
- token = self._next_token(tlist)
+ tidx, token = self._next_token(tlist)
while token:
- prev = tlist.token_prev(token, skip_ws=False)
+ tidx = tlist.token_index(token)
+ pidx, prev = tlist.token_idx_prev(tidx, skip_ws=False)
uprev = text_type(prev)
if prev and prev.is_whitespace():
- tlist.tokens.remove(prev)
+ del tlist.tokens[pidx]
+ tidx -= 1
if not (uprev.endswith('\n') or uprev.endswith('\r')):
- tlist.insert_before(token, self.nl())
+ tlist.insert_before(tidx, self.nl())
+ tidx += 1
- token = self._next_token(tlist, token)
+ tidx, token = self._next_token(tlist, tidx + 1)
def _split_statements(self, tlist):
- token = tlist.token_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
+ tidx, token = tlist.token_idx_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
while token:
- prev = tlist.token_prev(token, skip_ws=False)
+ pidx, prev = tlist.token_idx_prev(tidx, skip_ws=False)
if prev and prev.is_whitespace():
- tlist.tokens.remove(prev)
+ del tlist.tokens[pidx]
+ tidx -= 1
# only break if it's not the first token
- tlist.insert_before(token, self.nl()) if prev else None
- token = tlist.token_next_by(t=(T.Keyword.DDL, T.Keyword.DML),
- idx=token)
+ if prev:
+ tlist.insert_before(tidx, self.nl())
+ tidx += 1
+ tidx, token = tlist.token_idx_next_by(
+ t=(T.Keyword.DDL, T.Keyword.DML), idx=tidx + 1)
def _process(self, tlist):
func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
@@ -89,16 +96,17 @@ class ReindentFilter(object):
func(tlist)
def _process_where(self, tlist):
- token = tlist.token_next_by(m=(T.Keyword, 'WHERE'))
+ tidx, token = tlist.token_idx_next_by(m=(T.Keyword, 'WHERE'))
# issue121, errors in statement fixed??
- tlist.insert_before(token, self.nl())
+ tlist.insert_before(tidx, self.nl())
with indent(self):
self._process_default(tlist)
def _process_parenthesis(self, tlist):
- is_dml_dll = tlist.token_next_by(t=(T.Keyword.DML, T.Keyword.DDL))
- first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)
+ ttypes = T.Keyword.DML, T.Keyword.DDL
+ _, is_dml_dll = tlist.token_idx_next_by(t=ttypes)
+ fidx, first = tlist.token_idx_next_by(m=sql.Parenthesis.M_OPEN)
with indent(self, 1 if is_dml_dll else 0):
tlist.tokens.insert(0, self.nl()) if is_dml_dll else None
@@ -135,8 +143,8 @@ class ReindentFilter(object):
# len "when ", "then ", "else "
with offset(self, len("WHEN ")):
self._process_default(tlist)
- end = tlist.token_next_by(m=sql.Case.M_CLOSE)
- tlist.insert_before(end, self.nl())
+ end_idx, end = tlist.token_idx_next_by(m=sql.Case.M_CLOSE)
+ tlist.insert_before(end_idx, self.nl())
def _process_default(self, tlist, stmts=True):
self._split_statements(tlist) if stmts else None
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index e0ac81d..d1d8e3e 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -259,7 +259,7 @@ class TokenList(Token):
# this one is inconsistent, using Comment instead of T.Comment...
funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
- return self._token_matching(funcs)
+ return self._token_idx_matching(funcs)[1]
def token_idx_next_by(self, i=None, m=None, t=None, idx=0, end=None):
funcs = lambda tk: imt(tk, i, m, t)
@@ -272,19 +272,22 @@ class TokenList(Token):
def token_not_matching(self, funcs, idx):
funcs = (funcs,) if not isinstance(funcs, (list, tuple)) else funcs
funcs = [lambda tk: not func(tk) for func in funcs]
- return self._token_matching(funcs, idx)
+ return self._token_idx_matching(funcs, idx)[1]
def token_matching(self, funcs, idx):
- return self._token_matching(funcs, idx)
+ return self._token_idx_matching(funcs, idx)[1]
- def token_idx_prev(self, idx, skip_ws=True):
+ def token_idx_prev(self, idx, skip_ws=True, skip_cm=False):
"""Returns the previous token relative to *idx*.
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
``None`` is returned if there's no previous token.
"""
+ if idx is None:
+ return None, None
idx += 1  # a lot of code currently pre-compensates for this
- funcs = lambda tk: not (tk.is_whitespace() and skip_ws)
+ funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+ (skip_cm and imt(tk, t=T.Comment, i=Comment)))
return self._token_idx_matching(funcs, idx, reverse=True)
def token_prev(self, idx=0, skip_ws=True, skip_cm=False):
@@ -313,14 +316,17 @@ class TokenList(Token):
(skip_cm and imt(tk, t=T.Comment, i=Comment)))
return self._token_matching(funcs, idx)
- def token_idx_next(self, idx, skip_ws=True):
+ # TODO: May need to implement skip_cm for upstream changes.
+ # TODO: May need to re-add default value to idx
+ def token_idx_next(self, idx, skip_ws=True, skip_cm=False):
"""Returns the next token relative to *idx*.
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
``None`` is returned if there's no next token.
"""
- if isinstance(idx, int):
- idx += 1 # alot of code usage current pre-compensates for this
+ if idx is None:
+ return None, None
+ idx += 1  # a lot of code currently pre-compensates for this
try:
if not skip_ws:
return idx, self.tokens[idx]
@@ -374,17 +380,21 @@ class TokenList(Token):
def insert_before(self, where, token):
"""Inserts *token* before *where*."""
+ if not isinstance(where, int):
+ where = self.token_index(where)
token.parent = self
- self.tokens.insert(self.token_index(where), token)
+ self.tokens.insert(where, token)
def insert_after(self, where, token, skip_ws=True):
"""Inserts *token* after *where*."""
- next_token = self.token_next(where, skip_ws=skip_ws)
+ if not isinstance(where, int):
+ where = self.token_index(where)
+ nidx, next_ = self.token_idx_next(where, skip_ws=skip_ws)
token.parent = self
- if next_token is None:
+ if next_ is None:
self.tokens.append(token)
else:
- self.insert_before(next_token, token)
+ self.tokens.insert(nidx, token)
def has_alias(self):
"""Returns ``True`` if an alias is present."""
@@ -394,12 +404,13 @@ class TokenList(Token):
"""Returns the alias for this identifier or ``None``."""
# "name AS alias"
- kw = self.token_next_by(m=(T.Keyword, 'AS'))
+ kw_idx, kw = self.token_idx_next_by(m=(T.Keyword, 'AS'))
if kw is not None:
- return self._get_first_name(kw, keywords=True)
+ return self._get_first_name(kw_idx + 1, keywords=True)
# "name alias" or "complicated column expression alias"
- if len(self.tokens) > 2 and self.token_next_by(t=T.Whitespace):
+ _, ws = self.token_idx_next_by(t=T.Whitespace)
+ if len(self.tokens) > 2 and ws is not None:
return self._get_first_name(reverse=True)
def get_name(self):
@@ -414,16 +425,16 @@ class TokenList(Token):
def get_real_name(self):
"""Returns the real name (object name) of this identifier."""
# a.b
- dot = self.token_next_by(m=(T.Punctuation, '.'))
- return self._get_first_name(dot)
+ dot_idx, _ = self.token_idx_next_by(m=(T.Punctuation, '.'))
+ return self._get_first_name(dot_idx)
def get_parent_name(self):
"""Return name of the parent object if any.
A parent object is identified by the first occurring dot.
"""
- dot = self.token_next_by(m=(T.Punctuation, '.'))
- prev_ = self.token_prev(dot)
+ dot_idx, _ = self.token_idx_next_by(m=(T.Punctuation, '.'))
+ _, prev_ = self.token_idx_prev(dot_idx)
return remove_quotes(prev_.value) if prev_ is not None else None
def _get_first_name(self, idx=None, reverse=False, keywords=False):
@@ -472,9 +483,10 @@ class Statement(TokenList):
# The WITH keyword should be followed by either an Identifier or
# an IdentifierList containing the CTE definitions; the actual
# DML keyword (e.g. SELECT, INSERT) will follow next.
- token = self.token_next(first_token, skip_ws=True)
+ fidx = self.token_index(first_token)
+ tidx, token = self.token_idx_next(fidx, skip_ws=True)
if isinstance(token, (Identifier, IdentifierList)):
- dml_keyword = self.token_next(token, skip_ws=True)
+ _, dml_keyword = self.token_idx_next(tidx, skip_ws=True)
if dml_keyword.ttype == T.Keyword.DML:
return dml_keyword.normalized
@@ -491,18 +503,18 @@ class Identifier(TokenList):
def is_wildcard(self):
"""Return ``True`` if this identifier contains a wildcard."""
- token = self.token_next_by(t=T.Wildcard)
+ _, token = self.token_idx_next_by(t=T.Wildcard)
return token is not None
def get_typecast(self):
"""Returns the typecast or ``None`` of this object as a string."""
- marker = self.token_next_by(m=(T.Punctuation, '::'))
- next_ = self.token_next(marker, skip_ws=False)
+ midx, marker = self.token_idx_next_by(m=(T.Punctuation, '::'))
+ nidx, next_ = self.token_idx_next(midx, skip_ws=False)
return next_.value if next_ else None
def get_ordering(self):
"""Returns the ordering or ``None`` as uppercase string."""
- ordering = self.token_next_by(t=T.Keyword.Order)
+ _, ordering = self.token_idx_next_by(t=T.Keyword.Order)
return ordering.normalized if ordering else None
def get_array_indices(self):
@@ -649,7 +661,7 @@ class Function(TokenList):
"""Return a list of parameters."""
parenthesis = self.tokens[-1]
for token in parenthesis.tokens:
- if imt(token, i=IdentifierList):
+ if isinstance(token, IdentifierList):
return token.get_identifiers()
elif imt(token, i=(Function, Identifier), t=T.Literal):
return [token, ]