diff --stat summary:
 sqlparse/sql.py           | 28 ++++++--------------------
 tests/test_regressions.py |  2 +-
 tests/test_tokenize.py    |  6 +++---
 3 files changed, 12 insertions(+), 24 deletions(-)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py index e6fbab2..cee6af5 100644 --- a/sqlparse/sql.py +++ b/sqlparse/sql.py @@ -224,20 +224,6 @@ class TokenList(Token): if func(token): return token - def token_first(self, skip_ws=True, skip_cm=False): - """Returns the first child token. - - If *ignore_whitespace* is ``True`` (the default), whitespace - tokens are ignored. - - if *ignore_comments* is ``True`` (default: ``False``), comments are - ignored too. - """ - # this on is inconsistent, using Comment instead of T.Comment... - funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or - (skip_cm and imt(tk, i=Comment))) - return self._token_matching(funcs) - def token_next_by(self, i=None, m=None, t=None, idx=0, end=None): funcs = lambda tk: imt(tk, i, m, t) return self._token_matching(funcs, idx, end) @@ -250,24 +236,26 @@ class TokenList(Token): def token_matching(self, idx, funcs): return self._token_matching(funcs, idx) - def token_prev(self, idx, skip_ws=True, skip_cm=False): + def token_prev(self, idx=0, skip_ws=True, skip_cm=False): """Returns the previous token relative to *idx*. If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. ``None`` is returned if there's no previous token. """ funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or - (skip_cm and imt(tk, t=T.Comment))) + (skip_cm and imt(tk, t=T.Comment, i=Comment))) return self._token_matching(funcs, idx, reverse=True) - def token_next(self, idx, skip_ws=True, skip_cm=False): + def token_next(self, idx=0, skip_ws=True, skip_cm=False): """Returns the next token relative to *idx*. + If called with idx = 0. Returns the first child token. If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. + If *skip_cm* is ``True`` (default: ``False``), comments are ignored. ``None`` is returned if there's no next token. 
""" funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or - (skip_cm and imt(tk, t=T.Comment))) + (skip_cm and imt(tk, t=T.Comment, i=Comment))) return self._token_matching(funcs, idx) def token_index(self, token, start=0): @@ -395,7 +383,7 @@ class Statement(TokenList): Whitespaces and comments at the beginning of the statement are ignored. """ - first_token = self.token_first(skip_cm=True) + first_token = self.token_next(skip_cm=True) if first_token is None: # An "empty" statement that either has not tokens at all # or only whitespace tokens. @@ -433,7 +421,7 @@ class Identifier(TokenList): def get_typecast(self): """Returns the typecast or ``None`` of this object as a string.""" marker = self.token_next_by(m=(T.Punctuation, '::')) - next_ = self.token_next(marker, False) + next_ = self.token_next(marker, skip_ws=False) return next_.value if next_ else None def get_ordering(self): diff --git a/tests/test_regressions.py b/tests/test_regressions.py index 4eb1621..616c321 100644 --- a/tests/test_regressions.py +++ b/tests/test_regressions.py @@ -48,7 +48,7 @@ class RegressionTests(TestCaseBase): self.assert_(p.tokens[0].ttype is T.Comment.Single) def test_issue34(self): - t = sqlparse.parse("create")[0].token_first() + t = sqlparse.parse("create")[0].token_next() self.assertEqual(t.match(T.Keyword.DDL, "create"), True) self.assertEqual(t.match(T.Keyword.DDL, "CREATE"), True) diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py index adfd1ea..7200682 100644 --- a/tests/test_tokenize.py +++ b/tests/test_tokenize.py @@ -104,10 +104,10 @@ class TestTokenList(unittest.TestCase): def test_token_first(self): p = sqlparse.parse(' select foo')[0] - first = p.token_first() + first = p.token_next() self.assertEqual(first.value, 'select') - self.assertEqual(p.token_first(skip_ws=False).value, ' ') - self.assertEqual(sql.TokenList([]).token_first(), None) + self.assertEqual(p.token_next(skip_ws=False).value, ' ') + self.assertEqual(sql.TokenList([]).token_next(), 
None) def test_token_matching(self): t1 = sql.Token(T.Keyword, 'foo')
