|     |     |     |
| --- | --- | --- |
| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-13 10:50:58 -0700 |
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-14 03:28:26 -0700 |
| commit | 997f95b8b6ec5129362dcfe5deedaf50800e3afc (patch) | |
| tree | 47d8ca13b04d383c5280548927bd2c1489c51f41 | |
| parent | 954ba46e16af4e3c9b1302bbae95ebf2a4be2a8b (diff) | |
| download | sqlparse-997f95b8b6ec5129362dcfe5deedaf50800e3afc.tar.gz | |
Change argument order to match order of all other functions
|     |     |     |
| --- | --- | --- |
| -rw-r--r-- | sqlparse/engine/grouping.py | 3 |
| -rw-r--r-- | sqlparse/sql.py | 4 |
| -rw-r--r-- | tests/test_tokenize.py | 12 |

3 files changed, 9 insertions, 10 deletions
```diff
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index bf76119..86c4bf2 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -195,7 +195,8 @@ def group_comments(tlist):
     token = tlist.token_next_by(t=T.Comment)
     while token:
         end = tlist.token_not_matching(
-            tlist.token_index(token) + 1, lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace())
+            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(),
+            idx=tlist.token_index(token) + 1)
         if end is not None:
             end = tlist.token_prev(tlist.token_index(end), False)
             token = tlist.group_tokens_between(sql.Comment, token, end)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 027228d..e0ac81d 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -269,12 +269,12 @@ class TokenList(Token):
         funcs = lambda tk: imt(tk, i, m, t)
         return self._token_matching(funcs, idx, end)
 
-    def token_not_matching(self, idx, funcs):
+    def token_not_matching(self, funcs, idx):
         funcs = (funcs,) if not isinstance(funcs, (list, tuple)) else funcs
         funcs = [lambda tk: not func(tk) for func in funcs]
         return self._token_matching(funcs, idx)
 
-    def token_matching(self, idx, funcs):
+    def token_matching(self, funcs, idx):
         return self._token_matching(funcs, idx)
 
     def token_idx_prev(self, idx, skip_ws=True):
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index adfd1ea..61eaa3e 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -113,14 +113,12 @@ class TestTokenList(unittest.TestCase):
         t1 = sql.Token(T.Keyword, 'foo')
         t2 = sql.Token(T.Punctuation, ',')
         x = sql.TokenList([t1, t2])
-        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is T.Keyword]),
-                         t1)
-        self.assertEqual(x.token_matching(
-            0,
-            [lambda t: t.ttype is T.Punctuation]),
-            t2)
-        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is T.Keyword]),
-                         None)
+        self.assertEqual(x.token_matching(
+            [lambda t: t.ttype is T.Keyword], 0), t1)
+        self.assertEqual(x.token_matching(
+            [lambda t: t.ttype is T.Punctuation], 0), t2)
+        self.assertEqual(x.token_matching(
+            [lambda t: t.ttype is T.Keyword], 1), None)
 
 
 class TestStream(unittest.TestCase):
```
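To illustrate the reordered API, here is a minimal usage sketch, assuming the sqlparse tree at this commit; the tokens and predicates below are illustrative, mirroring the updated tests rather than any code in this diff:

```python
from sqlparse import sql
from sqlparse import tokens as T

# Build a small token list to search through.
t1 = sql.Token(T.Keyword, 'foo')
t2 = sql.Token(T.Punctuation, ',')
tlist = sql.TokenList([t1, t2])

# After this commit the predicate(s) come first and the start index
# second, matching the argument order of the other lookup helpers.
match = tlist.token_matching([lambda tk: tk.ttype is T.Keyword], 0)
assert match is t1

# token_not_matching() returns the first token for which the predicate
# is false; a single callable is wrapped into a tuple internally.
other = tlist.token_not_matching(lambda tk: tk.ttype is T.Keyword, 0)
assert other is t2
```

Putting the predicate first also lets callers pass the start position by keyword, as the updated `group_comments()` call now does with `idx=tlist.token_index(token) + 1`.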
