summaryrefslogtreecommitdiff
path: root/sqlparse/engine
diff options
context:
space:
mode:
author: Andi Albrecht <albrecht.andi@gmail.com> 2011-09-06 08:01:12 +0200
committer: Andi Albrecht <albrecht.andi@gmail.com> 2011-09-06 08:01:12 +0200
commit: bcaf3269b3a8746a4ee8c848256d5f2b11b25d27 (patch)
tree: 942dc34e3e993bdcce71e0295c146fd556efbba5 /sqlparse/engine
parent: 3bf54c10f0742e3afc75dcaa4623397097beadc5 (diff)
download: sqlparse-bcaf3269b3a8746a4ee8c848256d5f2b11b25d27.tar.gz
Code cleanup.
Diffstat (limited to 'sqlparse/engine')
-rw-r--r-- sqlparse/engine/filter.py   5
-rw-r--r-- sqlparse/engine/grouping.py 6
2 files changed, 7 insertions, 4 deletions
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index 89d9b15..421b3f3 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -61,14 +61,15 @@ class StatementFilter(TokenFilter):
if unified == 'END':
# Should this respect a preceding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
- self._begin_depth = max(0, self._begin_depth-1)
+ self._begin_depth = max(0, self._begin_depth - 1)
return -1
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
return 0
- if unified in ('IF', 'FOR') and self._is_create and self._begin_depth > 0:
+ if (unified in ('IF', 'FOR')
+ and self._is_create and self._begin_depth > 0):
return 1
# Default
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 9bc9612..72f919b 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -267,13 +267,15 @@ def group_aliased(tlist):
token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
while token:
next_ = tlist.token_next(tlist.token_index(token))
- if next_ is not None and isinstance(next_, (sql.Identifier, sql.Function)):
+ if next_ is not None and isinstance(next_,
+ (sql.Identifier, sql.Function)):
grp = tlist.tokens_between(token, next_)[1:]
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
idx = tlist.token_index(token) + 1
- token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
+ token = tlist.token_next_by_instance(idx,
+ (sql.Identifier, sql.Function))
def group_typecasts(tlist):