summaryrefslogtreecommitdiff
path: root/sqlparse
diff options
context:
space:
mode:
authorAndi Albrecht <albrecht.andi@gmail.com>2011-09-27 12:45:30 +0200
committerAndi Albrecht <albrecht.andi@gmail.com>2011-09-27 12:45:30 +0200
commitf811fa12247330adc27f1b842167a112b1c0829c (patch)
tree455bc81faf235419724537bde5d065ea1b186f59 /sqlparse
parent4b9261f4076befecbc4757c21ed0b268df546f96 (diff)
parentb0010af3ec74e57adf8910ab5d69f408cda3c475 (diff)
downloadsqlparse-f811fa12247330adc27f1b842167a112b1c0829c.tar.gz
Merged.
Diffstat (limited to 'sqlparse')
-rw-r--r--sqlparse/__init__.py6
-rw-r--r--sqlparse/engine/filter.py5
-rw-r--r--sqlparse/engine/grouping.py6
-rw-r--r--sqlparse/filters.py16
-rw-r--r--sqlparse/pipeline.py31
-rw-r--r--sqlparse/sql.py4
6 files changed, 58 insertions, 10 deletions
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index 7698e46..5ccf092 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -53,3 +53,9 @@ def split(sql):
stack = engine.FilterStack()
stack.split_statements = True
return [unicode(stmt) for stmt in stack.run(sql)]
+
+
+from sqlparse.engine.filter import StatementFilter
+def split2(stream):
+ splitter = StatementFilter()
+ return list(splitter.process(None, stream)) \ No newline at end of file
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index 89d9b15..421b3f3 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -61,14 +61,15 @@ class StatementFilter(TokenFilter):
if unified == 'END':
# Should this respect a preceding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
- self._begin_depth = max(0, self._begin_depth-1)
+ self._begin_depth = max(0, self._begin_depth - 1)
return -1
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
return 0
- if unified in ('IF', 'FOR') and self._is_create and self._begin_depth > 0:
+ if (unified in ('IF', 'FOR')
+ and self._is_create and self._begin_depth > 0):
return 1
# Default
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 6e99782..cc75de4 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -280,13 +280,15 @@ def group_aliased(tlist):
token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
while token:
next_ = tlist.token_next(tlist.token_index(token))
- if next_ is not None and isinstance(next_, (sql.Identifier, sql.Function)):
+ if next_ is not None and isinstance(next_,
+ (sql.Identifier, sql.Function)):
grp = tlist.tokens_between(token, next_)[1:]
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
idx = tlist.token_index(token) + 1
- token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
+ token = tlist.token_next_by_instance(idx,
+ (sql.Identifier, sql.Function))
def group_typecasts(tlist):
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index cba7b8f..6f9b579 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -420,7 +420,7 @@ class ColumnsSelect(Filter):
elif mode == 1:
if value == 'FROM':
if oldValue:
- yield Name, oldValue
+ yield oldValue
mode = 3 # Columns have been checked
@@ -431,7 +431,7 @@ class ColumnsSelect(Filter):
elif (token_type == Punctuation
and value == ',' and not parenthesis):
if oldValue:
- yield Name, oldValue
+ yield oldValue
oldValue = ""
elif token_type not in Whitespace:
@@ -446,7 +446,7 @@ class ColumnsSelect(Filter):
elif mode == 2:
# We also check for Keywords because of a bug in SQLParse
if token_type == Name or token_type == Keyword:
- yield Name, value
+ yield value
mode = 1
@@ -463,6 +463,14 @@ class SerializerUnicode(Filter):
res += '\n'
return res
+def Tokens2Unicode(stream):
+ result = ""
+
+ for _, value in stream:
+ result += unicode(value)
+
+ return result
+
class OutputPythonFilter(Filter):
@@ -576,4 +584,4 @@ class Limit(Filter):
if index and token_type in Keyword and value == 'LIMIT':
return stream[4 - index][1]
- return -1
+ return -1 \ No newline at end of file
diff --git a/sqlparse/pipeline.py b/sqlparse/pipeline.py
new file mode 100644
index 0000000..34dad19
--- /dev/null
+++ b/sqlparse/pipeline.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2011 Jesus Leganes "piranna", piranna@gmail.com
+#
+# This module is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+from types import GeneratorType
+
+
+class Pipeline(list):
+ """Pipeline to process filters sequentially"""
+
+ def __call__(self, stream):
+ """Run the pipeline
+
+ Return a static (non-generator) version of the result
+ """
+
+ # Run the stream over all the filters on the pipeline
+ for filter in self:
+ # Functions and callable objects (objects with '__call__' method)
+ if callable(filter):
+ stream = filter(stream)
+
+ # Normal filters (objects with 'process' method)
+ else:
+ stream = filter.process(None, stream)
+
+ # If the last filter returned a generator, materialize it into a list
+ if isinstance(stream, GeneratorType):
+ return list(stream)
+ return stream
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 5b8f067..4d56bf3 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -146,7 +146,7 @@ class TokenList(Token):
def _pprint_tree(self, max_depth=None, depth=0):
"""Pretty-print the object tree."""
- indent = ' '*(depth*2)
+ indent = ' ' * (depth * 2)
for idx, token in enumerate(self.tokens):
if token.is_group():
pre = ' +-'
@@ -156,7 +156,7 @@ class TokenList(Token):
token._get_repr_name(),
token._get_repr_value())
if (token.is_group() and (max_depth is None or depth < max_depth)):
- token._pprint_tree(max_depth, depth+1)
+ token._pprint_tree(max_depth, depth + 1)
def flatten(self):
"""Generator yielding ungrouped tokens.