| author | Vik <vmuriart@users.noreply.github.com> | 2016-06-06 06:29:25 -0700 |
|---|---|---|
| committer | Vik <vmuriart@users.noreply.github.com> | 2016-06-06 06:29:25 -0700 |
| commit | b9d81ac4fe49114f57dc33c0d635f99ff56e62f2 (patch) | |
| tree | 88642eeb84d318511191a822fd781b44e1d63df1 /sqlparse/engine/__init__.py | |
| parent | c6a5e7ac2a5ecc993f4e5292ab16e6df6b84f26c (diff) | |
| parent | 5747015634a39191511de8db576f2cd0aa5eafc9 (diff) | |
| download | sqlparse-b9d81ac4fe49114f57dc33c0d635f99ff56e62f2.tar.gz | |
Merge pull request #251 from andialbrecht/filters_sql
Update Filters sql
Diffstat (limited to 'sqlparse/engine/__init__.py')
| -rw-r--r-- | sqlparse/engine/__init__.py | 48 |
1 file changed, 12 insertions, 36 deletions
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 1c2bf09..7f00c57 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -13,12 +13,10 @@ from sqlparse.engine.filter import StatementFilter
 
 
 class FilterStack(object):
-
     def __init__(self):
         self.preprocess = []
         self.stmtprocess = []
         self.postprocess = []
-        self.split_statements = False
         self._grouping = False
 
     def enable_grouping(self):
@@ -27,42 +25,20 @@ class FilterStack(object):
     def run(self, sql, encoding=None):
         stream = lexer.tokenize(sql, encoding)
         # Process token stream
-        if self.preprocess:
-            for filter_ in self.preprocess:
-                stream = filter_.process(self, stream)
-
-        if (self.stmtprocess or self.postprocess or
-                self.split_statements or self._grouping):
-            splitter = StatementFilter()
-            stream = splitter.process(self, stream)
-
-        if self._grouping:
-
-            def _group(stream):
-                for stmt in stream:
-                    grouping.group(stmt)
-                    yield stmt
-            stream = _group(stream)
+        for filter_ in self.preprocess:
+            stream = filter_.process(stream)
 
-        if self.stmtprocess:
+        stream = StatementFilter().process(stream)
 
-            def _run1(stream):
-                ret = []
-                for stmt in stream:
-                    for filter_ in self.stmtprocess:
-                        filter_.process(self, stmt)
-                    ret.append(stmt)
-                return ret
-            stream = _run1(stream)
+        # Output: Stream processed Statements
+        for stmt in stream:
+            if self._grouping:
+                stmt = grouping.group(stmt)
 
-        if self.postprocess:
+            for filter_ in self.stmtprocess:
+                filter_.process(stmt)
 
-            def _run2(stream):
-                for stmt in stream:
-                    stmt.tokens = list(stmt.flatten())
-                    for filter_ in self.postprocess:
-                        stmt = filter_.process(self, stmt)
-                    yield stmt
-            stream = _run2(stream)
+            for filter_ in self.postprocess:
+                stmt = filter_.process(stmt)
 
-        return stream
+            yield stmt
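The change turns `FilterStack.run()` into a generator and drops the extra stack argument from the filters' `process()` calls. Below is a minimal usage sketch of the new shape, relying only on what the diff above shows; the `PrintFilter` class is hypothetical and stands in for any statement-level filter that implements the new one-argument `process(stmt)`.

```python
# Sketch only: illustrates the FilterStack API after this change.
from sqlparse.engine import FilterStack


class PrintFilter(object):
    """Hypothetical stmtprocess filter with the new process(stmt) signature."""

    def process(self, stmt):
        # stmtprocess filters work on the statement in place;
        # FilterStack.run() ignores their return value.
        print('statement:', str(stmt).strip())


stack = FilterStack()
stack.enable_grouping()                 # group tokens into higher-level nodes
stack.stmtprocess.append(PrintFilter())

# run() now yields each split, grouped, and filtered statement lazily
# instead of returning a fully materialized stream.
for stmt in stack.run('select 1; select 2;'):
    pass
```

Because `run()` is a generator, statements are only tokenized, grouped, and filtered as the caller iterates, which removes the need for the old `split_statements` flag and the nested `_group`/`_run1`/`_run2` helpers.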
