diff options
| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-02 21:38:23 -0700 |
|---|---|---|
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-04 15:06:04 -0700 |
| commit | 3fed0393a80a40ea28e5fc0cea9b526630e9f42b (patch) | |
| tree | 85a6d8b71b8133b21d34c765aeb68b3f7fd32718 /sqlparse/engine/__init__.py | |
| parent | 5ce225522ba2b2a8af23c7efcbd6261bd9f09528 (diff) | |
| download | sqlparse-3fed0393a80a40ea28e5fc0cea9b526630e9f42b.tar.gz | |
Refactor filter-stack to simplify logic
if (self.stmtprocess or self.postprocess or
self.split_statements or self._grouping):
always evaluates to true after removing unused features
Diffstat (limited to 'sqlparse/engine/__init__.py')
| -rw-r--r-- | sqlparse/engine/__init__.py | 48 |
1 file changed, 12 insertions, 36 deletions
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index e69a138..7f00c57 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -13,12 +13,10 @@ from sqlparse.engine.filter import StatementFilter
 
 class FilterStack(object):
-
     def __init__(self):
         self.preprocess = []
         self.stmtprocess = []
         self.postprocess = []
-        self.split_statements = False
         self._grouping = False
 
     def enable_grouping(self):
@@ -27,42 +25,20 @@ class FilterStack(object):
     def run(self, sql, encoding=None):
         stream = lexer.tokenize(sql, encoding)
         # Process token stream
-        if self.preprocess:
-            for filter_ in self.preprocess:
-                stream = filter_.process(stream)
-
-        if (self.stmtprocess or self.postprocess or
-                self.split_statements or self._grouping):
-            splitter = StatementFilter()
-            stream = splitter.process(stream)
-
-        if self._grouping:
-
-            def _group(stream):
-                for stmt in stream:
-                    grouping.group(stmt)
-                    yield stmt
-            stream = _group(stream)
+        for filter_ in self.preprocess:
+            stream = filter_.process(stream)
 
-        if self.stmtprocess:
+        stream = StatementFilter().process(stream)
 
-            def _run1(stream):
-                ret = []
-                for stmt in stream:
-                    for filter_ in self.stmtprocess:
-                        filter_.process(stmt)
-                    ret.append(stmt)
-                return ret
-            stream = _run1(stream)
+        # Output: Stream processed Statements
+        for stmt in stream:
+            if self._grouping:
+                stmt = grouping.group(stmt)
 
-        if self.postprocess:
+            for filter_ in self.stmtprocess:
+                filter_.process(stmt)
 
-            def _run2(stream):
-                for stmt in stream:
-                    stmt.tokens = list(stmt.flatten())
-                    for filter_ in self.postprocess:
-                        stmt = filter_.process(stmt)
-                    yield stmt
-            stream = _run2(stream)
+            for filter_ in self.postprocess:
+                stmt = filter_.process(stmt)
 
-        return stream
+            yield stmt
