Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/__init__.py        |  1 -
-rw-r--r--  sqlparse/engine/__init__.py |  4 ++--
-rw-r--r--  sqlparse/filters.py         | 29 ---------------------------
-rw-r--r--  sqlparse/formatter.py       |  1 -
-rw-r--r--  sqlparse/lexer.py           | 10 +++++++---
-rw-r--r--  sqlparse/sql.py             |  4 ++--
6 files changed, 11 insertions(+), 38 deletions(-)
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index deb6646..2e9493f 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -53,4 +53,3 @@ def split(sql):
     stack = engine.FilterStack()
     stack.split_statements = True
     return [unicode(stmt) for stmt in stack.run(sql)]
-
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 055025e..02f84c6 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -40,8 +40,8 @@ class FilterStack(object):
         stream = lexer.tokenize(sql)
         # Process token stream
         if self.preprocess:
-            for filter_ in self.preprocess:
-                stream = filter_.process(self, stream)
+            for filter_ in self.preprocess:
+                stream = filter_.process(self, stream)
 
         if (self.stmtprocess or self.postprocess or self.split_statements
             or self._grouping):
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index 2692613..fd03df1 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -19,34 +19,6 @@ class TokenFilter(Filter):
         raise NotImplementedError
 
 
-# FIXME: Should be removed
-def rstrip(stream):
-    buff = []
-    for token in stream:
-        if token.is_whitespace() and '\n' in token.value:
-            # assuming there's only one \n in value
-            before, rest = token.value.split('\n', 1)
-            token.value = '\n%s' % rest
-            buff = []
-            yield token
-        elif token.is_whitespace():
-            buff.append(token)
-        elif token.is_group():
-            token.tokens = list(rstrip(token.tokens))
-            # process group and look if it starts with a nl
-            if token.tokens and token.tokens[0].is_whitespace():
-                before, rest = token.tokens[0].value.split('\n', 1)
-                token.tokens[0].value = '\n%s' % rest
-                buff = []
-            while buff:
-                yield buff.pop(0)
-            yield token
-        else:
-            while buff:
-                yield buff.pop(0)
-            yield token
-
-
 # --------------------------
 # token process
 
@@ -437,4 +409,3 @@ class OutputPHPFilter(Filter):
         varname = self.varname
         stmt.tokens = tuple(self._process(stmt.tokens, varname))
         return stmt
-
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index b9fd891..1955b5d 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -119,4 +119,3 @@ def build_filter_stack(stack, options):
 
 
     return stack
-
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index ed5771f..8ccc7de 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -43,9 +43,11 @@ def apply_filters(stream, filters, lexer=None):
     a stream. If lexer is given it's forwarded to the filter, otherwise
     the filter receives `None`.
     """
+
     def _apply(filter_, stream):
         for token in filter_.filter(lexer, stream):
             yield token
+
     for filter_ in filters:
         stream = _apply(filter_, stream)
     return stream
@@ -68,7 +70,8 @@ class LexerMeta(type):
             if isinstance(tdef, include):
                 # it's a state reference
                 assert tdef != state, "circular state reference %r" % state
-                tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
+                tokens.extend(cls._process_state(
+                    unprocessed, processed, str(tdef)))
                 continue
 
             assert type(tdef) is tuple, "wrong rule def %r" % tdef
@@ -76,8 +79,9 @@
             try:
                 rex = re.compile(tdef[0], rflags).match
             except Exception, err:
-                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
-                                 (tdef[0], state, cls, err))
+                raise ValueError(("uncompilable regex %r in state"
+                                  " %r of %r: %s"
+                                  % (tdef[0], state, cls, err)))
 
             assert type(tdef[1]) is _TokenType or callable(tdef[1]), \
                 'token type must be simple type or callable, not %r' % (tdef[1],)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 31feb10..1c99a53 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -201,8 +201,8 @@ class TokenList(Token):
             passed = False
             for func in funcs:
                 if func(token):
-                    passed = True
-                    break
+                    passed = True
+                    break
             if not passed:
                 return token
         return None
