diff options
| author | Andi Albrecht <albrecht.andi@gmail.com> | 2009-05-06 07:53:44 +0200 |
|---|---|---|
| committer | Andi Albrecht <albrecht.andi@gmail.com> | 2009-05-06 07:53:44 +0200 |
| commit | 974222bcb24a5b2bf3a0e5ecd616a2c3855e8342 (patch) | |
| tree | 7a366f4fd2bb286b88c23ac7f120fd3ba9954047 /sqlparse | |
| parent | ac165c93766f19d9c503ecb2d47d6e872d54c21c (diff) | |
| download | sqlparse-974222bcb24a5b2bf3a0e5ecd616a2c3855e8342.tar.gz | |
Code cleanup and test coverage.
Diffstat (limited to 'sqlparse')
| -rw-r--r-- | sqlparse/engine/filter.py | 1 |
| -rw-r--r-- | sqlparse/formatter.py | 38 |
| -rw-r--r-- | sqlparse/sql.py | 26 |
3 files changed, 14 insertions, 51 deletions
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py index 146690c..08ff21d 100644 --- a/sqlparse/engine/filter.py +++ b/sqlparse/engine/filter.py @@ -17,6 +17,7 @@ class TokenFilter(object): class StatementFilter(TokenFilter): def __init__(self): + TokenFilter.__init__(self) self._in_declare = False self._in_dbldollar = False self._is_create = False diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py index 27a1bd9..9c6f76b 100644 --- a/sqlparse/formatter.py +++ b/sqlparse/formatter.py @@ -120,41 +120,3 @@ def build_filter_stack(stack, options): return stack -def format(statement, **options): - import filters - lexer = Lexer() -# lexer.add_filter('whitespace') - lexer.add_filter(filters.GroupFilter()) - if options.get('reindent', False): - lexer.add_filter(filters.StripWhitespaceFilter()) - lexer.add_filter(filters.IndentFilter( - n_indents=options.get('n_indents', 2))) - if options.get('ltrim', False): - lexer.add_filter(filters.LTrimFilter()) - keyword_case = options.get('keyword_case', None) - if keyword_case is not None: - assert keyword_case in ('lower', 'upper', 'capitalize') - lexer.add_filter(filters.KeywordCaseFilter(case=keyword_case)) - identifier_case = options.get('identifier_case', None) - if identifier_case is not None: - assert identifier_case in ('lower', 'upper', 'capitalize') - lexer.add_filter(filters.IdentifierCaseFilter(case=identifier_case)) - if options.get('strip_comments', False): - lexer.add_filter(filters.StripCommentsFilter()) - right_margin = options.get('right_margin', None) - if right_margin is not None: - right_margin = int(right_margin) - assert right_margin > 0 - lexer.add_filter(filters.RightMarginFilter(margin=right_margin)) - lexer.add_filter(filters.UngroupFilter()) - if options.get('output_format', None): - ofrmt = options['output_format'] - assert ofrmt in ('sql', 'python', 'php') - if ofrmt == 'python': - lexer.add_filter(filters.OutputPythonFilter()) - elif ofrmt == 'php': - lexer.add_filter(filters.OutputPHPFilter()) - tokens = [] - for ttype, value in lexer.get_tokens(unicode(statement)): - tokens.append((ttype, value)) - return statement.__class__(tokens) diff --git a/sqlparse/sql.py b/sqlparse/sql.py index 2ab05b9..5abaad0 100644 --- a/sqlparse/sql.py +++ b/sqlparse/sql.py @@ -118,19 +118,19 @@ class TokenList(Token): def _get_repr_name(self): return self.__class__.__name__ - def _pprint_tree(self, max_depth=None, depth=0): - """Pretty-print the object tree.""" - indent = ' '*(depth*2) - for token in self.tokens: - if token.is_group(): - pre = ' | ' - else: - pre = ' | ' - print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(), - token._get_repr_value()) - if (token.is_group() and max_depth is not None - and depth < max_depth): - token._pprint_tree(max_depth, depth+1) + ## def _pprint_tree(self, max_depth=None, depth=0): + ## """Pretty-print the object tree.""" + ## indent = ' '*(depth*2) + ## for token in self.tokens: + ## if token.is_group(): + ## pre = ' | ' + ## else: + ## pre = ' | ' + ## print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(), + ## token._get_repr_value()) + ## if (token.is_group() and max_depth is not None + ## and depth < max_depth): + ## token._pprint_tree(max_depth, depth+1) def flatten(self): """Generator yielding ungrouped tokens.
