Diffstat (limited to 'sqlparse/sql.py')
-rw-r--r-- | sqlparse/sql.py | 31 |
1 files changed, 12 insertions, 19 deletions
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 5ecfbdc..8601537 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -5,6 +5,7 @@
 import re
 import sys
 
+from sqlparse import compat
 from sqlparse import tokens as T
 
 
@@ -32,7 +33,7 @@ class Token(object):
         if sys.version_info[0] == 3:
             return self.value
         else:
-            return unicode(self).encode('utf-8')
+            return compat.text_type(self).encode('utf-8')
 
     def __repr__(self):
         short = self._get_repr_value()
@@ -51,15 +52,15 @@ class Token(object):
         .. deprecated:: 0.1.5
            Use ``unicode(token)`` (for Python 3: ``str(token)``) instead.
         """
-        return unicode(self)
+        return compat.text_type(self)
 
     def _get_repr_name(self):
         return str(self.ttype).split('.')[-1]
 
     def _get_repr_value(self):
-        raw = unicode(self)
+        raw = compat.text_type(self)
         if len(raw) > 7:
-            raw = raw[:6] + u'...'
+            raw = raw[:6] + compat.text_type('...')
         return re.sub('\s+', ' ', raw)
 
     def flatten(self):
@@ -83,7 +84,7 @@ class Token(object):
             return type_matched
 
         if regex:
-            if isinstance(values, basestring):
+            if isinstance(values, compat.string_types):
                 values = set([values])
 
             if self.ttype is T.Keyword:
@@ -96,7 +97,7 @@ class Token(object):
                     return True
             return False
 
-        if isinstance(values, basestring):
+        if isinstance(values, compat.string_types):
             if self.is_keyword:
                 return values.upper() == self.normalized
             return values == self.value
@@ -172,7 +173,7 @@ class TokenList(Token):
         if sys.version_info[0] == 3:
             return ''.join(x.value for x in self.flatten())
         else:
-            return ''.join(unicode(x) for x in self.flatten())
+            return ''.join(compat.text_type(x) for x in self.flatten())
 
     def _get_repr_name(self):
         return self.__class__.__name__
@@ -185,9 +186,9 @@ class TokenList(Token):
                 pre = ' +-'
             else:
                 pre = ' | '
-            print '%s%s%d %s \'%s\'' % (indent, pre, idx,
+            print('%s%s%d %s \'%s\'' % (indent, pre, idx,
                                         token._get_repr_name(),
-                                        token._get_repr_value())
+                                        token._get_repr_value()))
             if (token.is_group()
                 and (max_depth is None or depth < max_depth)):
                 token._pprint_tree(max_depth, depth + 1)
@@ -220,18 +221,10 @@ class TokenList(Token):
             else:
                 yield token
 
-#    def __iter__(self):
-#        return self
-#
-#    def next(self):
-#        for token in self.tokens:
-#            yield token
-
     def is_group(self):
         return True
 
     def get_sublists(self):
-#        return [x for x in self.tokens if isinstance(x, TokenList)]
         for x in self.tokens:
             if isinstance(x, TokenList):
                 yield x
@@ -285,7 +278,7 @@ class TokenList(Token):
         if not isinstance(idx, int):
             idx = self.token_index(idx)
 
-        for n in xrange(idx, len(self.tokens)):
+        for n in range(idx, len(self.tokens)):
             token = self.tokens[n]
             if token.match(ttype, value, regex):
                 return token
@@ -510,7 +503,7 @@ class Identifier(TokenList):
         next_ = self.token_next(self.token_index(marker), False)
         if next_ is None:
             return None
-        return unicode(next_)
+        return compat.text_type(next_)
 
     def get_ordering(self):
         """Returns the ordering or ``None`` as uppercase string."""
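
The change replaces the Python 2-only builtins unicode, basestring, and xrange with names taken from the new sqlparse.compat import, whose contents are not part of this diff. The following is a minimal sketch of what such a compatibility shim typically provides, assuming the common idiom: only the names text_type and string_types are confirmed by the diff itself; the PY2 flag is an illustrative assumption.

# Rough sketch of a py2/py3 compat shim like the one imported above.
# Only text_type and string_types are confirmed by this diff; the PY2
# flag is an assumption added for illustration.
import sys

PY2 = sys.version_info[0] == 2

if PY2:
    text_type = unicode            # noqa: F821 -- builtin on Python 2 only
    string_types = (str, unicode)  # noqa: F821
else:
    text_type = str
    string_types = (str,)

With a shim along these lines, compat.text_type(token) behaves like unicode(token) on Python 2 and str(token) on Python 3, which is the substitution made in Token.__str__, _get_repr_value, and Identifier.get_typecast. The remaining changes need no shim: print(...) with a single argument and range() used only for iteration behave acceptably on both interpreters.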