Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/grouping.py       |  7
-rw-r--r--  sqlparse/filters/output.py        |  2
-rw-r--r--  sqlparse/filters/reindent.py      |  8
-rw-r--r--  sqlparse/filters/right_margin.py  |  2
-rw-r--r--  sqlparse/formatter.py             | 40
-rw-r--r--  sqlparse/lexer.py                 | 15
-rw-r--r--  sqlparse/sql.py                   | 48
-rw-r--r--  sqlparse/utils.py                 | 20
8 files changed, 61 insertions, 81 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index c680995..e8e9dc3 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -42,13 +42,14 @@ def _group_left_right(tlist, m, cls,
 def _group_matching(tlist, cls):
     """Groups Tokens that have beginning and end."""
-    idx = 1 if imt(tlist, i=cls) else 0
+    idx = 1 if isinstance(tlist, cls) else 0
     token = tlist.token_next_by(m=cls.M_OPEN, idx=idx)
     while token:
         end = find_matching(tlist, token, cls.M_OPEN, cls.M_CLOSE)
         if end is not None:
-            token = tlist.group_tokens(cls, tlist.tokens_between(token, end))
+            tokens = tlist.tokens_between(token, end)
+            token = tlist.group_tokens(cls, tokens)
             _group_matching(token, cls)
         token = tlist.token_next_by(m=cls.M_OPEN, idx=token)
@@ -120,7 +121,7 @@ def group_period(tlist):
 def group_arrays(tlist):
     token = tlist.token_next_by(i=sql.SquareBrackets)
     while token:
-        prev = tlist.token_prev(idx=token)
+        prev = tlist.token_prev(token)
         if imt(prev, i=(sql.SquareBrackets, sql.Identifier, sql.Function),
                t=(T.Name, T.String.Symbol,)):
             tokens = tlist.tokens_between(prev, token)
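
Note: _group_matching pairs each cls.M_OPEN token with its matching cls.M_CLOSE
via find_matching, then recurses into the group it just created, while
group_arrays folds a SquareBrackets group into the preceding name or identifier.
A rough way to inspect the resulting tree through the public API (a sketch;
exact tree shape varies between sqlparse versions):

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse('select (1 + (2 * 3))')[0]

    def walk(token, depth=0):
        # Nested Parenthesis groups show that _group_matching recursed
        # into the group it just created.
        print('  ' * depth + type(token).__name__, repr(token.value))
        if isinstance(token, sql.TokenList):
            for child in token.tokens:
                walk(child, depth + 1)

    walk(stmt)
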
diff --git a/sqlparse/filters/output.py b/sqlparse/filters/output.py
index d4528e9..bbc5076 100644
--- a/sqlparse/filters/output.py
+++ b/sqlparse/filters/output.py
@@ -22,7 +22,7 @@ class OutputFilter(object):
     def process(self, stmt):
         self.count += 1
         if self.count > 1:
-            varname = '%s%d' % (self.varname, self.count)
+            varname = '{f.varname}{f.count}'.format(f=self)
         else:
             varname = self.varname
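
Note: the new format string uses str.format's attribute lookup, where
{f.varname} reads the varname attribute of the object bound to f, so no tuple
needs to be built. A minimal standalone illustration (the Demo class is
invented for the example):

    class Demo(object):
        varname = 'sql'
        count = 3

    # '{f.varname}{f.count}' pulls the attributes off the keyword
    # argument f -- equivalent to '%s%d' % (obj.varname, obj.count)
    print('{f.varname}{f.count}'.format(f=Demo()))  # prints: sql3
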
diff --git a/sqlparse/filters/reindent.py b/sqlparse/filters/reindent.py
index f7ddfc9..b490631 100644
--- a/sqlparse/filters/reindent.py
+++ b/sqlparse/filters/reindent.py
@@ -97,13 +97,13 @@ class ReindentFilter(object):
             self._process_default(tlist)
 
     def _process_parenthesis(self, tlist):
-        is_DML_DLL = tlist.token_next_by(t=(T.Keyword.DML, T.Keyword.DDL))
+        is_dml_dll = tlist.token_next_by(t=(T.Keyword.DML, T.Keyword.DDL))
         first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)
-        with indent(self, 1 if is_DML_DLL else 0):
-            tlist.tokens.insert(0, self.nl()) if is_DML_DLL else None
+        with indent(self, 1 if is_dml_dll else 0):
+            tlist.tokens.insert(0, self.nl()) if is_dml_dll else None
             with offset(self, self._get_offset(first) + 1):
-                self._process_default(tlist, not is_DML_DLL)
+                self._process_default(tlist, not is_dml_dll)
 
     def _process_identifierlist(self, tlist):
         identifiers = list(tlist.get_identifiers())
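
Note: _process_parenthesis only adds the extra indent level when the
parenthesis contains a DML/DDL keyword (e.g. a subquery), which is what the
renamed is_dml_dll flag records. The filter is reachable through the public
API; a quick sketch (exact output depends on the sqlparse version):

    import sqlparse

    sql_text = 'insert into t (a, b) values ((select max(x) from u), 2)'
    print(sqlparse.format(sql_text, reindent=True))
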
diff --git a/sqlparse/filters/right_margin.py b/sqlparse/filters/right_margin.py
index 4e10dc0..b3f905d 100644
--- a/sqlparse/filters/right_margin.py
+++ b/sqlparse/filters/right_margin.py
@@ -38,7 +38,7 @@ class RightMarginFilter(object):
                     indent = match.group()
                 else:
                     indent = ''
-                yield sql.Token(T.Whitespace, '\n%s' % indent)
+                yield sql.Token(T.Whitespace, '\n{0}'.format(indent))
                 self.line = indent
             self.line += val
             yield token
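
Note: same modernization as in output.py, here with a positional field; the
two spellings are equivalent:

    indent = '    '
    assert '\n{0}'.format(indent) == '\n%s' % indent
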
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index 069109b..8f10557 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -15,61 +15,65 @@ def validate_options(options):
"""Validates options."""
kwcase = options.get('keyword_case')
if kwcase not in [None, 'upper', 'lower', 'capitalize']:
- raise SQLParseError('Invalid value for keyword_case: %r' % kwcase)
+ raise SQLParseError('Invalid value for keyword_case: '
+ '{0!r}'.format(kwcase))
idcase = options.get('identifier_case')
if idcase not in [None, 'upper', 'lower', 'capitalize']:
- raise SQLParseError('Invalid value for identifier_case: %r' % idcase)
+ raise SQLParseError('Invalid value for identifier_case: '
+ '{0!r}'.format(idcase))
ofrmt = options.get('output_format')
if ofrmt not in [None, 'sql', 'python', 'php']:
- raise SQLParseError('Unknown output format: %r' % ofrmt)
+ raise SQLParseError('Unknown output format: '
+ '{0!r}'.format(ofrmt))
strip_comments = options.get('strip_comments', False)
if strip_comments not in [True, False]:
- raise SQLParseError('Invalid value for strip_comments: %r'
- % strip_comments)
+ raise SQLParseError('Invalid value for strip_comments: '
+ '{0!r}'.format(strip_comments))
space_around_operators = options.get('use_space_around_operators', False)
if space_around_operators not in [True, False]:
- raise SQLParseError('Invalid value for use_space_around_operators: %r'
- % space_around_operators)
+ raise SQLParseError('Invalid value for use_space_around_operators: '
+ '{0!r}'.format(space_around_operators))
strip_ws = options.get('strip_whitespace', False)
if strip_ws not in [True, False]:
- raise SQLParseError('Invalid value for strip_whitespace: %r'
- % strip_ws)
+ raise SQLParseError('Invalid value for strip_whitespace: '
+ '{0!r}'.format(strip_ws))
truncate_strings = options.get('truncate_strings')
if truncate_strings is not None:
try:
truncate_strings = int(truncate_strings)
except (ValueError, TypeError):
- raise SQLParseError('Invalid value for truncate_strings: %r'
- % truncate_strings)
+ raise SQLParseError('Invalid value for truncate_strings: '
+ '{0!r}'.format(truncate_strings))
if truncate_strings <= 1:
- raise SQLParseError('Invalid value for truncate_strings: %r'
- % truncate_strings)
+ raise SQLParseError('Invalid value for truncate_strings: '
+ '{0!r}'.format(truncate_strings))
options['truncate_strings'] = truncate_strings
options['truncate_char'] = options.get('truncate_char', '[...]')
reindent = options.get('reindent', False)
if reindent not in [True, False]:
- raise SQLParseError('Invalid value for reindent: %r'
- % reindent)
+ raise SQLParseError('Invalid value for reindent: '
+ '{0!r}'.format(reindent))
elif reindent:
options['strip_whitespace'] = True
reindent_aligned = options.get('reindent_aligned', False)
if reindent_aligned not in [True, False]:
-        raise SQLParseError('Invalid value for reindent_aligned: %r'
-                            % reindent)
+        raise SQLParseError('Invalid value for reindent_aligned: '
+                            '{0!r}'.format(reindent_aligned))
     elif reindent_aligned:
         options['strip_whitespace'] = True
 
     indent_tabs = options.get('indent_tabs', False)
     if indent_tabs not in [True, False]:
-        raise SQLParseError('Invalid value for indent_tabs: %r' % indent_tabs)
+        raise SQLParseError('Invalid value for indent_tabs: '
+                            '{0!r}'.format(indent_tabs))
     elif indent_tabs:
         options['indent_char'] = '\t'
     else:
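
Note: these checks all surface through sqlparse.format, which runs
validate_options before applying any filters. For example (a sketch; the
message text follows the pattern above):

    import sqlparse
    from sqlparse.exceptions import SQLParseError

    try:
        sqlparse.format('select 1', keyword_case='UPPER')  # must be 'upper'
    except SQLParseError as e:
        print(e)  # Invalid value for keyword_case: 'UPPER'
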
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index dd15212..0fb8936 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -14,7 +14,7 @@
 from sqlparse import tokens
 from sqlparse.keywords import SQL_REGEX
-from sqlparse.compat import StringIO, string_types, text_type
+from sqlparse.compat import StringIO, string_types, u
 from sqlparse.utils import consume
@@ -37,17 +37,10 @@ class Lexer(object):
         ``stack`` is the initial stack (default: ``['root']``)
         """
-        encoding = encoding or 'utf-8'
-
         if isinstance(text, string_types):
-            text = StringIO(text)
-
-        text = text.read()
-        if not isinstance(text, text_type):
-            try:
-                text = text.decode(encoding)
-            except UnicodeDecodeError:
-                text = text.decode('unicode-escape')
+            text = u(text, encoding)
+        elif isinstance(text, StringIO):
+            text = u(text.read(), encoding)
 
         iterable = enumerate(text)
         for pos, char in iterable:
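
Note: the decode logic now lives in a single u() helper imported from
sqlparse.compat. Its body is not part of this diff; a sketch of what such a
Python 2/3 helper typically looks like, keeping the unicode-escape fallback
the old inline code had (an assumption, not the actual compat.u):

    def u(s, encoding=None):
        # Sketch only -- not the real sqlparse.compat.u.
        encoding = encoding or 'utf-8'
        if isinstance(s, str):       # already text on Python 3
            return s
        try:
            return s.decode(encoding)
        except UnicodeDecodeError:
            return s.decode('unicode-escape')
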
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index daa5cf5..eadd04f 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -167,7 +167,7 @@ class TokenList(Token):
         idx = 0
         for token in self.flatten():
             end = idx + len(token.value)
-            if idx <= offset <= end:
+            if idx <= offset < end:
                 return token
             idx = end
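
Note: making the interval half-open (idx <= offset < end) means an offset that
falls exactly on a token boundary now matches only the token that starts
there, instead of matching the token that ends there first. A toy illustration
of the boundary case:

    tokens = ['SELECT', ' ', 'foo']   # spans [0,6), [6,7), [7,10)

    def token_at_offset(offset):
        idx = 0
        for tok in tokens:
            end = idx + len(tok)
            if idx <= offset < end:   # half-open interval
                return tok
            idx = end

    # Offset 6 is the boundary between 'SELECT' and ' '.  With the old
    # 'offset <= end' test, 'SELECT' would have matched first.
    assert token_at_offset(6) == ' '
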
@@ -248,8 +248,6 @@ class TokenList(Token):
         If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
         ``None`` is returned if there's no previous token.
         """
-        if isinstance(idx, int):
-            idx += 1  # alot of code usage current pre-compensates for this
         funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
                                 (skip_cm and imt(tk, t=T.Comment)))
         return self._token_matching(funcs, idx, reverse=True)
@@ -260,8 +258,6 @@ class TokenList(Token):
         If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
         ``None`` is returned if there's no next token.
         """
-        if isinstance(idx, int):
-            idx += 1  # alot of code usage current pre-compensates for this
         funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
                                 (skip_cm and imt(tk, t=T.Comment)))
         return self._token_matching(funcs, idx)
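
Note: the deleted branch silently bumped integer indices by one, so callers
passed the index of the current token and the search started at the following
one; now the index is used as given and any offset is the caller's
responsibility. Schematically (a toy list, not the real API):

    tokens = ['SELECT', ' ', 'foo']

    def token_next_old(idx):
        idx += 1              # hidden compensation inside the helper
        return tokens[idx]

    def token_next_new(idx):
        return tokens[idx]    # index taken at face value

    assert token_next_old(0) == token_next_new(1) == ' '
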
@@ -283,34 +279,26 @@ class TokenList(Token):
     def group_tokens(self, grp_cls, tokens, skip_ws=False, extend=False):
         """Replace tokens by an instance of *grp_cls*."""
-        if skip_ws:
-            while tokens and tokens[-1].is_whitespace():
-                tokens = tokens[:-1]
+
+        while skip_ws and tokens and tokens[-1].is_whitespace():
+            tokens = tokens[:-1]
 
         left = tokens[0]
         idx = self.token_index(left)
 
-        if extend:
-            if not isinstance(left, grp_cls):
-                grp = grp_cls([left])
-                self.tokens.remove(left)
-                self.tokens.insert(idx, grp)
-                left = grp
-            left.parent = self
-            tokens = tokens[1:]
-            left.tokens.extend(tokens)
-            left.value = str(left)
-
+        if extend and isinstance(left, grp_cls):
+            grp = left
+            grp.tokens.extend(tokens[1:])
         else:
-            left = grp_cls(tokens)
-            left.parent = self
-            self.tokens.insert(idx, left)
+            grp = grp_cls(tokens)
 
         for token in tokens:
-            token.parent = left
+            token.parent = grp
             self.tokens.remove(token)
-        return left
+
+        self.tokens.insert(idx, grp)
+        grp.parent = self
+        return grp
 
     def insert_before(self, where, token):
         """Inserts *token* before *where*."""
@@ -322,7 +310,7 @@ class TokenList(Token):
         if next_token is None:
             self.tokens.append(token)
         else:
-            self.tokens.insert(self.token_index(next_token), token)
+            self.insert_before(next_token, token)
 
     def has_alias(self):
         """Returns ``True`` if an alias is present."""
@@ -435,19 +423,13 @@ class Identifier(TokenList):
     def get_typecast(self):
         """Returns the typecast or ``None`` of this object as a string."""
         marker = self.token_next_by(m=(T.Punctuation, '::'))
-        if marker is None:
-            return None
         next_ = self.token_next(marker, False)
-        if next_ is None:
-            return None
-        return next_.value
+        return next_.value if next_ else None
 
     def get_ordering(self):
         """Returns the ordering or ``None`` as uppercase string."""
         ordering = self.token_next_by(t=T.Keyword.Order)
-        if ordering is None:
-            return None
-        return ordering.normalized
+        return ordering.normalized if ordering else None
 
     def get_array_indices(self):
         """Returns an iterator of index token lists"""
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
index 4da44c6..8253e0b 100644
--- a/sqlparse/utils.py
+++ b/sqlparse/utils.py
@@ -78,36 +78,36 @@ def recurse(*cls):
 def imt(token, i=None, m=None, t=None):
-    """Aid function to refactor comparisons for Instance, Match and TokenType
-    Aid fun
+    """Helper function to simplify comparisons for Instance, Match and TokenType
     :param token:
     :param i: Class or Tuple/List of Classes
     :param m: Tuple of TokenType & Value. Can be list of Tuple for multiple
     :param t: TokenType or Tuple/List of TokenTypes
     :return: bool
     """
-    t = (t,) if t and not isinstance(t, (list, tuple)) else t
-    m = (m,) if m and not isinstance(m, (list,)) else m
+    clss = i
+    types = [t, ] if t and not isinstance(t, list) else t
+    mpatterns = [m, ] if m and not isinstance(m, list) else m
 
     if token is None:
         return False
-    elif i is not None and isinstance(token, i):
+    elif clss and isinstance(token, clss):
         return True
-    elif m is not None and any((token.match(*x) for x in m)):
+    elif mpatterns and any((token.match(*pattern) for pattern in mpatterns)):
         return True
-    elif t is not None and token.ttype in t:
+    elif types and any([token.ttype in ttype for ttype in types]):
         return True
     else:
         return False
-def find_matching(tlist, token, M1, M2):
+def find_matching(tlist, token, open_pattern, close_pattern):
     idx = tlist.token_index(token)
     depth = 0
     for token in tlist.tokens[idx:]:
-        if token.match(*M1):
+        if token.match(*open_pattern):
             depth += 1
-        elif token.match(*M2):
+        elif token.match(*close_pattern):
             depth -= 1
             if depth == 0:
                 return token
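
Note: imt is the comparison helper used throughout the grouping code: i= tests
the token's class, m= feeds (ttype, value) patterns to token.match, and t=
compares token.ttype (including subtypes), while find_matching walks forward
counting open/close pattern depth until it balances. A usage sketch for imt
against a parsed statement:

    import sqlparse
    from sqlparse import sql
    from sqlparse import tokens as T
    from sqlparse.utils import imt

    stmt = sqlparse.parse('select foo from bar')[0]
    first = stmt.token_first()                      # the SELECT keyword

    assert imt(first, t=T.Keyword.DML)              # ttype check
    assert imt(first, m=(T.Keyword.DML, 'SELECT'))  # (ttype, value) match
    assert not imt(first, i=sql.Identifier)         # instance check
    assert not imt(None)                            # None never matches
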