author     Andi Albrecht <albrecht.andi@gmail.com>    2015-03-05 11:54:33 +0100
committer  Andi Albrecht <albrecht.andi@gmail.com>    2015-03-05 11:54:33 +0100
commit     bf2616058ada2748bb79fc7c861abb7748ffb89a (patch)
tree       712c0352906003c6b4d4148705c28706a7b426a0 /sqlparse
parent     15b0cb9e75ca378e94b55b7f1ff23108f0899cde (diff)
parent     acdebefd638225eefe438919897ba68e7882504b (diff)
Merge pull request #177 from darikg/brackets
Better square bracket / array index handling
Diffstat (limited to 'sqlparse')
 sqlparse/engine/grouping.py | 80
 sqlparse/lexer.py           |  6
 sqlparse/sql.py             | 18
 sqlparse/tokens.py          |  1
4 files changed, 79 insertions, 26 deletions
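
In short, an array index such as foo[1] is no longer lexed as a single Punctuation.ArrayIndex token; the brackets stay plain punctuation and the grouping stage wraps them into a SquareBrackets token list nested inside the Identifier. A minimal sketch of the post-merge behaviour (the exact token-tree shape here is an assumption based on the hunks below, not output taken from the commit):

import sqlparse
from sqlparse import sql

# Post-merge, the index expression should appear as a SquareBrackets
# group nested inside the Identifier, not as ArrayIndex punctuation.
stmt = sqlparse.parse('SELECT foo[1] FROM bar')[0]

for token in stmt.tokens:
    if isinstance(token, sql.Identifier):
        has_index = any(isinstance(t, sql.SquareBrackets) for t in token.tokens)
        print(token.value, '-> has array index:', has_index)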
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 9314b89..a317044 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -51,19 +51,21 @@ def _group_left_right(tlist, ttype, value, cls,
ttype, value)
+def _find_matching(idx, tlist, start_ttype, start_value, end_ttype, end_value):
+ depth = 1
+ for tok in tlist.tokens[idx:]:
+ if tok.match(start_ttype, start_value):
+ depth += 1
+ elif tok.match(end_ttype, end_value):
+ depth -= 1
+ if depth == 1:
+ return tok
+ return None
+
+
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon=False, recurse=False):
- def _find_matching(i, tl, stt, sva, ett, eva):
- depth = 1
- for n in xrange(i, len(tl.tokens)):
- t = tl.tokens[n]
- if t.match(stt, sva):
- depth += 1
- elif t.match(ett, eva):
- depth -= 1
- if depth == 1:
- return t
- return None
+
[_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon) for sgroup in tlist.get_sublists()
if recurse]
@@ -157,16 +159,17 @@ def group_identifier(tlist):
lambda y: (y.match(T.Punctuation, '.')
or y.ttype in (T.Operator,
T.Wildcard,
- T.ArrayIndex,
- T.Name)),
+ T.Name)
+ or isinstance(y, sql.SquareBrackets)),
lambda y: (y.ttype in (T.String.Symbol,
T.Name,
T.Wildcard,
- T.ArrayIndex,
T.Literal.String.Single,
T.Literal.Number.Integer,
T.Literal.Number.Float)
- or isinstance(y, (sql.Parenthesis, sql.Function)))))
+ or isinstance(y, (sql.Parenthesis,
+ sql.SquareBrackets,
+ sql.Function)))))
for t in tl.tokens[i:]:
# Don't take whitespaces into account.
if t.ttype is T.Whitespace:
@@ -275,9 +278,48 @@ def group_identifier_list(tlist):
tcomma = next_
-def group_parenthesis(tlist):
- _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')',
- sql.Parenthesis)
+def group_brackets(tlist):
+ """Group parentheses () or square brackets []
+
+ This is just like _group_matching, but complicated by the fact that
+ round brackets can contain square bracket groups and vice versa
+ """
+
+ if isinstance(tlist, (sql.Parenthesis, sql.SquareBrackets)):
+ idx = 1
+ else:
+ idx = 0
+
+ # Find the first opening bracket
+ token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])
+
+ while token:
+ start_val = token.value # either '(' or '['
+ if start_val == '(':
+ end_val = ')'
+ group_class = sql.Parenthesis
+ else:
+ end_val = ']'
+ group_class = sql.SquareBrackets
+
+ tidx = tlist.token_index(token)
+
+ # Find the corresponding closing bracket
+ end = _find_matching(tidx, tlist, T.Punctuation, start_val,
+ T.Punctuation, end_val)
+
+ if end is None:
+ idx = tidx + 1
+ else:
+ group = tlist.group_tokens(group_class,
+ tlist.tokens_between(token, end))
+
+ # Check for nested bracket groups within this group
+ group_brackets(group)
+ idx = tlist.token_index(group) + 1
+
+ # Find the next opening bracket
+ token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])
def group_comments(tlist):
@@ -393,7 +435,7 @@ def align_comments(tlist):
def group(tlist):
for func in [
group_comments,
- group_parenthesis,
+ group_brackets,
group_functions,
group_where,
group_case,
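
The helper promoted to module level, _find_matching, walks the token list from the opening bracket and counts nesting depth until the balancing closer is reached; group_brackets then recurses into each new group, which is what lets round and square brackets nest inside one another. The same depth-counting idea on a plain string, as an illustrative sketch rather than the library code:

def find_matching(text, open_idx, open_ch='[', close_ch=']'):
    """Return the index of the close_ch matching text[open_idx], or None."""
    depth = 0
    for i in range(open_idx, len(text)):
        if text[i] == open_ch:
            depth += 1
        elif text[i] == close_ch:
            depth -= 1
            if depth == 0:
                return i
    return None

# Nested pairs resolve outermost-first, which is why group_brackets can
# group the outer pair and then recurse into the freshly built group.
assert find_matching('a[b[0]]', 1) == 6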
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 999eb2c..4707990 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -194,8 +194,10 @@ class Lexer(object):
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
# not a real string literal in ANSI SQL:
(r'(""|".*?[^\\]")', tokens.String.Symbol),
- (r'(?<=[\w\]])(\[[^\]]*?\])', tokens.Punctuation.ArrayIndex),
- (r'(\[[^\]]+\])', tokens.Name),
+ # sqlite names can be escaped with [square brackets]. left bracket
+ # cannot be preceded by word character or a right bracket --
+ # otherwise it's probably an array index
+ (r'(?<![\w\])])(\[[^\]]+\])', tokens.Name),
(r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
(r'END(\s+IF|\s+LOOP)?\b', tokens.Keyword),
(r'NOT NULL\b', tokens.Keyword),
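
With the new lookbehind, a bracketed chunk is lexed as a Name only when it is not preceded by a word character or a closing bracket; an index like foo[1] keeps plain punctuation so the grouping stage can pick it up. A quick way to inspect the difference (the printed ttypes are what the rule should produce, not output copied from a run):

import sqlparse

# SQLite-style quoted identifier: '[my table]' is not preceded by \w or ']',
# so the whole chunk should lex as a single Name token.
quoted = sqlparse.parse('SELECT * FROM [my table]')[0]
print([(t.ttype, t.value) for t in quoted.flatten()])

# Array index: '[' follows a word character, so it stays Punctuation and is
# later grouped into a SquareBrackets token list.
indexed = sqlparse.parse('SELECT foo[1] FROM bar')[0]
print([(t.ttype, t.value) for t in indexed.flatten()])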
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 8492c5e..9fcb546 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -511,11 +511,12 @@ class Identifier(TokenList):
return ordering.value.upper()
def get_array_indices(self):
- """Returns an iterator of index expressions as strings"""
+ """Returns an iterator of index token lists"""
- # Use [1:-1] index to discard the square brackets
- return (tok.value[1:-1] for tok in self.tokens
- if tok.ttype in T.ArrayIndex)
+ for tok in self.tokens:
+ if isinstance(tok, SquareBrackets):
+ # Use [1:-1] index to discard the square brackets
+ yield tok.tokens[1:-1]
class IdentifierList(TokenList):
@@ -542,6 +543,15 @@ class Parenthesis(TokenList):
return self.tokens[1:-1]
+class SquareBrackets(TokenList):
+ """Tokens between square brackets"""
+
+ __slots__ = ('value', 'ttype', 'tokens')
+
+ @property
+ def _groupable_tokens(self):
+ return self.tokens[1:-1]
+
class Assignment(TokenList):
"""An assignment like 'var := val;'"""
__slots__ = ('value', 'ttype', 'tokens')
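
Identifier.get_array_indices now yields the tokens between the brackets instead of plain strings, so callers that want the old text form have to join the token values themselves. A hedged sketch of that migration:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse('SELECT foo[bar + 1] FROM t')[0]

for token in stmt.tokens:
    if isinstance(token, sql.Identifier):
        for index_tokens in token.get_array_indices():
            # The iterator used to yield strings such as 'bar + 1'; it now
            # yields the token list, so rebuild the text from the values.
            print(''.join(t.value for t in index_tokens))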
diff --git a/sqlparse/tokens.py b/sqlparse/tokens.py
index 014984b..01a9b89 100644
--- a/sqlparse/tokens.py
+++ b/sqlparse/tokens.py
@@ -57,7 +57,6 @@ Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
-ArrayIndex = Punctuation.ArrayIndex
Operator = Token.Operator
Comparison = Operator.Comparison
Wildcard = Token.Wildcard