diff options
| author | quest <quest@wonky.windwards.net> | 2012-04-22 01:41:22 +0200 |
|---|---|---|
| committer | quest <quest@wonky.windwards.net> | 2012-04-22 01:41:22 +0200 |
| commit | a16c08703c8eb213a8b570bb16636fbe7a2b4a28 (patch) | |
| tree | 5d13bc4428bf678c75e0cbbdf1e35ec5655788ee /sqlparse | |
| parent | 1f8dfd8723dd7aa9610fd9249775dc3b403d7e77 (diff) | |
| download | sqlparse-a16c08703c8eb213a8b570bb16636fbe7a2b4a28.tar.gz | |
various optimizations in sql.py
Diffstat (limited to 'sqlparse')
| -rw-r--r-- | sqlparse/engine/grouping.py | 3 | ||||
| -rw-r--r-- | sqlparse/sql.py | 27 |
2 files changed, 21 insertions, 9 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py index 55ec7e2..1487c24 100644 --- a/sqlparse/engine/grouping.py +++ b/sqlparse/engine/grouping.py @@ -55,7 +55,8 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value, cls, include_semicolon=False, recurse=False): def _find_matching(i, tl, stt, sva, ett, eva): depth = 1 - for t in tl.tokens[i:]: + for n in xrange(i, len(tl.tokens)): + t = tl.tokens[n] if t.match(stt, sva): depth += 1 elif t.match(ett, eva): diff --git a/sqlparse/sql.py b/sqlparse/sql.py index 9c7aeee..31fa34d 100644 --- a/sqlparse/sql.py +++ b/sqlparse/sql.py @@ -15,11 +15,13 @@ class Token(object): the type of the token. """ - __slots__ = ('value', 'ttype', 'parent') + __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword') def __init__(self, ttype, value): self.value = value + self.normalized = value.upper() if ttype in T.Keyword else value self.ttype = ttype + self.is_keyword = ttype in T.Keyword self.parent = None def __str__(self): @@ -71,9 +73,9 @@ class Token(object): type_matched = self.ttype is ttype if not type_matched or values is None: return type_matched - if isinstance(values, basestring): - values = set([values]) if regex: + if isinstance(values, basestring): + values = set([values]) if self.ttype is T.Keyword: values = set([re.compile(v, re.IGNORECASE) for v in values]) else: @@ -83,10 +85,18 @@ class Token(object): return True return False else: - if self.ttype in T.Keyword: - values = set([v.upper() for v in values]) - return self.value.upper() in values + if isinstance(values, basestring): + if self.is_keyword: + return values.upper() == self.normalized + else: + return values == self.value + if self.is_keyword: + for v in values: + if v.upper() == self.normalized: + return True + return False else: + print len(values) return self.value in values def is_group(self): @@ -227,7 +237,8 @@ class TokenList(Token): if not isinstance(idx, int): idx = self.token_index(idx) - for token in self.tokens[idx:]: + for n in xrange(idx, len(self.tokens)): + token = self.tokens[n] if token.match(ttype, value, regex): return token @@ -395,7 +406,7 @@ class Statement(TokenList): return 'UNKNOWN' elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL): - return first_token.value.upper() + return first_token.normalized return 'UNKNOWN' |
