| field | value | date |
|---|---|---|
| author | Vik <vmuriart@users.noreply.github.com> | 2016-06-04 14:58:22 -0700 |
| committer | Vik <vmuriart@users.noreply.github.com> | 2016-06-04 14:58:22 -0700 |
| commit | c6a5e7ac2a5ecc993f4e5292ab16e6df6b84f26c (patch) | |
| tree | e4bba42af408aa4199883aba5715ca56b85a12db /sqlparse/lexer.py | |
| parent | f8f85fa4f1a8265fb78ea2e747c0476e1f04b09f (diff) | |
| parent | ece7c26727b28ea05feeeba3a9c2d1b4b7eb5c54 (diff) | |
| download | sqlparse-c6a5e7ac2a5ecc993f4e5292ab16e6df6b84f26c.tar.gz | |
Merge pull request #250 from vmuriart/lexer_regex
Simplify Lexer and Regex
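In short: the per-state token tables and the state stack are gone. `SQL_REGEX` is now consumed as a flat, ordered list of (match function, token action) pairs, and `get_tokens` becomes a `staticmethod` on an otherwise empty `Lexer` class kept for back-support. Below is a minimal, self-contained sketch of that flat-table scan; the table entries here are invented for illustration (the real `SQL_REGEX` lives in `sqlparse.keywords`), but the first-match-wins loop mirrors the new `get_tokens`.

```python
import re

# Hypothetical flat table in the spirit of the new SQL_REGEX: an ordered
# list of (compiled match function, token action) pairs. The real table
# lives in sqlparse.keywords and is much larger.
FLAGS = re.IGNORECASE | re.UNICODE
TABLE = [
    (re.compile(r'\s+', FLAGS).match, 'Whitespace'),
    (re.compile(r'--.*?(?:\r\n|\r|\n|$)', FLAGS).match, 'Comment.Single'),
    (re.compile(r'\d+', FLAGS).match, 'Number'),
    (re.compile(r'[A-Z_][A-Z0-9_]*', FLAGS).match, 'Name'),
    (re.compile(r'.', FLAGS).match, 'Error'),  # catch-all fallback
]


def get_tokens(text):
    """Scan text with the flat table; the first matching pattern wins."""
    pos = 0
    while pos < len(text):
        for rexmatch, action in TABLE:
            m = rexmatch(text, pos)
            if m:
                yield action, m.group()
                pos = m.end()
                break


print(list(get_tokens('SELECT 42 -- answer')))
```

One trade-off visible in the diff: the removed state stack was only used for multiline comments (per the deleted `# Only Multiline comments` branch), so that case now has to be covered by the patterns themselves.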
Diffstat (limited to 'sqlparse/lexer.py')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sqlparse/lexer.py | 42 |

1 file changed, 7 insertions, 35 deletions
```diff
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 84c8e78..dd15212 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -12,8 +12,6 @@
 # It's separated from the rest of pygments to increase performance
 # and to allow some customizations.
 
-import re
-
 from sqlparse import tokens
 from sqlparse.keywords import SQL_REGEX
 from sqlparse.compat import StringIO, string_types, text_type
@@ -21,26 +19,12 @@ from sqlparse.utils import consume
 
 
 class Lexer(object):
-    flags = re.IGNORECASE | re.UNICODE
-
-    def __init__(self):
-        self._tokens = {}
-
-        for state in SQL_REGEX:
-            self._tokens[state] = []
-
-            for tdef in SQL_REGEX[state]:
-                rex = re.compile(tdef[0], self.flags).match
-                new_state = None
-                if len(tdef) > 2:
-                    # Only Multiline comments
-                    if tdef[2] == '#pop':
-                        new_state = -1
-                    elif tdef[2] in SQL_REGEX:
-                        new_state = (tdef[2],)
-                self._tokens[state].append((rex, tdef[1], new_state))
-
-    def get_tokens(self, text, encoding=None):
+    """Lexer
+    Empty class. Leaving for back-support
+    """
+
+    @staticmethod
+    def get_tokens(text, encoding=None):
         """
         Return an iterable of (tokentype, value) pairs generated from
         `text`. If `unfiltered` is set to `True`, the filtering mechanism
@@ -54,8 +38,6 @@ class Lexer(object):
         ``stack`` is the inital stack (default: ``['root']``)
         """
         encoding = encoding or 'utf-8'
-        statestack = ['root', ]
-        statetokens = self._tokens['root']
 
         if isinstance(text, string_types):
             text = StringIO(text)
@@ -69,7 +51,7 @@ class Lexer(object):
             iterable = enumerate(text)
 
         for pos, char in iterable:
-            for rexmatch, action, new_state in statetokens:
+            for rexmatch, action in SQL_REGEX:
                 m = rexmatch(text, pos)
 
                 if not m:
@@ -79,16 +61,6 @@ class Lexer(object):
                 elif callable(action):
                     yield action(m.group())
 
-                if isinstance(new_state, tuple):
-                    for state in new_state:
-                        # fixme: multiline-comments not stackable
-                        if not (state == 'multiline-comments'
-                                and statestack[-1] == 'multiline-comments'):
-                            statestack.append(state)
-                elif isinstance(new_state, int):
-                    del statestack[new_state:]
-                    statetokens = self._tokens[statestack[-1]]
-
                 consume(iterable, m.end() - pos - 1)
                 break
             else:
```
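With the instance state gone, `get_tokens` can be called straight off the class (instances still work, which is why the empty class is kept). A quick smoke test, assuming this revision of sqlparse is importable:

```python
from sqlparse.lexer import Lexer

# get_tokens yields (tokentype, value) pairs, per its docstring; as a
# staticmethod it needs no Lexer() instance.
for tokentype, value in Lexer.get_tokens('SELECT a FROM b'):
    print(tokentype, repr(value))
```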
