Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/keywords.py  | 71
-rw-r--r--  sqlparse/lexer.py     | 42
2 files changed, 48 insertions(+), 65 deletions(-)
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index c6db0a9..38b7765 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -5,6 +5,8 @@
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
+import re
+
from sqlparse import tokens
@@ -15,37 +17,49 @@ def is_keyword(value):
SQL_REGEX = {
'root': [
- (r'(--|# ).*?(\r\n|\r|\n)', tokens.Comment.Single),
- # $ matches *before* newline, therefore we have two patterns
- # to match Comment.Single
- (r'(--|# ).*?$', tokens.Comment.Single),
+ (r'(--|# )\+.*?(\r\n|\r|\n|$)', tokens.Comment.Single.Hint),
+ (r'/\*\+[\s\S]*?\*/', tokens.Comment.Multiline.Hint),
+
+ (r'(--|# ).*?(\r\n|\r|\n|$)', tokens.Comment.Single),
+ (r'/\*[\s\S]*?\*/', tokens.Comment.Multiline),
+
(r'(\r\n|\r|\n)', tokens.Newline),
(r'\s+', tokens.Whitespace),
- (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
+
(r':=', tokens.Assignment),
(r'::', tokens.Punctuation),
- (r'[*]', tokens.Wildcard),
- (r'CASE\b', tokens.Keyword), # extended CASE(foo)
+
+ (r'\*', tokens.Wildcard),
+
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
- (r'\$([^\W\d]\w*)?\$', tokens.Name.Builtin),
- (r'\?{1}', tokens.Name.Placeholder),
- (r'%\(\w+\)s', tokens.Name.Placeholder),
- (r'%s', tokens.Name.Placeholder),
+ (r'\$([_A-Z]\w*)?\$', tokens.Name.Builtin),
+
+ (r'\?', tokens.Name.Placeholder),
+ (r'%(\(\w+\))?s', tokens.Name.Placeholder),
(r'[$:?]\w+', tokens.Name.Placeholder),
+
# FIXME(andi): VALUES shouldn't be listed here
# see https://github.com/andialbrecht/sqlparse/pull/64
- (r'VALUES', tokens.Keyword),
- (r'(@|##|#)[^\W\d_]\w+', tokens.Name),
# IN is special, it may be followed by a parenthesis, but
# is never a function, see issue183
- (r'in\b(?=[ (])?', tokens.Keyword),
- (r'USING(?=\()', tokens.Keyword),
- (r'[^\W\d_]\w*(?=[.(])', tokens.Name), # see issue39
- (r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
- (r'[-]?[0-9]*(\.[0-9]+)?[eE][-]?[0-9]+', tokens.Number.Float),
- (r'[-]?[0-9]*\.[0-9]+', tokens.Number.Float),
- (r'[-]?[0-9]+', tokens.Number.Integer),
+ (r'(CASE|IN|VALUES|USING)\b', tokens.Keyword),
+
+ (r'(@|##|#)[A-Z]\w+', tokens.Name),
+
+ # see issue #39
+ # A period with surrounding spaces (`schema . name`) is a valid identifier
+ # TODO: Spaces before period not implemented
+ (r'[A-Z]\w*(?=\s*\.)', tokens.Name), # 'Name' .
+ (r'(?<=\.)[A-Z]\w*', tokens.Name), # .'Name'
+ (r'[A-Z]\w*(?=\()', tokens.Name), # side effect: change kw to func
+
+ # TODO: `1.` and `.1` are valid numbers
+ (r'-?0x[\dA-F]+', tokens.Number.Hexadecimal),
+ (r'-?\d*(\.\d+)?E-?\d+', tokens.Number.Float),
+ (r'-?\d*\.\d+', tokens.Number.Float),
+ (r'-?\d+', tokens.Number.Integer),
+
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
# not a real string literal in ANSI SQL:
(r'(""|".*?[^\\]")', tokens.String.Symbol),
@@ -56,22 +70,20 @@ SQL_REGEX = {
(r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?'
r'|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
(r'END(\s+IF|\s+LOOP|\s+WHILE)?\b', tokens.Keyword),
- (r'NOT NULL\b', tokens.Keyword),
+ (r'NOT\s+NULL\b', tokens.Keyword),
(r'CREATE(\s+OR\s+REPLACE)?\b', tokens.Keyword.DDL),
(r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
- (r'(?<=\.)[^\W\d_]\w*', tokens.Name),
- (r'[^\W\d]\w*', is_keyword),
+
+ (r'[_A-Z]\w*', is_keyword),
+
(r'[;:()\[\],\.]', tokens.Punctuation),
(r'[<>=~!]+', tokens.Operator.Comparison),
(r'[+/@#%^&|`?^-]+', tokens.Operator),
- ],
- 'multiline-comments': [
- (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
- (r'\*/', tokens.Comment.Multiline, '#pop'),
- (r'[^/\*]+', tokens.Comment.Multiline),
- (r'[/*]', tokens.Comment.Multiline),
]}
+FLAGS = re.IGNORECASE | re.UNICODE
+SQL_REGEX = [(re.compile(rx, FLAGS).match, tt) for rx, tt in SQL_REGEX['root']]
+
KEYWORDS = {
'ABORT': tokens.Keyword,
'ABS': tokens.Keyword,
@@ -600,7 +612,6 @@ KEYWORDS = {
'VARYING': tokens.Name.Builtin,
}
-
KEYWORDS_COMMON = {
'SELECT': tokens.Keyword.DML,
'INSERT': tokens.Keyword.DML,
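
The net effect in keywords.py: the stateful 'multiline-comments' sub-lexer is gone, both comment styles now match with single non-greedy regexes (hint patterns tried first), and the table is compiled once at import time into plain (match, tokentype) pairs. A minimal sketch of how such a flat table drives matching follows; the pattern subset and the first_token helper are illustrative only, not part of the patch:

    import re
    from sqlparse import tokens

    FLAGS = re.IGNORECASE | re.UNICODE

    # Illustrative subset of the patched table: the hint pattern is listed
    # before the plain comment pattern, so /*+ ... */ wins when both match.
    SQL_REGEX = [(re.compile(rx, FLAGS).match, tt) for rx, tt in [
        (r'/\*\+[\s\S]*?\*/', tokens.Comment.Multiline.Hint),
        (r'/\*[\s\S]*?\*/', tokens.Comment.Multiline),
        (r'\s+', tokens.Whitespace),
        (r'-?\d+', tokens.Number.Integer),
        (r'[_A-Z]\w*', tokens.Name),
    ]]

    def first_token(text, pos=0):
        # Try each pattern at pos; the first match wins, as in the lexer loop.
        for rexmatch, tokentype in SQL_REGEX:
            m = rexmatch(text, pos)
            if m:
                return tokentype, m.group()
        return None

    print(first_token('/*+ INDEX(t i) */ SELECT 1'))
    # -> (Token.Comment.Multiline.Hint, '/*+ INDEX(t i) */')
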
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 84c8e78..dd15212 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -12,8 +12,6 @@
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
-import re
-
from sqlparse import tokens
from sqlparse.keywords import SQL_REGEX
from sqlparse.compat import StringIO, string_types, text_type
@@ -21,26 +19,12 @@ from sqlparse.utils import consume
class Lexer(object):
- flags = re.IGNORECASE | re.UNICODE
-
- def __init__(self):
- self._tokens = {}
-
- for state in SQL_REGEX:
- self._tokens[state] = []
-
- for tdef in SQL_REGEX[state]:
- rex = re.compile(tdef[0], self.flags).match
- new_state = None
- if len(tdef) > 2:
- # Only Multiline comments
- if tdef[2] == '#pop':
- new_state = -1
- elif tdef[2] in SQL_REGEX:
- new_state = (tdef[2],)
- self._tokens[state].append((rex, tdef[1], new_state))
-
- def get_tokens(self, text, encoding=None):
+ """Lexer
+ Empty class. Leaving for back-support
+ """
+
+ @staticmethod
+ def get_tokens(text, encoding=None):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
@@ -54,8 +38,6 @@ class Lexer(object):
``stack`` is the initial stack (default: ``['root']``)
"""
encoding = encoding or 'utf-8'
- statestack = ['root', ]
- statetokens = self._tokens['root']
if isinstance(text, string_types):
text = StringIO(text)
@@ -69,7 +51,7 @@ class Lexer(object):
iterable = enumerate(text)
for pos, char in iterable:
- for rexmatch, action, new_state in statetokens:
+ for rexmatch, action in SQL_REGEX:
m = rexmatch(text, pos)
if not m:
@@ -79,16 +61,6 @@ class Lexer(object):
elif callable(action):
yield action(m.group())
- if isinstance(new_state, tuple):
- for state in new_state:
- # fixme: multiline-comments not stackable
- if not (state == 'multiline-comments'
- and statestack[-1] == 'multiline-comments'):
- statestack.append(state)
- elif isinstance(new_state, int):
- del statestack[new_state:]
- statetokens = self._tokens[statestack[-1]]
-
consume(iterable, m.end() - pos - 1)
break
else:
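
With the state stack removed, lexer.py simply tries the flat SQL_REGEX at each position, and Lexer survives only as an empty shell. A hedged usage sketch (the token names shown in comments assume the patched table above, plus KEYWORDS_COMMON mapping SELECT to Keyword.DML):

    from sqlparse.lexer import Lexer

    # get_tokens is now a staticmethod, so no instance is required,
    # though Lexer().get_tokens(...) still works for existing callers.
    for tokentype, value in Lexer.get_tokens('SELECT 1 -- done\n'):
        print(tokentype, repr(value))
    # Emits pairs roughly like:
    #   Token.Keyword.DML 'SELECT'
    #   Token.Whitespace  ' '
    #   Token.Number.Integer '1'
    #   Token.Comment.Single '-- done\n'
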