author     Jesús Leganés Combarro "Piranna" <piranna@gmail.com>   2012-05-19 21:43:54 +0200
committer  Jesús Leganés Combarro "Piranna" <piranna@gmail.com>   2012-05-19 21:43:54 +0200
commit     59bfe75aaec583fdb92668819f021ba18c4595e3 (patch)
tree       07a58dce5da5649b7e1630fa5e0b35b5589f54a7 /sqlparse
parent     8bd03f158343bd2b83802c1059e15953c72f9f36 (diff)
parent     66742da10ebdc2bc485022ecbd59278d3fc96488 (diff)
download   sqlparse-59bfe75aaec583fdb92668819f021ba18c4595e3.tar.gz
Merge branch 'master' into antiorm
Conflicts: sqlparse/filters.py
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/__init__.py |  2
-rw-r--r--  sqlparse/filters.py         | 55
-rw-r--r--  sqlparse/lexer.py           |  3
-rw-r--r--  sqlparse/sql.py             |  9
-rw-r--r--  sqlparse/utils.py           | 47
5 files changed, 99 insertions(+), 17 deletions(-)
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 3e2822b..c30b6ca 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -61,6 +61,8 @@ class FilterStack(object):
def _run1(stream):
ret = []
for stmt in stream:
+ for i in stmt.flatten():
+ print repr(i)
for filter_ in self.stmtprocess:
filter_.process(self, stmt)
ret.append(stmt)
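(For context: flatten() walks a statement's token tree and yields only its leaf tokens, which is what the debug loop above prints for every statement. A minimal sketch of the same call through the public API; the query string is arbitrary and sqlparse.parse() is not part of this diff.)

import sqlparse

# Each leaf token carries a ttype and a raw value, e.g. (Token.Keyword.DML, u'SELECT')
parsed = sqlparse.parse('SELECT a, b FROM t')[0]
for token in parsed.flatten():
    print(repr(token))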
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index cc6145a..ce5dd85 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -4,13 +4,13 @@ import re
from os.path import abspath, join
-from sqlparse import sql
-from sqlparse import tokens as T
-from sqlparse.engine import FilterStack
-from sqlparse.lexer import tokenize
+from sqlparse import sql, tokens as T
+from sqlparse.engine import FilterStack
+from sqlparse.lexer import tokenize
from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
- String, Whitespace)
+from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
+ String, Whitespace)
+from sqlparse.utils import memoize_generator
# --------------------------
@@ -94,12 +94,17 @@ def StripWhitespace(stream):
class IncludeStatement:
"""Filter that enable a INCLUDE statement"""
- def __init__(self, dirpath=".", maxRecursive=10):
+ def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False):
+ if maxrecursive <= 0:
+ raise ValueError('Max recursion limit reached')
+
self.dirpath = abspath(dirpath)
- self.maxRecursive = maxRecursive
+ self.maxRecursive = maxrecursive
+ self.raiseexceptions = raiseexceptions
self.detected = False
+ @memoize_generator
def process(self, stack, stream):
# Run over all tokens in the stream
for token_type, value in stream:
@@ -112,30 +117,48 @@ class IncludeStatement:
elif self.detected:
# Omit whitespaces
if token_type in Whitespace:
- pass
-
- # Get path of file to include
- path = None
+ continue
+ # Found file path to include
if token_type in String.Symbol:
# if token_type in tokens.String.Symbol:
+
+ # Get path of file to include
path = join(self.dirpath, value[1:-1])
- # Include file if path was found
- if path:
try:
f = open(path)
raw_sql = f.read()
f.close()
+
+ # There was a problem loading the include file
except IOError, err:
+ # Raise the exception to the interpreter
+ if self.raiseexceptions:
+ raise
+
+ # Put the exception as a comment on the SQL code
yield Comment, u'-- IOError: %s\n' % err
else:
# Create a new FilterStack to parse the file just read
# and add all its tokens to the main stack recursively
- # [ToDo] Add maximum recursive iteration value
+ try:
+ filtr = IncludeStatement(self.dirpath,
+ self.maxRecursive - 1,
+ self.raiseexceptions)
+
+ # Max recursion limit reached
+ except ValueError, err:
+ # Raise the exception to the interpreter
+ if self.raiseexceptions:
+ raise
+
+ # Put the exception as a comment on the SQL code
+ yield Comment, u'-- ValueError: %s\n' % err
+
stack = FilterStack()
- stack.preprocess.append(IncludeStatement(self.dirpath))
+ stack.preprocess.append(filtr)
for tv in stack.run(raw_sql):
yield tv
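(Rough usage sketch for the reworked filter: with raiseexceptions=False a missing include file or a hit on the recursion limit is emitted as a '-- IOError: ...' / '-- ValueError: ...' comment instead of being raised. The include statement and file name below are made up for illustration and assume the filter's usual include "file" syntax.)

from sqlparse.engine import FilterStack
from sqlparse.filters import IncludeStatement

# Expand INCLUDE statements as a preprocess step of the filter stack.
stack = FilterStack()
stack.preprocess.append(IncludeStatement('.', maxrecursive=10,
                                          raiseexceptions=False))

for stmt in stack.run(u'include "init.sql"; select 1;'):
    print(stmt)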
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index dc794ab..5b0f116 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -179,6 +179,9 @@ class Lexer(object):
(r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin),
(r'\?{1}', tokens.Name.Placeholder),
(r'[$:?%][a-zA-Z0-9_]+', tokens.Name.Placeholder),
+ # FIXME(andi): VALUES shouldn't be listed here
+ # see https://github.com/andialbrecht/sqlparse/pull/64
+ (r'VALUES', tokens.Keyword),
(r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name),
(r'[a-zA-Z_][a-zA-Z0-9_]*(?=[.(])', tokens.Name), # see issue39
(r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
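(The effect of the new rule, assuming the lexer's usual case-insensitive matching, is that VALUES is typed as a Keyword instead of a plain Name. A quick check through tokenize(), which this change set already imports in filters.py; the INSERT statement is arbitrary.)

from sqlparse.lexer import tokenize

# VALUES should now come back as tokens.Keyword rather than tokens.Name.
for ttype, value in tokenize('insert into t (x) values (1)'):
    if value.upper() == 'VALUES':
        print('%s %r' % (ttype, value))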
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 05e078d..d18a0a7 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -181,6 +181,13 @@ class TokenList(Token):
else:
yield token
+# def __iter__(self):
+# return self
+#
+# def next(self):
+# for token in self.tokens:
+# yield token
+
def is_group(self):
return True
@@ -255,7 +262,7 @@ class TokenList(Token):
def token_matching(self, idx, funcs):
for token in self.tokens[idx:]:
- for i, func in enumerate(funcs):
+ for func in funcs:
if func(token):
return token
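(For reference, funcs is simply an iterable of predicates over tokens, and the enumerate() index was never used, hence the simplification. A rough illustration with an arbitrary query and predicate:)

import sqlparse
from sqlparse import tokens as T

# Return the first token at or after position idx for which any predicate is True.
stmt = sqlparse.parse('select a from t where a = 1')[0]
first_dml = stmt.token_matching(0, [lambda tok: tok.ttype is T.Keyword.DML])
print(repr(first_dml))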
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
new file mode 100644
index 0000000..443c64d
--- /dev/null
+++ b/sqlparse/utils.py
@@ -0,0 +1,47 @@
+'''
+Created on 17/05/2012
+
+@author: piranna
+'''
+
+
+def memoize_generator(func):
+ """Memoize decorator for generators
+
+ Store `func` results in a cache according to their arguments, as 'memoize'
+ does, but this works on generators instead of regular functions.
+ Obviously, this is only useful if the generator always returns the same
+ values for a given set of parameters...
+ """
+ cache = {}
+
+ def wrapped_func(*args, **kwargs):
+ params = (args, tuple(sorted(kwargs.items())))  # dicts aren't hashable; use a tuple as the cache key
+
+ # Look if cached
+ try:
+ print params
+ cached = cache[params]
+
+ # Not cached, exec and store it
+ except KeyError:
+ # Reset the cache if there are too many cached entries and start over.
+ # In the future it would be better to use an OrderedDict and drop the
+ # Least Recently Used entries
+ if len(cache) >= 10:
+ cache.clear()
+
+ cached = []
+
+ for item in func(*args, **kwargs):
+ cached.append(item)
+ yield item
+
+ cache[params] = cached
+
+ # Cached, yield its items
+ else:
+ for item in cached:
+ yield item
+
+ return wrapped_func
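(A quick sketch of how the decorator behaves on a throwaway generator; the name countdown() is illustrative only, and the call arguments have to be usable as a cache key. The first call runs the generator and fills the cache, and an identical second call replays the cached items.)

from sqlparse.utils import memoize_generator

@memoize_generator
def countdown(n):
    # Deterministic: the same argument always yields the same sequence,
    # which is the precondition the docstring mentions.
    while n > 0:
        yield n
        n -= 1

print(list(countdown(3)))  # generator runs, result gets cached: [3, 2, 1]
print(list(countdown(3)))  # replayed from the cache: [3, 2, 1]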