author     Victor Uriarte <victor.m.uriarte@intel.com>    2016-06-02 14:09:21 -0700
committer  Victor Uriarte <victor.m.uriarte@intel.com>    2016-06-04 15:06:04 -0700
commit     62423c0d5e2e570341d5d0db74982712ff2348c7 (patch)
tree       50a426c798502a5b72a1bad28281a7cc5f85b0b7 /sqlparse
parent     c6a5e7ac2a5ecc993f4e5292ab16e6df6b84f26c (diff)
download   sqlparse-62423c0d5e2e570341d5d0db74982712ff2348c7.tar.gz
Remove undocumented features
These features/functions/classes were added for AntiORM. A quick look-up didn't show any usage outside of AntiORM. Closes #246
Diffstat (limited to 'sqlparse')
-rw-r--r--   sqlparse/__init__.py      6
-rw-r--r--   sqlparse/filters.py     222
-rw-r--r--   sqlparse/functions.py    44
-rw-r--r--   sqlparse/pipeline.py     31
-rw-r--r--   sqlparse/utils.py        71
5 files changed, 1 insertion, 373 deletions
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index 2943997..d69a3d9 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -68,9 +68,3 @@ def split(sql, encoding=None):
stack = engine.FilterStack()
stack.split_statements = True
return [u(stmt).strip() for stmt in stack.run(sql, encoding)]
-
-
-def split2(stream):
- from sqlparse.engine.filter import StatementFilter
- splitter = StatementFilter()
- return list(splitter.process(None, stream))
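Note: callers that depended on the removed split2() helper can switch to the documented sqlparse.split() entry point. A minimal migration sketch (the sample SQL string below is made up for illustration):

    import sqlparse

    # split() takes raw SQL text and returns a list of statement strings,
    # whereas the removed split2() consumed an already-tokenized stream.
    raw = "select * from foo; select * from bar;"
    for statement in sqlparse.split(raw):
        print(statement)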
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index 1cb2f16..ccf8735 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -7,15 +7,8 @@
import re
-from os.path import abspath, join
-
from sqlparse import sql, tokens as T
from sqlparse.compat import u, text_type
-from sqlparse.engine import FilterStack
-from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
- String, Whitespace)
-from sqlparse.utils import memoize_generator
from sqlparse.utils import split_unquoted_newlines
@@ -74,130 +67,6 @@ class TruncateStringFilter(object):
yield ttype, value
-class GetComments(object):
- """Get the comments from a stack"""
- def process(self, stack, stream):
- for token_type, value in stream:
- if token_type in Comment:
- yield token_type, value
-
-
-class StripComments(object):
- """Strip the comments from a stack"""
- def process(self, stack, stream):
- for token_type, value in stream:
- if token_type not in Comment:
- yield token_type, value
-
-
-def StripWhitespace(stream):
- "Strip the useless whitespaces from a stream leaving only the minimal ones"
- last_type = None
- has_space = False
- ignore_group = frozenset((Comparison, Punctuation))
-
- for token_type, value in stream:
- # We got a previous token (not empty first ones)
- if last_type:
- if token_type in Whitespace:
- has_space = True
- continue
-
- # Ignore first empty spaces and dot-commas
- elif token_type in (Whitespace, Whitespace.Newline, ignore_group):
- continue
-
- # Yield a whitespace if it can't be ignored
- if has_space:
- if not ignore_group.intersection((last_type, token_type)):
- yield Whitespace, ' '
- has_space = False
-
- # Yield the token and set its type for checking with the next one
- yield token_type, value
- last_type = token_type
-
-
-class IncludeStatement(object):
- """Filter that enable a INCLUDE statement"""
-
- def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False):
- if maxrecursive <= 0:
- raise ValueError('Max recursion limit reached')
-
- self.dirpath = abspath(dirpath)
- self.maxRecursive = maxrecursive
- self.raiseexceptions = raiseexceptions
-
- self.detected = False
-
- @memoize_generator
- def process(self, stack, stream):
- # Run over all tokens in the stream
- for token_type, value in stream:
- # INCLUDE statement found, set detected mode
- if token_type in Name and value.upper() == 'INCLUDE':
- self.detected = True
- continue
-
- # INCLUDE statement was found, parse it
- elif self.detected:
- # Omit whitespaces
- if token_type in Whitespace:
- continue
-
- # Found file path to include
- if token_type in String.Symbol:
- # Get path of file to include
- path = join(self.dirpath, value[1:-1])
-
- try:
- f = open(path)
- raw_sql = f.read()
- f.close()
-
- # There was a problem loading the include file
- except IOError as err:
- # Raise the exception to the interpreter
- if self.raiseexceptions:
- raise
-
- # Put the exception as a comment on the SQL code
- yield Comment, u'-- IOError: %s\n' % err
-
- else:
- # Create a new FilterStack to parse the included file
- # and add all its tokens to the main stack recursively
- try:
- filtr = IncludeStatement(self.dirpath,
- self.maxRecursive - 1,
- self.raiseexceptions)
-
- # Max recursion limit reached
- except ValueError as err:
- # Raise the exception to the interpreter
- if self.raiseexceptions:
- raise
-
- # Put the exception as a comment on the SQL code
- yield Comment, u'-- ValueError: %s\n' % err
-
- stack = FilterStack()
- stack.preprocess.append(filtr)
-
- for tv in stack.run(raw_sql):
- yield tv
-
- # Set normal mode
- self.detected = False
-
- # Don't include any token while in detected mode
- continue
-
- # Normal token
- yield token_type, value
-
-
# ----------------------
# statement process
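Note: the removed GetComments/StripComments/StripWhitespace filters largely overlap with the documented formatting options. A small sketch of the documented route, assuming the strip_comments and strip_whitespace options of sqlparse.format() behave as described in the docs (the sample SQL is made up):

    import sqlparse

    sql = "select *   -- fetch everything\nfrom foo"
    # strip_comments drops comment tokens; strip_whitespace collapses
    # redundant whitespace, similar to the removed StripWhitespace filter.
    print(sqlparse.format(sql, strip_comments=True, strip_whitespace=True))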
@@ -520,57 +389,6 @@ class RightMarginFilter(object):
group.tokens = self._process(stack, group, group.tokens)
-class ColumnsSelect(object):
- """Get the columns names of a SELECT query"""
- def process(self, stack, stream):
- mode = 0
- oldValue = ""
- parenthesis = 0
-
- for token_type, value in stream:
- # Ignore comments
- if token_type in Comment:
- continue
-
- # We have not detected a SELECT statement
- if mode == 0:
- if token_type in Keyword and value == 'SELECT':
- mode = 1
-
- # We have detected a SELECT statement
- elif mode == 1:
- if value == 'FROM':
- if oldValue:
- yield oldValue
-
- mode = 3 # Columns have been checked
-
- elif value == 'AS':
- oldValue = ""
- mode = 2
-
- elif (token_type == Punctuation
- and value == ',' and not parenthesis):
- if oldValue:
- yield oldValue
- oldValue = ""
-
- elif token_type not in Whitespace:
- if value == '(':
- parenthesis += 1
- elif value == ')':
- parenthesis -= 1
-
- oldValue += value
-
- # We are processing an AS keyword
- elif mode == 2:
- # We also check for Keywords because of a bug in SQLParse
- if token_type == Name or token_type == Keyword:
- yield value
- mode = 1
-
-
# ---------------------------
# postprocess
@@ -583,15 +401,6 @@ class SerializerUnicode(object):
return res
-def Tokens2Unicode(stream):
- result = ""
-
- for _, value in stream:
- result += u(value)
-
- return result
-
-
class OutputFilter(object):
varname_prefix = ''
@@ -704,34 +513,3 @@ class OutputPHPFilter(OutputFilter):
# Close quote
yield sql.Token(T.Text, '"')
yield sql.Token(T.Punctuation, ';')
-
-
-class Limit(object):
- """Get the LIMIT of a query.
-
- If no LIMIT is defined, return -1 (the SQL convention for a query without LIMIT)
- """
- def process(self, stack, stream):
- index = 7
- stream = list(stream)
- stream.reverse()
-
- # Run over all tokens in the stream from the end
- for token_type, value in stream:
- index -= 1
-
-# if index and token_type in Keyword:
- if index and token_type in Keyword and value == 'LIMIT':
- return stream[4 - index][1]
-
- return -1
-
-
-def compact(stream):
- """Function that return a compacted version of the stream"""
- pipe = Pipeline()
-
- pipe.append(StripComments())
- pipe.append(StripWhitespace)
-
- return pipe(stream)
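Note: the removed Tokens2Unicode helper only concatenated token values back into one string. The same result is available through the public parser; a minimal sketch (the sample SQL is illustrative):

    import sqlparse

    sql = "select id, name from users"
    stmt = sqlparse.parse(sql)[0]
    # str() on a parsed Statement re-assembles the token values,
    # which is what the removed Tokens2Unicode did for a token stream.
    print(str(stmt))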
diff --git a/sqlparse/functions.py b/sqlparse/functions.py
deleted file mode 100644
index e54457e..0000000
--- a/sqlparse/functions.py
+++ /dev/null
@@ -1,44 +0,0 @@
-'''
-Created on 17/05/2012
-
-@author: piranna
-
-Several utility functions to extract info from SQL statements
-'''
-
-from sqlparse.filters import ColumnsSelect, Limit
-from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import Keyword, Whitespace
-
-
-def getlimit(stream):
- """Function that return the LIMIT of a input SQL """
- pipe = Pipeline()
-
- pipe.append(Limit())
-
- result = pipe(stream)
- try:
- return int(result)
- except ValueError:
- return result
-
-
-def getcolumns(stream):
- """Function that return the colums of a SELECT query"""
- pipe = Pipeline()
-
- pipe.append(ColumnsSelect())
-
- return pipe(stream)
-
-
-class IsType(object):
- """Functor that return is the statement is of a specific type"""
- def __init__(self, type):
- self.type = type
-
- def __call__(self, stream):
- for token_type, value in stream:
- if token_type not in Whitespace:
- return token_type in Keyword and value == self.type
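Note: code that used the removed getcolumns()/getlimit() helpers can recover similar information from the regular token API. A hedged sketch (the query text is made up and the LIMIT scan is deliberately simplistic; simple column expressions are assumed to be grouped as Identifier/IdentifierList):

    import sqlparse
    from sqlparse.sql import Identifier, IdentifierList
    from sqlparse.tokens import Keyword, Whitespace

    sql = "select id, name as n from users limit 10"
    stmt = sqlparse.parse(sql)[0]

    # Roughly what getcolumns() returned: the selected column names/aliases.
    columns = []
    for token in stmt.tokens:
        if token.ttype in Keyword and token.value.upper() == 'FROM':
            break
        if isinstance(token, IdentifierList):
            columns = [ident.get_name() for ident in token.get_identifiers()]
        elif isinstance(token, Identifier):
            columns = [token.get_name()]
    print(columns)  # expected: ['id', 'n']

    # Roughly what getlimit() returned: the value following the LIMIT keyword.
    tokens = [t for t in stmt.flatten() if t.ttype not in Whitespace]
    limit = -1
    for i, token in enumerate(tokens):
        if token.ttype in Keyword and token.value.upper() == 'LIMIT':
            limit = tokens[i + 1].value if i + 1 < len(tokens) else -1
    print(limit)  # expected: '10'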
diff --git a/sqlparse/pipeline.py b/sqlparse/pipeline.py
deleted file mode 100644
index 34dad19..0000000
--- a/sqlparse/pipeline.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2011 Jesus Leganes "piranna", piranna@gmail.com
-#
-# This module is part of python-sqlparse and is released under
-# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
-
-from types import GeneratorType
-
-
-class Pipeline(list):
- """Pipeline to process filters sequentially"""
-
- def __call__(self, stream):
- """Run the pipeline
-
- Return a static (non-generator) version of the result
- """
-
- # Run the stream over all the filters on the pipeline
- for filter in self:
- # Functions and callable objects (objects with '__call__' method)
- if callable(filter):
- stream = filter(stream)
-
- # Normal filters (objects with 'process' method)
- else:
- stream = filter.process(None, stream)
-
- # If the last filter returned a generator, materialize it into a list
- if isinstance(stream, GeneratorType):
- return list(stream)
- return stream
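Note: the removed Pipeline class was a thin wrapper that ran a list of filters in order and materialized the final generator. Plain function composition covers the same pattern; a small generic sketch, independent of sqlparse internals:

    from functools import reduce

    def run_pipeline(stream, *stages):
        # Apply each stage to the output of the previous one, left to right.
        result = reduce(lambda data, stage: stage(data), stages, stream)
        # Mirror the removed Pipeline: return a static (list) result.
        return list(result)

    # Example: uppercase the tokens, then drop the empty ones.
    tokens = ['select', '', 'id']
    print(run_pipeline(tokens,
                       lambda ts: (t.upper() for t in ts),
                       lambda ts: (t for t in ts if t)))
    # expected: ['SELECT', 'ID']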
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
index 2513c26..4da44c6 100644
--- a/sqlparse/utils.py
+++ b/sqlparse/utils.py
@@ -7,78 +7,9 @@
import itertools
import re
-from collections import OrderedDict, deque
+from collections import deque
from contextlib import contextmanager
-
-class Cache(OrderedDict):
- """Cache with LRU algorithm using an OrderedDict as basis
- """
-
- def __init__(self, maxsize=100):
- OrderedDict.__init__(self)
-
- self._maxsize = maxsize
-
- def __getitem__(self, key, *args, **kwargs):
- # Get the key and remove it from the cache, or raise KeyError
- value = OrderedDict.__getitem__(self, key)
- del self[key]
-
- # Insert the (key, value) pair on the front of the cache
- OrderedDict.__setitem__(self, key, value)
-
- # Return the value from the cache
- return value
-
- def __setitem__(self, key, value, *args, **kwargs):
- # Key was inserted before, remove it so we put it at front later
- if key in self:
- del self[key]
-
- # Too many items in the cache, remove the least recently used
- elif len(self) >= self._maxsize:
- self.popitem(False)
-
- # Insert the (key, value) pair on the front of the cache
- OrderedDict.__setitem__(self, key, value, *args, **kwargs)
-
-
-def memoize_generator(func):
- """Memoize decorator for generators
-
- Store `func` results in a cache according to their arguments, as 'memoize'
- does, but this works on generators instead of regular functions.
- Obviously, this is only useful if the generator always returns the same
- values for each specific set of parameters...
- """
- cache = Cache()
-
- def wrapped_func(*args, **kwargs):
- params = (args, tuple(sorted(kwargs.items())))
-
- # Look if cached
- try:
- cached = cache[params]
-
- # Not cached, exec and store it
- except KeyError:
- cached = []
-
- for item in func(*args, **kwargs):
- cached.append(item)
- yield item
-
- cache[params] = cached
-
- # Cached, yield its items
- else:
- for item in cached:
- yield item
-
- return wrapped_func
-
-
# This regular expression replaces the home-cooked parser that was here before.
# It is much faster, but requires an extra post-processing step to get the
# desired results (that are compatible with what you would expect from the