path: root/sqlparse
author     Victor Uriarte <victor.m.uriarte@intel.com>  2017-11-29 09:56:06 -0700
committer  Victor Uriarte <victor.m.uriarte@intel.com>  2017-11-29 14:52:15 -0700
commit     43478c60394e09246ee6275f89ae434e429cc5b5 (patch)
tree       07c7cf589827d53212e38f54cc84bc88d56a1b5d /sqlparse
parent     745df4576efca44b226bcdca33f718a5fa670eab (diff)
download   sqlparse-typo.tar.gz

Fix typos
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/grouping.py            6
-rw-r--r--  sqlparse/engine/statement_splitter.py  6
-rw-r--r--  sqlparse/filters/aligned_indent.py     4
-rw-r--r--  sqlparse/filters/output.py             4
-rw-r--r--  sqlparse/keywords.py                   4
-rw-r--r--  sqlparse/lexer.py                      2
-rw-r--r--  sqlparse/sql.py                        6
-rw-r--r--  sqlparse/tokens.py                     4
8 files changed, 18 insertions, 18 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index fa87c9f..5078389 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -28,8 +28,8 @@ def _group_matching(tlist, cls):
continue
if token.is_group and not isinstance(token, cls):
- # Check inside previously grouped (ie. parenthesis) if group
- # of differnt type is inside (ie, case). though ideally should
+ # Check inside previously grouped (i.e. parenthesis) if group
+ # of different type is inside (i.e., case). though ideally should
# should check for all open/close tokens at once to avoid recursion
_group_matching(token, cls)
continue
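
The comment fixed above describes recursion into an already-grouped token, e.g. a CASE inside a parenthesis. A minimal sketch of that effect through the public API; the `find` helper is hypothetical, added only for illustration:

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse(
        "SELECT (CASE WHEN x > 0 THEN 1 ELSE 0 END) FROM t")[0]

    def find(token, cls):
        # Depth-first search for the first grouped token of a given class.
        if isinstance(token, cls):
            return token
        if token.is_group:
            for child in token.tokens:
                hit = find(child, cls)
                if hit is not None:
                    return hit
        return None

    paren = find(stmt, sql.Parenthesis)
    case = find(paren, sql.Case)
    print(type(paren).__name__, type(case).__name__)  # Parenthesis Case

The parenthesis is grouped first; _group_matching then recurses into it and groups the CASE ... END block it contains.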
@@ -365,7 +365,7 @@ def _group(tlist, cls, match,
extend=True,
recurse=True
):
- """Groups together tokens that are joined by a middle token. ie. x < y"""
+ """Groups together tokens that are joined by a middle token. i.e. x < y"""
tidx_offset = 0
pidx, prev_ = None, None
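
The docstring's "x < y" example is the comparison case; on a recent sqlparse this should round-trip as a single grouped Comparison token:

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse("x < y")[0]
    comp = stmt.tokens[0]
    print(isinstance(comp, sql.Comparison))  # True: 'x', '<', 'y' grouped
    print(comp.value)                        # x < y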
diff --git a/sqlparse/engine/statement_splitter.py b/sqlparse/engine/statement_splitter.py
index 6c5b599..87fe993 100644
--- a/sqlparse/engine/statement_splitter.py
+++ b/sqlparse/engine/statement_splitter.py
@@ -29,7 +29,7 @@ class StatementSplitter(object):
# ANSI
# if normal token return
# wouldn't parenthesis increase/decrease a level?
- # no, inside a paranthesis can't start new statement
+ # no, inside a parenthesis can't start new statement
if ttype not in T.Keyword:
return 0
@@ -56,9 +56,9 @@ class StatementSplitter(object):
return 1
return 0
- # Should this respect a preceeding BEGIN?
+ # Should this respect a preceding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
- # Would having multiple CASE WHEN END and a Assigment Operator
+ # Would having multiple CASE WHEN END and a Assignment Operator
# cause the statement to cut off prematurely?
if unified == 'END':
self._begin_depth = max(0, self._begin_depth - 1)
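
These comments document the depth tracking that keeps a semicolon inside a BEGIN ... END block from splitting the statement. A sketch of the user-visible behaviour:

    import sqlparse

    # Top-level semicolons split ...
    print(sqlparse.split("select 1; select 2;"))
    # ['select 1;', 'select 2;']

    # ... but while _begin_depth is non-zero the inner semicolon
    # does not end the statement, so this should come back whole:
    print(sqlparse.split("begin select 1; select 2; end;"))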
diff --git a/sqlparse/filters/aligned_indent.py b/sqlparse/filters/aligned_indent.py
index c04d06d..2eb2197 100644
--- a/sqlparse/filters/aligned_indent.py
+++ b/sqlparse/filters/aligned_indent.py
@@ -31,7 +31,7 @@ class AlignedIndentFilter(object):
def nl(self, offset=1):
# offset = 1 represent a single space after SELECT
offset = -len(offset) if not isinstance(offset, int) else offset
- # add two for the space and parens
+ # add two for the space and parenthesis
indent = self.indent * (2 + self._max_kwd_len)
return sql.Token(T.Whitespace, self.n + self.char * (
@@ -50,7 +50,7 @@ class AlignedIndentFilter(object):
if token is not None:
with indent(self):
tlist.insert_after(tlist[0], self.nl('SELECT'))
- # process the inside of the parantheses
+ # process the inside of the parenthesis
self._process_default(tlist)
# de-indent last parenthesis
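
AlignedIndentFilter is the filter behind the `reindent_aligned` format option; the parenthesis handling patched above is what recurses into subqueries. A sketch, with the output shape shown only roughly:

    import sqlparse

    print(sqlparse.format(
        "select a, sum(b) as total from t group by a",
        reindent_aligned=True))
    # select a,
    #        sum(b) as total
    #   from t
    #  group by a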
diff --git a/sqlparse/filters/output.py b/sqlparse/filters/output.py
index c3425df..ca96080 100644
--- a/sqlparse/filters/output.py
+++ b/sqlparse/filters/output.py
@@ -33,7 +33,7 @@ class OutputFilter(object):
class OutputPythonFilter(OutputFilter):
def _process(self, stream, varname, has_nl):
- # SQL query asignation to varname
+ # SQL query assignation to varname
if self.count > 1:
yield sql.Token(T.Whitespace, '\n')
yield sql.Token(T.Name, varname)
@@ -79,7 +79,7 @@ class OutputPHPFilter(OutputFilter):
varname_prefix = '$'
def _process(self, stream, varname, has_nl):
- # SQL query asignation to varname (quote header)
+ # SQL query assignation to varname (quote header)
if self.count > 1:
yield sql.Token(T.Whitespace, '\n')
yield sql.Token(T.Name, varname)
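
The "assignation" comments describe the variable-assignment wrappers these filters produce, exposed via `output_format`. Per the project docs the Python variant behaves as below; the PHP output is shown approximately:

    import sqlparse

    print(sqlparse.format("select * from foo;", output_format="python"))
    # sql = 'select * from foo;'

    print(sqlparse.format("select * from foo;", output_format="php"))
    # $sql = "select * from foo;";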
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index a1242ab..af5e348 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -45,7 +45,7 @@ SQL_REGEX = {
# FIXME(andi): VALUES shouldn't be listed here
# see https://github.com/andialbrecht/sqlparse/pull/64
# IN is special, it may be followed by a parenthesis, but
- # is never a functino, see issue183
+ # is never a function, see issue183
(r'(CASE|IN|VALUES|USING)\b', tokens.Keyword),
(r'(@|##|#)[A-ZÀ-Ü]\w+', tokens.Name),
@@ -55,7 +55,7 @@ SQL_REGEX = {
# TODO: Spaces before period not implemented
(r'[A-ZÀ-Ü]\w*(?=\s*\.)', tokens.Name), # 'Name' .
# FIXME(atronah): never match,
- # because `re.match` doesn't work with lookbehind regexp feature
+ # because `re.match` doesn't work with look-behind regexp feature
(r'(?<=\.)[A-ZÀ-Ü]\w*', tokens.Name), # .'Name'
(r'[A-ZÀ-Ü]\w*(?=\()', tokens.Name), # side effect: change kw to func
(r'-?0x[\dA-F]+', tokens.Number.Hexadecimal),
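
The comment fixed here explains why IN is listed explicitly: it can be followed by a parenthesis but must not be rewritten into a function name by the `(?=\()` rule further down. A quick check:

    import sqlparse
    from sqlparse import tokens as T

    flat = list(sqlparse.parse("x IN (1, 2)")[0].flatten())
    in_tok = next(t for t in flat if t.normalized == "IN")
    print(in_tok.ttype is T.Keyword)  # True: stays Keyword, not Name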
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 60e43da..d1b1699 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -35,7 +35,7 @@ class Lexer(object):
Split ``text`` into (tokentype, text) pairs.
- ``stack`` is the inital stack (default: ``['root']``)
+ ``stack`` is the initial stack (default: ``['root']``)
"""
if isinstance(text, file_types):
text = text.read()
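
The docstring under repair belongs to the tokenizer entry point; the module-level `tokenize` helper yields the (tokentype, text) pairs it mentions. For example, with token types shown via their reprs:

    from sqlparse import lexer

    for ttype, value in lexer.tokenize("select 1"):
        print(ttype, repr(value))
    # Token.Keyword.DML 'select'
    # Token.Text.Whitespace ' '
    # Token.Literal.Number.Integer '1'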
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index f698782..99f133e 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -20,7 +20,7 @@ class Token(object):
"""Base class for all other classes in this module.
It represents a single token and has two instance attributes:
- ``value`` is the unchange value of the token and ``ttype`` is
+ ``value`` is the unchanged value of the token and ``ttype`` is
the type of the token.
"""
@@ -73,7 +73,7 @@ class Token(object):
*values* is a list of possible values for this token. The values
are OR'ed together so if only one of the values matches ``True``
is returned. Except for keyword tokens the comparison is
- case-sensitive. For convenience it's ok to pass in a single string.
+ case-sensitive. For convenience it's OK to pass in a single string.
If *regex* is ``True`` (default is ``False``) the given values are
treated as regular expressions.
"""
@@ -363,7 +363,7 @@ class TokenList(Token):
def get_parent_name(self):
"""Return name of the parent object if any.
- A parent object is identified by the first occuring dot.
+ A parent object is identified by the first occurring dot.
"""
dot_idx, _ = self.token_next_by(m=(T.Punctuation, '.'))
_, prev_ = self.token_prev(dot_idx)
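
A short illustration of the "first occurring dot" rule with a dotted name:

    import sqlparse

    ident = sqlparse.parse("select * from myschema.mytable")[0].tokens[-1]
    print(ident.get_parent_name())  # myschema (text before the first dot)
    print(ident.get_real_name())    # mytable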
diff --git a/sqlparse/tokens.py b/sqlparse/tokens.py
index 6864f69..efc1e09 100644
--- a/sqlparse/tokens.py
+++ b/sqlparse/tokens.py
@@ -25,7 +25,7 @@ class _TokenType(tuple):
return new
def __repr__(self):
- # self can be False only if its the `root` ie. Token itself
+ # self can be False only if its the `root` i.e. Token itself
return 'Token' + ('.' if self else '') + '.'.join(self)
@@ -55,7 +55,7 @@ Assignment = Token.Assignment
# Generic types for non-source code
Generic = Token.Generic
-# String and some others are not direct childs of Token.
+# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
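
Both fixes in this file are easy to see interactively: the root type alone reprs as plain 'Token', and containment works because token types are tuples, so a subtype is "in" its parent:

    from sqlparse import tokens as T

    print(repr(T.Keyword.DML))          # Token.Keyword.DML
    print(repr(T.Token))                # Token (the falsy root, no dot)
    print(T.Keyword.DML in T.Keyword)   # True: child contains parent prefix
    print(T.Token.String is T.String)   # True: the alias above at work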