| field | value | date |
|---|---|---|
| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-05-28 13:34:21 -0700 |
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-05-29 00:08:58 -0700 |
| commit | 73f5f61c73da92cb5358c6b50b25e8d2eb20e3be (patch) | |
| tree | 36ff0250623665babdb1616e3bd67091a7bea5e7 | |
| parent | 4c1200b95fd345d502e3da5173cdbcdbd67bfb11 (diff) | |
| download | sqlparse-73f5f61c73da92cb5358c6b50b25e8d2eb20e3be.tar.gz | |
Remove undocumented whitespace handlers from lexer.py

Stripping newlines before tokenizing breaks comments (stripnl/stripall).
Tab expansion (tabsize) is likewise undocumented, so remove it as well.
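To see why the stripping was harmful, consider a single-line comment, which is terminated by its newline. The sketch below is illustrative only: the regex is a hypothetical stand-in for the lexer's comment rule, not copied from sqlparse.

```python
import re

# Stand-in for a single-line-comment rule that relies on the newline
# terminator (hypothetical pattern, not sqlparse's actual rule).
COMMENT = re.compile(r'--.*?\n')

sql = "SELECT 1 -- trailing comment\n"

# With the newline intact, the comment matches as one span.
print(COMMENT.search(sql))              # matches '-- trailing comment\n'

# stripnl-style preprocessing (text.strip('\n')) removes the terminator,
# so the same rule no longer sees a complete comment.
print(COMMENT.search(sql.strip('\n')))  # None
```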
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | sqlparse/lexer.py | 17 |
| -rw-r--r-- | tests/test_tokenize.py | 7 |

2 files changed, 2 insertions, 22 deletions
```diff
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 74a3431..6915a6a 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -100,23 +100,15 @@ class LexerMeta(type):
 
 class _Lexer(object):
     encoding = 'utf-8'
-    stripall = False
-    stripnl = False
-    tabsize = 0
     flags = re.IGNORECASE | re.UNICODE
 
     def __init__(self):
         self.filters = []
 
-    def _expandtabs(self, text):
-        if self.tabsize > 0:
-            text = text.expandtabs(self.tabsize)
-        return text
-
     def _decode(self, text):
         if sys.version_info[0] == 3:
             if isinstance(text, str):
-                return self._expandtabs(text)
+                return text
         if self.encoding == 'guess':
             try:
                 text = text.decode('utf-8')
@@ -129,7 +121,7 @@ class _Lexer(object):
                 text = text.decode(self.encoding)
             except UnicodeDecodeError:
                 text = text.decode('unicode-escape')
-        return self._expandtabs(text)
+        return text
 
     def get_tokens(self, text):
         """
@@ -141,11 +133,6 @@ class _Lexer(object):
         wanted and applies registered filters.
         """
         if isinstance(text, string_types):
-            if self.stripall:
-                text = text.strip()
-            elif self.stripnl:
-                text = text.strip('\n')
-
             if sys.version_info[0] < 3 and isinstance(text, text_type):
                 text = StringIO(text.encode('utf-8'))
                 self.encoding = 'utf-8'
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 02c3f25..4bcfd49 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -72,13 +72,6 @@ class TestTokenize(unittest.TestCase):
         self.assertEqual(tokens[2][0], T.Number.Integer)
         self.assertEqual(tokens[2][1], '-1')
 
-    def test_tab_expansion(self):
-        s = "\t"
-        lex = lexer.Lexer()
-        lex.tabsize = 5
-        tokens = list(lex.get_tokens(s))
-        self.assertEqual(tokens[0][1], " " * 5)
-
 
 class TestToken(unittest.TestCase):
```
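After this change the input reaches the tokenizer byte-for-byte. A quick sketch of the new behavior, using the same `Lexer`/`get_tokens` API as the removed test; the expected token values are assumptions, not something the patch asserts:

```python
from sqlparse import lexer

# Same API as the removed test_tab_expansion; there is no longer a
# tabsize/stripnl/stripall attribute to configure.
tokens = list(lexer.Lexer().get_tokens("\tSELECT 1\n"))

# Expectation: the leading tab survives as a whitespace token instead
# of being expanded to spaces, and the trailing newline is not stripped.
print(tokens[0])   # e.g. (Token.Whitespace, '\t')
print(tokens[-1])  # e.g. (Token.Whitespace, '\n')
```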
