diff options
| author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-10 18:34:06 -0700 |
|---|---|---|
| committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-11 04:33:11 -0700 |
| commit | 886408b4e223694db4c3288643d55d4a382e5d67 (patch) | |
| tree | 0e1cb050d3f33924c840e1c13135ff4533c83182 /sqlparse | |
| parent | 1957755f5ff90568ec8dbdf41442018a341b1b6b (diff) | |
| download | sqlparse-886408b4e223694db4c3288643d55d4a382e5d67.tar.gz | |
Remove unneeded code from sql.py
Remove HACK code. Code is now properly updated
Diffstat (limited to 'sqlparse')
| -rw-r--r-- | sqlparse/sql.py | 14 |
1 file changed, 2 insertions, 12 deletions
diff --git a/sqlparse/sql.py b/sqlparse/sql.py index daa5cf5..ced18af 100644 --- a/sqlparse/sql.py +++ b/sqlparse/sql.py @@ -248,8 +248,6 @@ class TokenList(Token): If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. ``None`` is returned if there's no previous token. """ - if isinstance(idx, int): - idx += 1 # alot of code usage current pre-compensates for this funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or (skip_cm and imt(tk, t=T.Comment))) return self._token_matching(funcs, idx, reverse=True) @@ -260,8 +258,6 @@ class TokenList(Token): If *skip_ws* is ``True`` (the default) whitespace tokens are ignored. ``None`` is returned if there's no next token. """ - if isinstance(idx, int): - idx += 1 # alot of code usage current pre-compensates for this funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or (skip_cm and imt(tk, t=T.Comment))) return self._token_matching(funcs, idx) @@ -435,19 +431,13 @@ class Identifier(TokenList): def get_typecast(self): """Returns the typecast or ``None`` of this object as a string.""" marker = self.token_next_by(m=(T.Punctuation, '::')) - if marker is None: - return None next_ = self.token_next(marker, False) - if next_ is None: - return None - return next_.value + return next_.value if next_ else None def get_ordering(self): """Returns the ordering or ``None`` as uppercase string.""" ordering = self.token_next_by(t=T.Keyword.Order) - if ordering is None: - return None - return ordering.normalized + return ordering.normalized if ordering else None def get_array_indices(self): """Returns an iterator of index token lists""" |
