| field | value | date |
|---|---|---|
| author | Sylvain Thénault <sylvain.thenault@logilab.fr> | 2014-11-19 09:50:28 +0100 |
| committer | Sylvain Thénault <sylvain.thenault@logilab.fr> | 2014-11-19 09:50:28 +0100 |
| commit | d788df3700257c6483d3bfebdc0d232bfec07f1b (patch) | |
| tree | e8a15100c69c971f730039cf72491466e515b6ae /checkers/strings.py | |
| parent | b14dfa4485c705990c7820014cdae78632732553 (diff) | |
| download | pylint-git-d788df3700257c6483d3bfebdc0d232bfec07f1b.tar.gz | |
Diffstat (limited to 'checkers/strings.py')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | checkers/strings.py | 14 |

1 file changed, 6 insertions, 8 deletions
```diff
diff --git a/checkers/strings.py b/checkers/strings.py
index d4b2e65d1..e88085d44 100644
--- a/checkers/strings.py
+++ b/checkers/strings.py
@@ -538,17 +538,18 @@ class StringConstantChecker(BaseTokenChecker):
         self._unicode_literals = 'unicode_literals' in module.future_imports
 
     def process_tokens(self, tokens):
-        for (tok_type, token, (start_row, start_col), _, _) in tokens:
+        for (tok_type, token, (start_row, _), _, _) in tokens:
             if tok_type == tokenize.STRING:
                 # 'token' is the whole un-parsed token; we can look at the start
                 # of it to see whether it's a raw or unicode string etc.
-                self.process_string_token(token, start_row, start_col)
+                self.process_string_token(token, start_row)
 
-    def process_string_token(self, token, start_row, start_col):
+    def process_string_token(self, token, start_row):
         for i, c in enumerate(token):
             if c in '\'\"':
                 quote_char = c
                 break
+        # pylint: disable=undefined-loop-variable
         prefix = token[:i].lower() # markers like u, b, r.
         after_prefix = token[i:]
         if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char:
@@ -557,18 +558,15 @@ class StringConstantChecker(BaseTokenChecker):
             string_body = after_prefix[1:-1]  # Chop off quotes
         # No special checks on raw strings at the moment.
         if 'r' not in prefix:
-            self.process_non_raw_string_token(prefix, string_body,
-                                              start_row, start_col)
+            self.process_non_raw_string_token(prefix, string_body, start_row)
 
-    def process_non_raw_string_token(self, prefix, string_body, start_row,
-                                     start_col):
+    def process_non_raw_string_token(self, prefix, string_body, start_row):
         """check for bad escapes in a non-raw string.
 
         prefix: lowercase string of eg 'ur' string prefix markers.
         string_body: the un-parsed body of the string, not including the quote
                      marks.
         start_row: integer line number in the source.
-        start_col: integer column number in the source.
         """
         # Walk through the string; if we see a backslash then escape the next
         # character, and skip over it.  If we see a non-escaped character,
```
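For context, the dropped `start_col` comes from the third element of the 5-tuples that `tokenize` yields: `(type, string, (start_row, start_col), (end_row, end_col), line)`. The sketch below is not part of the commit (`SOURCE` and the loop are illustrative only); it shows the same unpacking pattern `process_tokens` now uses, keeping only the row:

```python
import io
import tokenize

SOURCE = "x = u'caf\\xe9'\ny = r'\\d+'\n"

# Unpack the token 5-tuple the same way process_tokens() does after this
# change: keep the start row, discard the start column.
for tok_type, token, (start_row, _), _, _ in tokenize.generate_tokens(
        io.StringIO(SOURCE).readline):
    if tok_type == tokenize.STRING:
        # 'token' is the raw, un-parsed source text, prefix and quotes included.
        print(start_row, token)  # 1 u'caf\xe9'  /  2 r'\d+'
```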

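The new `# pylint: disable=undefined-loop-variable` comment is needed because `i` is read after the `for` loop, and pylint cannot prove that the loop body ran (or that `break` was reached) for every input. A minimal sketch of that pattern, using a hypothetical helper name rather than the checker's own method:

```python
def split_string_prefix(token):
    """Split a raw source token such as "u'abc'" into (prefix, rest)."""
    for i, c in enumerate(token):
        if c in '\'"':
            break
    # Without the disable, pylint flags the use of the loop variable `i`
    # below as undefined-loop-variable, since the loop might not have run.
    # pylint: disable=undefined-loop-variable
    return token[:i].lower(), token[i:]

print(split_string_prefix("u'caf\\xe9'"))  # ('u', "'caf\\xe9'")
```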