From b20497d2a8c11a6210e6925f9cad6d97ff7f713b Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Mon, 10 Aug 2015 07:35:50 -0400
Subject: Add docstring for source_token_lines

---
 coverage/plugin.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/coverage/plugin.py b/coverage/plugin.py
index aa2d2088..5d61e7f8 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -204,9 +204,27 @@ class FileReporter(object):
             return f.read()
 
     def source_token_lines(self):
-        """Return the 'tokenized' text for the code.
+        """Generate a series of tokenized lines, one for each line in `source`.
+
+        These tokens are used for syntax-colored reports.
+
+        Each line is a list of pairs, each pair is a token::
+
+            [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+
+        Each pair has a token class, and the token text. The token classes are:
+
+        * `'com'`: a comment
+        * `'key'`: a keyword
+        * `'nam'`: a name, or identifier
+        * `'num'`: a number
+        * `'op'`: an operator
+        * `'str'`: a string literal
+        * `'txt'`: some other kind of text
+
+        If you concatenate all the token texts, and then join them with newlines,
+        you should have your original `source` back.
 
-        'str', 'nam', 'num', 'key', 'com', 'op'
         """
         # A generic implementation, each line is one "txt" token.
         for line in self.source().splitlines():
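
As a quick illustration of the contract this docstring describes, here is a
minimal, self-contained sketch. The `TextFileReporter` class is a hypothetical
stand-in, not part of coverage.py; only the behavior of `source()` and
`source_token_lines()` (the generic one-"txt"-token-per-line implementation)
comes from the patch above::

    class TextFileReporter:
        """Hypothetical stand-in for a FileReporter subclass."""

        def __init__(self, text):
            self._text = text

        def source(self):
            return self._text

        def source_token_lines(self):
            # The generic implementation from the patch: each line of
            # source becomes a single ('txt', line) token.
            for line in self.source().splitlines():
                yield [('txt', line)]


    source_text = "def hello():\n    return 42\n"
    reporter = TextFileReporter(source_text)

    # Concatenate the token texts on each line, then join the lines with
    # newlines: per the docstring, the original source comes back.
    rebuilt = "\n".join(
        "".join(text for _token_class, text in line_tokens)
        for line_tokens in reporter.source_token_lines()
    )
    # splitlines() drops the trailing newline, so compare without it.
    assert rebuilt == source_text.rstrip("\n")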