summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--coverage/parser.py86
-rw-r--r--coverage/python.py11
-rw-r--r--lab/parser.py19
-rw-r--r--tests/test_parser.py11
-rw-r--r--tox.ini5
5 files changed, 63 insertions, 69 deletions
diff --git a/coverage/parser.py b/coverage/parser.py
index a5e96237..111826da 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -12,7 +12,7 @@ import tokenize
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints
from coverage.bytecode import ByteCodes, CodeObjects
-from coverage.misc import contract, nice_pair, expensive, join_regex
+from coverage.misc import contract, nice_pair, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
from coverage.phystokens import compile_unicode, generate_tokens
@@ -42,25 +42,39 @@ class PythonParser(object):
self.exclude = exclude
- self.show_tokens = False
-
# The text lines of the parsed code.
self.lines = self.text.split('\n')
- # The line numbers of excluded lines of code.
+ # The normalized line numbers of the statements in the code. Exclusions
+ # are taken into account, and statements are adjusted to their first
+ # lines.
+ self.statements = set()
+
+ # The normalized line numbers of the excluded lines in the code,
+ # adjusted to their first lines.
self.excluded = set()
- # The line numbers of docstring lines.
- self.docstrings = set()
+ # The raw_* attributes are only used in this class, and in
+ # lab/parser.py to show how this class is working.
+
+ # The line numbers that start statements, as reported by the line
+ # number table in the bytecode.
+ self.raw_statements = set()
+
+ # The raw line numbers of excluded lines of code, as marked by pragmas.
+ self.raw_excluded = set()
# The line numbers of class definitions.
- self.classdefs = set()
+ self.raw_classdefs = set()
- # A dict mapping line numbers to (lo,hi) for multi-line statements.
- self.multiline = {}
+ # The line numbers of docstring lines.
+ self.raw_docstrings = set()
+
+ # Internal detail, used by lab/parser.py.
+ self.show_tokens = False
- # The line numbers that start statements.
- self.statement_starts = set()
+ # A dict mapping line numbers to (lo,hi) for multi-line statements.
+ self._multiline = {}
# Lazily-created ByteParser and arc data.
self._byte_parser = None
@@ -91,12 +105,12 @@ class PythonParser(object):
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
- A handful of member fields are updated.
+ A handful of attributes are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
- self.excluded = self.lines_matching(self.exclude)
+ self.raw_excluded = self.lines_matching(self.exclude)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
@@ -122,9 +136,9 @@ class PythonParser(object):
# Class definitions look like branches in the byte code, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
- self.classdefs.add(slineno)
+ self.raw_classdefs.add(slineno)
elif toktype == token.OP and ttext == ':':
- if not excluding and elineno in self.excluded:
+ if not excluding and elineno in self.raw_excluded:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
@@ -135,14 +149,14 @@ class PythonParser(object):
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
- self.docstrings.update(range(slineno, elineno+1))
+ self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
for l in range(first_line, elineno+1):
- self.multiline[l] = first_line
+ self._multiline[l] = first_line
first_line = None
if ttext.strip() and toktype != tokenize.COMMENT:
@@ -156,17 +170,17 @@ class PythonParser(object):
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
- self.excluded.add(elineno)
+ self.raw_excluded.add(elineno)
prev_toktype = toktype
# Find the starts of the executable statements.
if not empty:
- self.statement_starts.update(self.byte_parser._find_statements())
+ self.raw_statements.update(self.byte_parser._find_statements())
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
- first_line = self.multiline.get(line)
+ first_line = self._multiline.get(line)
if first_line:
return first_line
else:
@@ -187,20 +201,13 @@ class PythonParser(object):
def translate_arcs(self, arcs):
"""Implement `FileReporter.translate_arcs`."""
- return [
- (self.first_line(a), self.first_line(b))
- for (a, b) in arcs
- ]
+ return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
- @expensive
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
- Return values are 1) a set of executable line numbers, and 2) a set of
- excluded line numbers.
-
- Reported line numbers are normalized to the first line of multi-line
- statements.
+ Sets the .excluded and .statements attributes, normalized to the first
+ line of multi-line statements.
"""
try:
@@ -216,15 +223,11 @@ class PythonParser(object):
)
)
- excluded_lines = self.first_lines(self.excluded)
- ignore = set()
- ignore.update(excluded_lines)
- ignore.update(self.docstrings)
- starts = self.statement_starts - ignore
- lines = self.first_lines(starts)
- lines -= ignore
+ self.excluded = self.first_lines(self.raw_excluded)
- return lines, excluded_lines
+ ignore = self.excluded | self.raw_docstrings
+ starts = self.raw_statements - ignore
+ self.statements = self.first_lines(starts) - ignore
def arcs(self):
"""Get information about the arcs available in the code.
@@ -248,22 +251,21 @@ class PythonParser(object):
Excluded lines are excluded.
"""
- excluded_lines = self.first_lines(self.excluded)
exit_counts = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
continue
- if l1 in excluded_lines:
+ if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
- if l2 in excluded_lines:
+ if l2 in self.excluded:
# Arcs to excluded lines shouldn't count.
continue
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
- for l in self.classdefs:
+ for l in self.raw_classdefs:
# Ensure key is there: class definitions can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
diff --git a/coverage/python.py b/coverage/python.py
index 4f589735..5e563828 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -130,21 +130,16 @@ class PythonFileReporter(FileReporter):
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
+ self._parser.parse_source()
return self._parser
- @expensive
def lines(self):
"""Return the line numbers of statements in the file."""
- if self._statements is None:
- self._statements, self._excluded = self.parser.parse_source()
- return self._statements
+ return self.parser.statements
- @expensive
def excluded_lines(self):
        """Return the line numbers of excluded lines in the file."""
- if self._excluded is None:
- self._statements, self._excluded = self.parser.parse_source()
- return self._excluded
+ return self.parser.excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
diff --git a/lab/parser.py b/lab/parser.py
index 97c81d89..bb593f8f 100644
--- a/lab/parser.py
+++ b/lab/parser.py
@@ -17,6 +17,7 @@ from coverage.python import get_python_source
opcode_counts = collections.Counter()
+
class ParserMain(object):
"""A main for code parsing experiments."""
@@ -69,7 +70,6 @@ class ParserMain(object):
for opcode, number in opcode_counts.most_common():
print("{0:20s} {1:6d} {2:.1%}".format(opcode, number, number/total))
-
def one_file(self, options, filename):
"""Process just one file."""
@@ -85,7 +85,7 @@ class ParserMain(object):
self.disassemble(bp, histogram=options.histogram)
arcs = bp._all_arcs()
- if options.chunks:# and not options.dis:
+ if options.chunks:
chunks = bp._all_chunks()
if options.recursive:
print("%6d: %s" % (len(chunks), filename))
@@ -96,7 +96,7 @@ class ParserMain(object):
if options.source or options.tokens:
cp = PythonParser(filename=filename, exclude=r"no\s*cover")
cp.show_tokens = options.tokens
- cp._raw_parse()
+ cp.parse_source()
if options.source:
if options.chunks:
@@ -108,21 +108,19 @@ class ParserMain(object):
for lineno, ltext in enumerate(cp.lines, start=1):
m0 = m1 = m2 = m3 = a = ' '
- if lineno in cp.statement_starts:
+ if lineno in cp.raw_statements:
m0 = '-'
exits = exit_counts.get(lineno, 0)
if exits > 1:
m1 = str(exits)
- if lineno in cp.docstrings:
+ if lineno in cp.raw_docstrings:
m2 = '"'
- if lineno in cp.classdefs:
+ if lineno in cp.raw_classdefs:
m2 = 'C'
- if lineno in cp.excluded:
+ if lineno in cp.raw_excluded:
m3 = 'x'
a = arc_chars[lineno].ljust(arc_width)
- print("%4d %s%s%s%s%s %s" %
- (lineno, m0, m1, m2, m3, a, ltext)
- )
+ print("%4d %s%s%s%s%s %s" % (lineno, m0, m1, m2, m3, a, ltext))
def disassemble(self, byte_parser, histogram=False):
"""Disassemble code, for ad-hoc experimenting."""
@@ -199,4 +197,3 @@ class ParserMain(object):
if __name__ == '__main__':
ParserMain().main(sys.argv[1:])
-
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 84b9a214..372bf79b 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -142,9 +142,8 @@ class ParserFileTest(CoverageTest):
def parse_file(self, filename):
        """Parse the file `filename` as source, and return the `PythonParser` used."""
- # pylint: disable=attribute-defined-outside-init
parser = PythonParser(filename=filename, exclude="nocover")
- self.statements, self.excluded = parser.parse_source()
+ parser.parse_source()
return parser
def test_line_endings(self):
@@ -191,8 +190,8 @@ class ParserFileTest(CoverageTest):
stderr=subprocess.PIPE).communicate()
""")
- self.parse_file("normal.py")
- self.assertEqual(self.statements, set([1]))
+ parser = self.parse_file("normal.py")
+ self.assertEqual(parser.statements, set([1]))
self.make_file("abrupt.py", """\
out, err = subprocess.Popen(
@@ -204,5 +203,5 @@ class ParserFileTest(CoverageTest):
with open("abrupt.py") as f:
self.assertEqual(f.read()[-1], ")")
- self.parse_file("abrupt.py")
- self.assertEqual(self.statements, set([1]))
+ parser = self.parse_file("abrupt.py")
+ self.assertEqual(parser.statements, set([1]))
diff --git a/tox.ini b/tox.ini
index 08282ed3..569bdd9e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -65,9 +65,10 @@ commands =
# Yes, pep8 will read its settings from tox.ini!
[pep8]
-# E265: block comment should start with '# '
+# E265 block comment should start with '# '
+# E266 too many leading '#' for block comment
# E301 expected 1 blank line, found 0
# E401 multiple imports on one line
# The rest are the default ignored warnings.
-ignore = E265,E123,E133,E226,E241,E242,E301,E401
+ignore = E265,E266,E123,E133,E226,E241,E242,E301,E401
max-line-length = 100