summary refs log tree commit diff
path: root/coverage/parser.py
diff options
context:
space:
mode:
Diffstat (limited to 'coverage/parser.py')
-rw-r--r--coverage/parser.py221
1 files changed, 112 insertions, 109 deletions
diff --git a/coverage/parser.py b/coverage/parser.py
index fc751eb2..7b8a60f1 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -1,4 +1,7 @@
-"""Code parsing for Coverage."""
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Code parsing for coverage.py."""
import collections
import dis
@@ -9,31 +12,15 @@ import tokenize
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints
from coverage.bytecode import ByteCodes, CodeObjects
-from coverage.misc import nice_pair, expensive, join_regex
+from coverage.misc import contract, nice_pair, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
-from coverage.phystokens import generate_tokens
-
+from coverage.phystokens import compile_unicode, generate_tokens
-class CodeParser(object):
- """
- Base class for any code parser.
- """
- def translate_lines(self, lines):
- return set(lines)
- def translate_arcs(self, arcs):
- return arcs
-
- def exit_counts(self):
- return {}
-
- def arcs(self):
- return []
-
-
-class PythonParser(CodeParser):
+class PythonParser(object):
"""Parse code to find executable lines, excluded lines, etc."""
+ @contract(text='unicode|None')
def __init__(self, text=None, filename=None, exclude=None):
"""
Source can be provided as `text`, the text itself, or `filename`, from
@@ -51,40 +38,47 @@ class PythonParser(CodeParser):
except IOError as err:
raise NoSource(
"No source for code: '%s': %s" % (self.filename, err)
- )
-
- if self.text:
- assert isinstance(self.text, str)
- # Scrap the BOM if it exists.
- # (Used to do this, but no longer. Not sure what bad will happen
- # if we don't do it.)
- # if ord(self.text[0]) == 0xfeff:
- # self.text = self.text[1:]
+ )
self.exclude = exclude
- self.show_tokens = False
-
# The text lines of the parsed code.
self.lines = self.text.split('\n')
- # The line numbers of excluded lines of code.
+ # The normalized line numbers of the statements in the code. Exclusions
+ # are taken into account, and statements are adjusted to their first
+ # lines.
+ self.statements = set()
+
+ # The normalized line numbers of the excluded lines in the code,
+ # adjusted to their first lines.
self.excluded = set()
- # The line numbers of docstring lines.
- self.docstrings = set()
+ # The raw_* attributes are only used in this class, and in
+ # lab/parser.py to show how this class is working.
+
+ # The line numbers that start statements, as reported by the line
+ # number table in the bytecode.
+ self.raw_statements = set()
+
+ # The raw line numbers of excluded lines of code, as marked by pragmas.
+ self.raw_excluded = set()
# The line numbers of class definitions.
- self.classdefs = set()
+ self.raw_classdefs = set()
- # A dict mapping line numbers to (lo,hi) for multi-line statements.
- self.multiline = {}
+ # The line numbers of docstring lines.
+ self.raw_docstrings = set()
- # The line numbers that start statements.
- self.statement_starts = set()
+ # Internal detail, used by lab/parser.py.
+ self.show_tokens = False
- # Lazily-created ByteParser
+ # A dict mapping line numbers to (lo,hi) for multi-line statements.
+ self._multiline = {}
+
+ # Lazily-created ByteParser and arc data.
self._byte_parser = None
+ self._all_arcs = None
@property
def byte_parser(self):
@@ -111,21 +105,23 @@ class PythonParser(CodeParser):
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
- A handful of member fields are updated.
+ A handful of attributes are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
- self.excluded = self.lines_matching(self.exclude)
+ self.raw_excluded = self.lines_matching(self.exclude)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
indent = 0
exclude_indent = 0
excluding = False
+ excluding_decorators = False
prev_toktype = token.INDENT
first_line = None
empty = True
+ first_on_line = True
tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
@@ -133,37 +129,49 @@ class PythonParser(CodeParser):
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
- ))
+ ))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
- elif toktype == token.NAME and ttext == 'class':
- # Class definitions look like branches in the byte code, so
- # we need to exclude them. The simplest way is to note the
- # lines with the 'class' keyword.
- self.classdefs.add(slineno)
- elif toktype == token.OP and ttext == ':':
- if not excluding and elineno in self.excluded:
- # Start excluding a suite. We trigger off of the colon
- # token so that the #pragma comment will be recognized on
- # the same line as the colon.
- exclude_indent = indent
- excluding = True
+ elif toktype == token.NAME:
+ if ttext == 'class':
+ # Class definitions look like branches in the byte code, so
+ # we need to exclude them. The simplest way is to note the
+ # lines with the 'class' keyword.
+ self.raw_classdefs.add(slineno)
+ elif toktype == token.OP:
+ if ttext == ':':
+ should_exclude = (elineno in self.raw_excluded) or excluding_decorators
+ if not excluding and should_exclude:
+ # Start excluding a suite. We trigger off of the colon
+ # token so that the #pragma comment will be recognized on
+ # the same line as the colon.
+ self.raw_excluded.add(elineno)
+ exclude_indent = indent
+ excluding = True
+ excluding_decorators = False
+ elif ttext == '@' and first_on_line:
+ # A decorator.
+ if elineno in self.raw_excluded:
+ excluding_decorators = True
+ if excluding_decorators:
+ self.raw_excluded.add(elineno)
elif toktype == token.STRING and prev_toktype == token.INDENT:
# Strings that are first on an indented line are docstrings.
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
- self.docstrings.update(range(slineno, elineno+1))
+ self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
for l in range(first_line, elineno+1):
- self.multiline[l] = first_line
+ self._multiline[l] = first_line
first_line = None
+ first_on_line = True
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-whitespace token.
@@ -176,17 +184,18 @@ class PythonParser(CodeParser):
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
- self.excluded.add(elineno)
+ self.raw_excluded.add(elineno)
+ first_on_line = False
prev_toktype = toktype
# Find the starts of the executable statements.
if not empty:
- self.statement_starts.update(self.byte_parser._find_statements())
+ self.raw_statements.update(self.byte_parser._find_statements())
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
- first_line = self.multiline.get(line)
+ first_line = self._multiline.get(line)
if first_line:
return first_line
else:
@@ -202,83 +211,77 @@ class PythonParser(CodeParser):
return set(self.first_line(l) for l in lines)
def translate_lines(self, lines):
+ """Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
def translate_arcs(self, arcs):
- return [
- (self.first_line(a), self.first_line(b))
- for (a, b) in arcs
- ]
+ """Implement `FileReporter.translate_arcs`."""
+ return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
- Return values are 1) a set of executable line numbers, and 2) a set of
- excluded line numbers.
-
- Reported line numbers are normalized to the first line of multi-line
- statements.
+ Sets the .excluded and .statements attributes, normalized to the first
+ line of multi-line statements.
"""
try:
self._raw_parse()
- except (tokenize.TokenError, IndentationError) as tokerr:
- msg, lineno = tokerr.args # pylint: disable=unpacking-non-sequence
+ except (tokenize.TokenError, IndentationError) as err:
+ if hasattr(err, "lineno"):
+ lineno = err.lineno # IndentationError
+ else:
+ lineno = err.args[1][0] # TokenError
raise NotPython(
- "Couldn't parse '%s' as Python source: '%s' at %s" %
- (self.filename, msg, lineno)
+ u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
+ self.filename, err.args[0], lineno
)
+ )
- excluded_lines = self.first_lines(self.excluded)
- ignore = set()
- ignore.update(excluded_lines)
- ignore.update(self.docstrings)
- starts = self.statement_starts - ignore
- lines = self.first_lines(starts)
- lines -= ignore
+ self.excluded = self.first_lines(self.raw_excluded)
- return lines, excluded_lines
+ ignore = self.excluded | self.raw_docstrings
+ starts = self.raw_statements - ignore
+ self.statements = self.first_lines(starts) - ignore
- @expensive
def arcs(self):
"""Get information about the arcs available in the code.
- Returns a sorted list of line number pairs. Line numbers have been
- normalized to the first line of multi-line statements.
+ Returns a set of line number pairs. Line numbers have been normalized
+ to the first line of multi-line statements.
"""
- all_arcs = []
- for l1, l2 in self.byte_parser._all_arcs():
- fl1 = self.first_line(l1)
- fl2 = self.first_line(l2)
- if fl1 != fl2:
- all_arcs.append((fl1, fl2))
- return sorted(all_arcs)
-
- @expensive
+ if self._all_arcs is None:
+ self._all_arcs = set()
+ for l1, l2 in self.byte_parser._all_arcs():
+ fl1 = self.first_line(l1)
+ fl2 = self.first_line(l2)
+ if fl1 != fl2:
+ self._all_arcs.add((fl1, fl2))
+ return self._all_arcs
+
def exit_counts(self):
- """Get a mapping from line numbers to count of exits from that line.
+        """Get a count of exits from each line.
Excluded lines are excluded.
"""
- excluded_lines = self.first_lines(self.excluded)
exit_counts = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
continue
- if l1 in excluded_lines:
+ if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
- if l2 in excluded_lines:
+ if l2 in self.excluded:
# Arcs to excluded lines shouldn't count.
continue
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
- for l in self.classdefs:
- # Ensure key is there: classdefs can include excluded lines.
+ for l in self.raw_classdefs:
+ # Ensure key is there: class definitions can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
@@ -309,7 +312,7 @@ OPS_CODE_END = _opcode_set('RETURN_VALUE')
OPS_CHUNK_END = _opcode_set(
'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
'BREAK_LOOP', 'CONTINUE_LOOP',
- )
+)
# Opcodes that unconditionally begin a new code chunk. By starting new chunks
# with unconditional jump instructions, we neatly deal with jumps to jumps
@@ -319,7 +322,7 @@ OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD')
# Opcodes that push a block on the block stack.
OPS_PUSH_BLOCK = _opcode_set(
'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH'
- )
+)
# Block types for exception handling.
OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
@@ -334,7 +337,7 @@ OPS_NO_JUMP = OPS_PUSH_BLOCK
OP_BREAK_LOOP = _opcode('BREAK_LOOP')
OP_END_FINALLY = _opcode('END_FINALLY')
OP_COMPARE_OP = _opcode('COMPARE_OP')
-COMPARE_EXCEPTION = 10 # just have to get this const from the code.
+COMPARE_EXCEPTION = 10 # just have to get this constant from the code.
OP_LOAD_CONST = _opcode('LOAD_CONST')
OP_RETURN_VALUE = _opcode('RETURN_VALUE')
@@ -342,16 +345,17 @@ OP_RETURN_VALUE = _opcode('RETURN_VALUE')
class ByteParser(object):
"""Parse byte codes to understand the structure of code."""
+ @contract(text='unicode')
def __init__(self, text, code=None, filename=None):
self.text = text
if code:
self.code = code
else:
try:
- self.code = compile(text, filename, "exec")
+ self.code = compile_unicode(text, filename, "exec")
except SyntaxError as synerr:
raise NotPython(
- "Couldn't parse '%s' as Python source: '%s' at line %d" % (
+ u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
filename, synerr.msg, synerr.lineno
)
)
@@ -361,10 +365,9 @@ class ByteParser(object):
for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']:
if not hasattr(self.code, attr):
raise CoverageException(
- "This implementation of Python doesn't support code "
- "analysis.\n"
+ "This implementation of Python doesn't support code analysis.\n"
"Run coverage.py under CPython for this command."
- )
+ )
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
@@ -682,4 +685,4 @@ class Chunk(object):
"!" if self.first else "",
"v" if self.entrance else "",
list(self.exits),
- )
+ )