summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CHANGES.rst31
-rw-r--r--coverage/backward.py8
-rw-r--r--coverage/bytecode.py65
-rw-r--r--coverage/parser.py799
-rw-r--r--coverage/test_helpers.py65
-rw-r--r--lab/branches.py2
-rw-r--r--lab/disgen.py6
-rw-r--r--lab/parser.py79
-rw-r--r--pylintrc6
-rw-r--r--tests/coveragetest.py26
-rw-r--r--tests/test_arcs.py566
-rw-r--r--tests/test_backward.py3
-rw-r--r--tests/test_coverage.py100
-rw-r--r--tests/test_parser.py2
-rw-r--r--tests/test_summary.py6
-rw-r--r--tests/test_testing.py43
16 files changed, 1276 insertions, 531 deletions
diff --git a/CHANGES.rst b/CHANGES.rst
index a7fbf430..5e20ceae 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -9,6 +9,32 @@ Change history for Coverage.py
Unreleased
----------
+- Branch coverage has been rewritten: it used to be based on bytecode analysis,
+ but now uses AST analysis. This has changed a number of things:
+
+ - More code paths are now considered runnable, especially in `try`/`except`
+ structures. This may mean that coverage.py will identify more code paths
+ as uncovered. This could either raise or lower your overall coverage
+ number.
+
+ - Python 3.5's `async` and `await` keywords are properly supported, fixing
+ `issue 434`_.
+
+ - Some long-standing branch coverage bugs were fixed:
+
+ - `issue 129`_: functions with only a docstring for a body would incorrectly
+ report a missing branch on the ``def`` line.
+
+ - `issue 212`_: code in an ``except`` block could be incorrectly marked as
+ a missing branch.
+
+ - `issue 146`_: context managers (``with`` statements) in a loop or ``try``
+ block could confuse the branch measurement, reporting incorrect partial
+ branches.
+
+ - `issue 422`_: in Python 3.5, an actual partial branch could be marked as
+ complete.
+
- Pragmas to disable coverage measurement can now be used on decorator lines,
and they will apply to the entire function or class being decorated. This
implements the feature requested in `issue 131`_.
@@ -25,7 +51,12 @@ Unreleased
- Form-feed characters would prevent accurate determination of the beginning of
statements in the rest of the file. This is now fixed, closing `issue 461`_.
+.. _issue 129: https://bitbucket.org/ned/coveragepy/issues/129/misleading-branch-coverage-of-empty
.. _issue 131: https://bitbucket.org/ned/coveragepy/issues/131/pragma-on-a-decorator-line-should-affect
+.. _issue 146: https://bitbucket.org/ned/coveragepy/issues/146/context-managers-confuse-branch-coverage
+.. _issue 212: https://bitbucket.org/ned/coveragepy/issues/212/coverage-erroneously-reports-partial
+.. _issue 422: https://bitbucket.org/ned/coveragepy/issues/422/python35-partial-branch-marked-as-fully
+.. _issue 434: https://bitbucket.org/ned/coveragepy/issues/434/indexerror-in-python-35
.. _issue 453: https://bitbucket.org/ned/coveragepy/issues/453/source-code-encoding-can-only-be-specified
.. _issue 455: https://bitbucket.org/ned/coveragepy/issues/455/unusual-exclusions-stopped-working-in
.. _issue 461: https://bitbucket.org/ned/coveragepy/issues/461/multiline-asserts-need-too-many-pragma
diff --git a/coverage/backward.py b/coverage/backward.py
index 4fc72215..50d49a0f 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -93,10 +93,6 @@ if env.PY3:
"""Produce a byte string with the ints from `byte_values`."""
return bytes(byte_values)
- def byte_to_int(byte_value):
- """Turn an element of a bytes object into an int."""
- return byte_value
-
def bytes_to_ints(bytes_value):
"""Turn a bytes object into a sequence of ints."""
# In Python 3, iterating bytes gives ints.
@@ -111,10 +107,6 @@ else:
"""Produce a byte string with the ints from `byte_values`."""
return "".join(chr(b) for b in byte_values)
- def byte_to_int(byte_value):
- """Turn an element of a bytes object into an int."""
- return ord(byte_value)
-
def bytes_to_ints(bytes_value):
"""Turn a bytes object into a sequence of ints."""
for byte in bytes_value:
diff --git a/coverage/bytecode.py b/coverage/bytecode.py
index 82929cef..d823c67c 100644
--- a/coverage/bytecode.py
+++ b/coverage/bytecode.py
@@ -3,73 +3,8 @@
"""Bytecode manipulation for coverage.py"""
-import opcode
import types
-from coverage.backward import byte_to_int
-
-
-class ByteCode(object):
- """A single bytecode."""
- def __init__(self):
- # The offset of this bytecode in the code object.
- self.offset = -1
-
- # The opcode, defined in the `opcode` module.
- self.op = -1
-
- # The argument, a small integer, whose meaning depends on the opcode.
- self.arg = -1
-
- # The offset in the code object of the next bytecode.
- self.next_offset = -1
-
- # The offset to jump to.
- self.jump_to = -1
-
-
-class ByteCodes(object):
- """Iterator over byte codes in `code`.
-
- This handles the logic of EXTENDED_ARG byte codes internally. Those byte
- codes are not returned by this iterator.
-
- Returns `ByteCode` objects.
-
- """
- def __init__(self, code):
- self.code = code
-
- def __getitem__(self, i):
- return byte_to_int(self.code[i])
-
- def __iter__(self):
- offset = 0
- ext_arg = 0
- while offset < len(self.code):
- bc = ByteCode()
- bc.op = self[offset]
- bc.offset = offset
-
- next_offset = offset+1
- if bc.op >= opcode.HAVE_ARGUMENT:
- bc.arg = ext_arg + self[offset+1] + 256*self[offset+2]
- next_offset += 2
-
- label = -1
- if bc.op in opcode.hasjrel:
- label = next_offset + bc.arg
- elif bc.op in opcode.hasjabs:
- label = bc.arg
- bc.jump_to = label
-
- bc.next_offset = offset = next_offset
- if bc.op == opcode.EXTENDED_ARG:
- ext_arg = bc.arg * 256*256
- else:
- ext_arg = 0
- yield bc
-
class CodeObjects(object):
"""Iterate over all the code objects in `code`."""
diff --git a/coverage/parser.py b/coverage/parser.py
index 884d40cb..9f7400e5 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -3,19 +3,20 @@
"""Code parsing for coverage.py."""
+import ast
import collections
-import dis
+import os
import re
import token
import tokenize
from coverage import env
from coverage.backward import range # pylint: disable=redefined-builtin
-from coverage.backward import bytes_to_ints
-from coverage.bytecode import ByteCodes, CodeObjects
+from coverage.backward import bytes_to_ints, string_class
+from coverage.bytecode import CodeObjects
from coverage.misc import contract, nice_pair, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
-from coverage.phystokens import compile_unicode, generate_tokens
+from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser(object):
@@ -65,8 +66,9 @@ class PythonParser(object):
# The raw line numbers of excluded lines of code, as marked by pragmas.
self.raw_excluded = set()
- # The line numbers of class definitions.
+ # The line numbers of class and function definitions.
self.raw_classdefs = set()
+ self.raw_funcdefs = set()
# The line numbers of docstring lines.
self.raw_docstrings = set()
@@ -140,10 +142,12 @@ class PythonParser(object):
indent -= 1
elif toktype == token.NAME:
if ttext == 'class':
- # Class definitions look like branches in the byte code, so
+ # Class definitions look like branches in the bytecode, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
self.raw_classdefs.add(slineno)
+ elif ttext == 'def':
+ self.raw_funcdefs.add(slineno)
elif toktype == token.OP:
if ttext == ':':
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
@@ -256,8 +260,11 @@ class PythonParser(object):
"""
if self._all_arcs is None:
+ aaa = AstArcAnalyzer(self.text, self.raw_funcdefs, self.raw_classdefs)
+ arcs = aaa.collect_arcs()
+
self._all_arcs = set()
- for l1, l2 in self.byte_parser._all_arcs():
+ for l1, l2 in arcs:
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
@@ -292,62 +299,435 @@ class PythonParser(object):
return exit_counts
-## Opcodes that guide the ByteParser.
+class LoopBlock(object):
+ def __init__(self, start):
+ self.start = start
+ self.break_exits = set()
-def _opcode(name):
- """Return the opcode by name from the dis module."""
- return dis.opmap[name]
+class FunctionBlock(object):
+ def __init__(self, start):
+ self.start = start
-def _opcode_set(*names):
- """Return a set of opcodes by the names in `names`."""
- s = set()
- for name in names:
- try:
- s.add(_opcode(name))
- except KeyError:
- pass
- return s
-# Opcodes that leave the code object.
-OPS_CODE_END = _opcode_set('RETURN_VALUE')
+class TryBlock(object):
+ def __init__(self, handler_start=None, final_start=None):
+ self.handler_start = handler_start
+ self.final_start = final_start
+ self.break_from = set()
+ self.continue_from = set()
+ self.return_from = set()
+ self.raise_from = set()
+
+
+class AstArcAnalyzer(object):
+ @contract(text='unicode', funcdefs=set, classdefs=set)
+ def __init__(self, text, funcdefs, classdefs):
+ self.root_node = ast.parse(neuter_encoding_declaration(text))
+ self.funcdefs = funcdefs
+ self.classdefs = classdefs
+
+ if int(os.environ.get("COVERAGE_ASTDUMP", 0)): # pragma: debugging
+ # Dump the AST so that failing tests have helpful output.
+ ast_dump(self.root_node)
+
+ self.arcs = None
+ self.block_stack = []
+
+ def collect_arcs(self):
+ self.arcs = set()
+ self.add_arcs_for_code_objects(self.root_node)
+ return self.arcs
-# Opcodes that unconditionally end the code chunk.
-OPS_CHUNK_END = _opcode_set(
- 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
- 'BREAK_LOOP', 'CONTINUE_LOOP',
-)
+ def blocks(self):
+ """Yield the blocks in nearest-to-farthest order."""
+ return reversed(self.block_stack)
-# Opcodes that unconditionally begin a new code chunk. By starting new chunks
-# with unconditional jump instructions, we neatly deal with jumps to jumps
-# properly.
-OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD')
+ def line_for_node(self, node):
+ """What is the right line number to use for this node?"""
+ node_name = node.__class__.__name__
+ handler = getattr(self, "_line__" + node_name, None)
+ if handler is not None:
+ return handler(node)
+ else:
+ return node.lineno
+
+ def _line__Assign(self, node):
+ return self.line_for_node(node.value)
+
+ def _line__Dict(self, node):
+ # Python 3.5 changed how dict literals are made.
+ if env.PYVERSION >= (3, 5) and node.keys:
+ return node.keys[0].lineno
+ else:
+ return node.lineno
-# Opcodes that push a block on the block stack.
-OPS_PUSH_BLOCK = _opcode_set(
- 'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH'
-)
+ def _line__List(self, node):
+ if node.elts:
+ return self.line_for_node(node.elts[0])
+ else:
+ return node.lineno
+
+ def _line__Module(self, node):
+ if node.body:
+ return self.line_for_node(node.body[0])
+ else:
+ # Modules have no line number, they always start at 1.
+ return 1
-# Block types for exception handling.
-OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
+ OK_TO_DEFAULT = set([
+ "Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
+ "Import", "ImportFrom", "Pass", "Print",
+ ])
-# Opcodes that pop a block from the block stack.
-OPS_POP_BLOCK = _opcode_set('POP_BLOCK')
+ def add_arcs(self, node):
+ """Add the arcs for `node`.
-# Opcodes that have a jump destination, but aren't really a jump.
-OPS_NO_JUMP = OPS_PUSH_BLOCK
+ Return a set of line numbers, exits from this node to the next.
+ """
+ # Yield-froms and awaits can appear anywhere.
+ # TODO: this is probably over-doing it, and too expensive. Can we
+ # instrument the ast walking to see how many nodes we are revisiting?
+ if isinstance(node, ast.stmt):
+ for _, value in ast.iter_fields(node):
+ if isinstance(value, ast.expr) and self.contains_return_expression(value):
+ self.process_return_exits([self.line_for_node(node)])
+ break
+
+ node_name = node.__class__.__name__
+ handler = getattr(self, "_handle__" + node_name, None)
+ if handler is not None:
+ return handler(node)
+
+ if 0:
+ node_name = node.__class__.__name__
+ if node_name not in self.OK_TO_DEFAULT:
+ print("*** Unhandled: {0}".format(node))
+ return set([self.line_for_node(node)])
+
+ def add_body_arcs(self, body, from_line=None, prev_lines=None):
+ if prev_lines is None:
+ prev_lines = set([from_line])
+ for body_node in body:
+ lineno = self.line_for_node(body_node)
+ for prev_lineno in prev_lines:
+ self.arcs.add((prev_lineno, lineno))
+ prev_lines = self.add_arcs(body_node)
+ return prev_lines
+
+ def is_constant_expr(self, node):
+ """Is this a compile-time constant?"""
+ node_name = node.__class__.__name__
+ if node_name in ["NameConstant", "Num"]:
+ return True
+ elif node_name == "Name":
+ if env.PY3 and node.id in ["True", "False", "None"]:
+ return True
+ return False
+
+ # tests to write:
+ # TODO: while EXPR:
+ # TODO: while False:
+ # TODO: listcomps hidden deep in other expressions
+ # TODO: listcomps hidden in lists: x = [[i for i in range(10)]]
+ # TODO: nested function definitions
+
+ def process_break_exits(self, exits):
+ for block in self.blocks():
+ if isinstance(block, LoopBlock):
+ block.break_exits.update(exits)
+ break
+ elif isinstance(block, TryBlock) and block.final_start:
+ block.break_from.update(exits)
+ break
+
+ def process_continue_exits(self, exits):
+ for block in self.blocks():
+ if isinstance(block, LoopBlock):
+ for xit in exits:
+ self.arcs.add((xit, block.start))
+ break
+ elif isinstance(block, TryBlock) and block.final_start:
+ block.continue_from.update(exits)
+ break
+
+ def process_raise_exits(self, exits):
+ for block in self.blocks():
+ if isinstance(block, TryBlock):
+ if block.handler_start:
+ for xit in exits:
+ self.arcs.add((xit, block.handler_start))
+ break
+ elif block.final_start:
+ block.raise_from.update(exits)
+ break
+ elif isinstance(block, FunctionBlock):
+ for xit in exits:
+ self.arcs.add((xit, -block.start))
+ break
+
+ def process_return_exits(self, exits):
+ for block in self.blocks():
+ if isinstance(block, TryBlock) and block.final_start:
+ block.return_from.update(exits)
+ break
+ elif isinstance(block, FunctionBlock):
+ for xit in exits:
+ self.arcs.add((xit, -block.start))
+ break
+
+ ## Handlers
+
+ def _handle__Break(self, node):
+ here = self.line_for_node(node)
+ self.process_break_exits([here])
+ return set()
+
+ def _handle__ClassDef(self, node):
+ return self.process_decorated(node, self.classdefs)
+
+ def process_decorated(self, node, defs):
+ last = self.line_for_node(node)
+ if node.decorator_list:
+ for dec_node in node.decorator_list:
+ dec_start = self.line_for_node(dec_node)
+ if dec_start != last:
+ self.arcs.add((last, dec_start))
+ last = dec_start
+ # The definition line may have been missed, but we should have it in
+ # `defs`.
+ body_start = self.line_for_node(node.body[0])
+ for lineno in range(last+1, body_start):
+ if lineno in defs:
+ self.arcs.add((last, lineno))
+ last = lineno
+ # the body is handled in add_arcs_for_code_objects.
+ return set([last])
+
+ def _handle__Continue(self, node):
+ here = self.line_for_node(node)
+ self.process_continue_exits([here])
+ return set()
+
+ def _handle__For(self, node):
+ start = self.line_for_node(node.iter)
+ self.block_stack.append(LoopBlock(start=start))
+ exits = self.add_body_arcs(node.body, from_line=start)
+ for xit in exits:
+ self.arcs.add((xit, start))
+ my_block = self.block_stack.pop()
+ exits = my_block.break_exits
+ if node.orelse:
+ else_exits = self.add_body_arcs(node.orelse, from_line=start)
+ exits |= else_exits
+ else:
+ # no else clause: exit from the for line.
+ exits.add(start)
+ return exits
+
+ _handle__AsyncFor = _handle__For
+
+ def _handle__FunctionDef(self, node):
+ return self.process_decorated(node, self.funcdefs)
+
+ _handle__AsyncFunctionDef = _handle__FunctionDef
+
+ def _handle__If(self, node):
+ start = self.line_for_node(node.test)
+ exits = self.add_body_arcs(node.body, from_line=start)
+ exits |= self.add_body_arcs(node.orelse, from_line=start)
+ return exits
+
+ def _handle__Raise(self, node):
+ # `raise` statement jumps away, no exits from here.
+ here = self.line_for_node(node)
+ self.process_raise_exits([here])
+ return set()
+
+ def _handle__Return(self, node):
+ here = self.line_for_node(node)
+ self.process_return_exits([here])
+ return set()
+
+ def _handle__Try(self, node):
+ # try/finally is tricky. If there's a finally clause, then we need a
+ # FinallyBlock to track what flows might go through the finally instead
+ # of their normal flow.
+ if node.handlers:
+ handler_start = self.line_for_node(node.handlers[0])
+ else:
+ handler_start = None
-# Individual opcodes we need below.
-OP_BREAK_LOOP = _opcode('BREAK_LOOP')
-OP_END_FINALLY = _opcode('END_FINALLY')
-OP_COMPARE_OP = _opcode('COMPARE_OP')
-COMPARE_EXCEPTION = 10 # just have to get this constant from the code.
-OP_LOAD_CONST = _opcode('LOAD_CONST')
-OP_RETURN_VALUE = _opcode('RETURN_VALUE')
+ if node.finalbody:
+ final_start = self.line_for_node(node.finalbody[0])
+ else:
+ final_start = None
+
+ self.block_stack.append(TryBlock(handler_start=handler_start, final_start=final_start))
+
+ start = self.line_for_node(node)
+ exits = self.add_body_arcs(node.body, from_line=start)
+
+ try_block = self.block_stack.pop()
+ handler_exits = set()
+ last_handler_start = None
+ if node.handlers:
+ for handler_node in node.handlers:
+ handler_start = self.line_for_node(handler_node)
+ if last_handler_start is not None:
+ self.arcs.add((last_handler_start, handler_start))
+ last_handler_start = handler_start
+ handler_exits |= self.add_body_arcs(handler_node.body, from_line=handler_start)
+ if handler_node.type is None:
+ # "except:" doesn't jump to subsequent handlers, or
+ # "finally:".
+ last_handler_start = None
+ # TODO: should we break here? Handlers after "except:"
+ # won't be run. Should coverage know that code can't be
+ # run, or should it flag it as not run?
+
+ if node.orelse:
+ exits = self.add_body_arcs(node.orelse, prev_lines=exits)
+
+ exits |= handler_exits
+ if node.finalbody:
+ final_from = ( # You can get to the `finally` clause from:
+ exits | # the exits of the body or `else` clause,
+ try_block.break_from | # or a `break` in the body,
+ try_block.continue_from | # or a `continue` in the body,
+ try_block.return_from # or a `return` in the body.
+ )
+ if node.handlers and last_handler_start is not None:
+ # If there was an "except X:" clause, then a "raise" in the
+ # body goes to the "except X:" before the "finally", but the
+ # "except" go to the finally.
+ final_from.add(last_handler_start)
+ else:
+ final_from |= try_block.raise_from
+ exits = self.add_body_arcs(node.finalbody, prev_lines=final_from)
+ if try_block.break_from:
+ self.process_break_exits(exits)
+ if try_block.continue_from:
+ self.process_continue_exits(exits)
+ if try_block.raise_from:
+ self.process_raise_exits(exits)
+ if try_block.return_from:
+ self.process_return_exits(exits)
+ return exits
+
+ def _handle__TryExcept(self, node):
+ # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
+ # TryExcept, it means there was no finally, so fake it, and treat as
+ # a general Try node.
+ node.finalbody = []
+ return self._handle__Try(node)
+
+ def _handle__TryFinally(self, node):
+ # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
+ # TryFinally, see if there's a TryExcept nested inside. If so, merge
+ # them. Otherwise, fake fields to complete a Try node.
+ node.handlers = []
+ node.orelse = []
+
+ first = node.body[0]
+ if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
+ assert len(node.body) == 1
+ node.body = first.body
+ node.handlers = first.handlers
+ node.orelse = first.orelse
+
+ return self._handle__Try(node)
+
+ def _handle__While(self, node):
+ constant_test = self.is_constant_expr(node.test)
+ start = to_top = self.line_for_node(node.test)
+ if constant_test:
+ to_top = self.line_for_node(node.body[0])
+ self.block_stack.append(LoopBlock(start=start))
+ exits = self.add_body_arcs(node.body, from_line=start)
+ for xit in exits:
+ self.arcs.add((xit, to_top))
+ exits = set()
+ my_block = self.block_stack.pop()
+ exits.update(my_block.break_exits)
+ if node.orelse:
+ else_exits = self.add_body_arcs(node.orelse, from_line=start)
+ exits |= else_exits
+ else:
+ # No `else` clause: you can exit from the start.
+ if not constant_test:
+ exits.add(start)
+ return exits
+
+ def _handle__With(self, node):
+ start = self.line_for_node(node)
+ exits = self.add_body_arcs(node.body, from_line=start)
+ return exits
+
+ _handle__AsyncWith = _handle__With
+
+ def add_arcs_for_code_objects(self, root_node):
+ for node in ast.walk(root_node):
+ node_name = node.__class__.__name__
+ code_object_handler = getattr(self, "_code_object__" + node_name, None)
+ if code_object_handler is not None:
+ code_object_handler(node)
+
+ def _code_object__Module(self, node):
+ start = self.line_for_node(node)
+ if node.body:
+ exits = self.add_body_arcs(node.body, from_line=-1)
+ for xit in exits:
+ self.arcs.add((xit, -start))
+ else:
+ # Empty module.
+ self.arcs.add((-1, start))
+ self.arcs.add((start, -1))
+
+ def _code_object__FunctionDef(self, node):
+ start = self.line_for_node(node)
+ self.block_stack.append(FunctionBlock(start=start))
+ exits = self.add_body_arcs(node.body, from_line=-1)
+ self.block_stack.pop()
+ for xit in exits:
+ self.arcs.add((xit, -start))
+
+ _code_object__AsyncFunctionDef = _code_object__FunctionDef
+
+ def _code_object__ClassDef(self, node):
+ start = self.line_for_node(node)
+ self.arcs.add((-1, start))
+ exits = self.add_body_arcs(node.body, from_line=start)
+ for xit in exits:
+ self.arcs.add((xit, -start))
+
+ def do_code_object_comprehension(self, node):
+ start = self.line_for_node(node)
+ self.arcs.add((-1, start))
+ self.arcs.add((start, -start))
+
+ _code_object__GeneratorExp = do_code_object_comprehension
+ _code_object__DictComp = do_code_object_comprehension
+ _code_object__SetComp = do_code_object_comprehension
+ if env.PY3:
+ _code_object__ListComp = do_code_object_comprehension
+
+ def _code_object__Lambda(self, node):
+ start = self.line_for_node(node)
+ self.arcs.add((-1, start))
+ self.arcs.add((start, -start))
+
+ def contains_return_expression(self, node):
+ """Is there a yield-from or await in `node` someplace?"""
+ for child in ast.walk(node):
+ if child.__class__.__name__ in ["YieldFrom", "Await"]:
+ return True
+
+ return False
class ByteParser(object):
- """Parse byte codes to understand the structure of code."""
+ """Parse bytecode to understand the structure of code."""
@contract(text='unicode')
def __init__(self, text, code=None, filename=None):
@@ -366,7 +746,7 @@ class ByteParser(object):
# Alternative Python implementations don't always provide all the
# attributes on code objects that we need to do the analysis.
- for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']:
+ for attr in ['co_lnotab', 'co_firstlineno', 'co_consts']:
if not hasattr(self.code, attr):
raise CoverageException(
"This implementation of Python doesn't support code analysis.\n"
@@ -421,272 +801,57 @@ class ByteParser(object):
for _, l in bp._bytes_lines():
yield l
- def _block_stack_repr(self, block_stack): # pragma: debugging
- """Get a string version of `block_stack`, for debugging."""
- blocks = ", ".join(
- "(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack
- )
- return "[" + blocks + "]"
-
- def _split_into_chunks(self):
- """Split the code object into a list of `Chunk` objects.
-
- Each chunk is only entered at its first instruction, though there can
- be many exits from a chunk.
-
- Returns a list of `Chunk` objects.
- """
- # The list of chunks so far, and the one we're working on.
- chunks = []
- chunk = None
-
- # A dict mapping byte offsets of line starts to the line numbers.
- bytes_lines_map = dict(self._bytes_lines())
-
- # The block stack: loops and try blocks get pushed here for the
- # implicit jumps that can occur.
- # Each entry is a tuple: (block type, destination)
- block_stack = []
-
- # Some op codes are followed by branches that should be ignored. This
- # is a count of how many ignores are left.
- ignore_branch = 0
-
- # We have to handle the last two bytecodes specially.
- ult = penult = None
-
- # Get a set of all of the jump-to points.
- jump_to = set()
- bytecodes = list(ByteCodes(self.code.co_code))
- for bc in bytecodes:
- if bc.jump_to >= 0:
- jump_to.add(bc.jump_to)
-
- chunk_lineno = 0
-
- # Walk the byte codes building chunks.
- for bc in bytecodes:
- # Maybe have to start a new chunk.
- start_new_chunk = False
- first_chunk = False
- if bc.offset in bytes_lines_map:
- # Start a new chunk for each source line number.
- start_new_chunk = True
- chunk_lineno = bytes_lines_map[bc.offset]
- first_chunk = True
- elif bc.offset in jump_to:
- # To make chunks have a single entrance, we have to make a new
- # chunk when we get to a place some bytecode jumps to.
- start_new_chunk = True
- elif bc.op in OPS_CHUNK_BEGIN:
- # Jumps deserve their own unnumbered chunk. This fixes
- # problems with jumps to jumps getting confused.
- start_new_chunk = True
-
- if not chunk or start_new_chunk:
- if chunk:
- chunk.exits.add(bc.offset)
- chunk = Chunk(bc.offset, chunk_lineno, first_chunk)
- if not chunks:
- # The very first chunk of a code object is always an
- # entrance.
- chunk.entrance = True
- chunks.append(chunk)
-
- # Look at the opcode.
- if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP:
- if ignore_branch:
- # Someone earlier wanted us to ignore this branch.
- ignore_branch -= 1
- else:
- # The opcode has a jump, it's an exit for this chunk.
- chunk.exits.add(bc.jump_to)
-
- if bc.op in OPS_CODE_END:
- # The opcode can exit the code object.
- chunk.exits.add(-self.code.co_firstlineno)
- if bc.op in OPS_PUSH_BLOCK:
- # The opcode adds a block to the block_stack.
- block_stack.append((bc.op, bc.jump_to))
- if bc.op in OPS_POP_BLOCK:
- # The opcode pops a block from the block stack.
- block_stack.pop()
- if bc.op in OPS_CHUNK_END:
- # This opcode forces the end of the chunk.
- if bc.op == OP_BREAK_LOOP:
- # A break is implicit: jump where the top of the
- # block_stack points.
- chunk.exits.add(block_stack[-1][1])
- chunk = None
- if bc.op == OP_END_FINALLY:
- # For the finally clause we need to find the closest exception
- # block, and use its jump target as an exit.
- for block in reversed(block_stack):
- if block[0] in OPS_EXCEPT_BLOCKS:
- chunk.exits.add(block[1])
- break
- if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
- # This is an except clause. We want to overlook the next
- # branch, so that except's don't count as branches.
- ignore_branch += 1
-
- penult = ult
- ult = bc
-
- if chunks:
- # The last two bytecodes could be a dummy "return None" that
- # shouldn't be counted as real code. Every Python code object seems
- # to end with a return, and a "return None" is inserted if there
- # isn't an explicit return in the source.
- if ult and penult:
- if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE:
- if self.code.co_consts[penult.arg] is None:
- # This is "return None", but is it dummy? A real line
- # would be a last chunk all by itself.
- if chunks[-1].byte != penult.offset:
- ex = -self.code.co_firstlineno
- # Split the last chunk
- last_chunk = chunks[-1]
- last_chunk.exits.remove(ex)
- last_chunk.exits.add(penult.offset)
- chunk = Chunk(
- penult.offset, last_chunk.line, False
- )
- chunk.exits.add(ex)
- chunks.append(chunk)
-
- # Give all the chunks a length.
- chunks[-1].length = bc.next_offset - chunks[-1].byte
- for i in range(len(chunks)-1):
- chunks[i].length = chunks[i+1].byte - chunks[i].byte
-
- #self.validate_chunks(chunks)
- return chunks
-
- def validate_chunks(self, chunks): # pragma: debugging
- """Validate the rule that chunks have a single entrance."""
- # starts is the entrances to the chunks
- starts = set(ch.byte for ch in chunks)
- for ch in chunks:
- assert all((ex in starts or ex < 0) for ex in ch.exits)
-
- def _arcs(self):
- """Find the executable arcs in the code.
-
- Yields pairs: (from,to). From and to are integer line numbers. If
- from is < 0, then the arc is an entrance into the code object. If to
- is < 0, the arc is an exit from the code object.
-
- """
- chunks = self._split_into_chunks()
-
- # A map from byte offsets to the chunk starting at that offset.
- byte_chunks = dict((c.byte, c) for c in chunks)
-
- # Traverse from the first chunk in each line, and yield arcs where
- # the trace function will be invoked.
- for chunk in chunks:
- if chunk.entrance:
- yield (-1, chunk.line)
-
- if not chunk.first:
- continue
-
- chunks_considered = set()
- chunks_to_consider = [chunk]
- while chunks_to_consider:
- # Get the chunk we're considering, and make sure we don't
- # consider it again.
- this_chunk = chunks_to_consider.pop()
- chunks_considered.add(this_chunk)
-
- # For each exit, add the line number if the trace function
- # would be triggered, or add the chunk to those being
- # considered if not.
- for ex in this_chunk.exits:
- if ex < 0:
- yield (chunk.line, ex)
- else:
- next_chunk = byte_chunks[ex]
- if next_chunk in chunks_considered:
- continue
-
- # The trace function is invoked if visiting the first
- # bytecode in a line, or if the transition is a
- # backward jump.
- backward_jump = next_chunk.byte < this_chunk.byte
- if next_chunk.first or backward_jump:
- if next_chunk.line != chunk.line:
- yield (chunk.line, next_chunk.line)
- else:
- chunks_to_consider.append(next_chunk)
-
- def _all_chunks(self):
- """Returns a list of `Chunk` objects for this code and its children.
-
- See `_split_into_chunks` for details.
-
- """
- chunks = []
- for bp in self.child_parsers():
- chunks.extend(bp._split_into_chunks())
-
- return chunks
-
- def _all_arcs(self):
- """Get the set of all arcs in this code object and its children.
-
- See `_arcs` for details.
-
- """
- arcs = set()
- for bp in self.child_parsers():
- arcs.update(bp._arcs())
-
- return arcs
-
-
-class Chunk(object):
- """A sequence of byte codes with a single entrance.
-
- To analyze byte code, we have to divide it into chunks, sequences of byte
- codes such that each chunk has only one entrance, the first instruction in
- the block.
-
- This is almost the CS concept of `basic block`_, except that we're willing
- to have many exits from a chunk, and "basic block" is a more cumbersome
- term.
-
- .. _basic block: http://en.wikipedia.org/wiki/Basic_block
-
- `byte` is the offset to the bytecode starting this chunk.
-
- `line` is the source line number containing this chunk.
-
- `first` is true if this is the first chunk in the source line.
-
- An exit < 0 means the chunk can leave the code (return). The exit is
- the negative of the starting line number of the code block.
-
- The `entrance` attribute is a boolean indicating whether the code object
- can be entered at this chunk.
-
- """
- def __init__(self, byte, line, first):
- self.byte = byte
- self.line = line
- self.first = first
- self.length = 0
- self.entrance = False
- self.exits = set()
+SKIP_DUMP_FIELDS = ["ctx"]
+
+def is_simple_value(value):
+ return (
+ value in [None, [], (), {}, set()] or
+ isinstance(value, (string_class, int, float))
+ )
+
+# TODO: a test of ast_dump?
+def ast_dump(node, depth=0):
+ indent = " " * depth
+ if not isinstance(node, ast.AST):
+ print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
+ return
+
+ lineno = getattr(node, "lineno", None)
+ if lineno is not None:
+ linemark = " @ {0}".format(node.lineno)
+ else:
+ linemark = ""
+ head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)
+
+ named_fields = [
+ (name, value)
+ for name, value in ast.iter_fields(node)
+ if name not in SKIP_DUMP_FIELDS
+ ]
+ if not named_fields:
+ print("{0}>".format(head))
+ elif len(named_fields) == 1 and is_simple_value(named_fields[0][1]):
+ field_name, value = named_fields[0]
+ print("{0} {1}: {2!r}>".format(head, field_name, value))
+ else:
+ print(head)
+ if 0:
+ print("{0}# mro: {1}".format(
+ indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
+ ))
+ next_indent = indent + " "
+ for field_name, value in named_fields:
+ prefix = "{0}{1}:".format(next_indent, field_name)
+ if is_simple_value(value):
+ print("{0} {1!r}".format(prefix, value))
+ elif isinstance(value, list):
+ print("{0} [".format(prefix))
+ for n in value:
+ ast_dump(n, depth + 8)
+ print("{0}]".format(next_indent))
+ else:
+ print(prefix)
+ ast_dump(value, depth + 8)
- def __repr__(self):
- return "<%d+%d @%d%s%s %r>" % (
- self.byte,
- self.length,
- self.line,
- "!" if self.first else "",
- "v" if self.entrance else "",
- list(self.exits),
- )
+ print("{0}>".format(indent))
diff --git a/coverage/test_helpers.py b/coverage/test_helpers.py
index 50cc3298..a76bed35 100644
--- a/coverage/test_helpers.py
+++ b/coverage/test_helpers.py
@@ -162,20 +162,20 @@ class StdStreamCapturingMixin(TestCase):
# nose keeps stdout from littering the screen, so we can safely Tee it,
# but it doesn't capture stderr, so we don't want to Tee stderr to the
# real stderr, since it will interfere with our nice field of dots.
- self.old_stdout = sys.stdout
+ old_stdout = sys.stdout
self.captured_stdout = StringIO()
sys.stdout = Tee(sys.stdout, self.captured_stdout)
- self.old_stderr = sys.stderr
+ old_stderr = sys.stderr
self.captured_stderr = StringIO()
sys.stderr = self.captured_stderr
- self.addCleanup(self.cleanup_std_streams)
+ self.addCleanup(self.cleanup_std_streams, old_stdout, old_stderr)
- def cleanup_std_streams(self):
+ def cleanup_std_streams(self, old_stdout, old_stderr):
"""Restore stdout and stderr."""
- sys.stdout = self.old_stdout
- sys.stderr = self.old_stderr
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
def stdout(self):
"""Return the data written to stdout during the test."""
@@ -186,6 +186,59 @@ class StdStreamCapturingMixin(TestCase):
return self.captured_stderr.getvalue()
+class DelayedAssertionMixin(TestCase):
+ """A test case mixin that provides a `delayed_assertions` context manager.
+
+ Use it like this::
+
+ with self.delayed_assertions():
+ self.assertEqual(x, y)
+ self.assertEqual(z, w)
+
+ All of the assertions will run. The failures will be displayed at the end
+ of the with-statement.
+
+ NOTE: this only works with some assertions. These are known to work:
+
+ - `assertEqual(str, str)`
+
+ - `assertMultilineEqual(str, str)`
+
+ """
+ def __init__(self, *args, **kwargs):
+ super(DelayedAssertionMixin, self).__init__(*args, **kwargs)
+ # This mixin only works with assert methods that call `self.fail`. In
+ # Python 2.7, `assertEqual` didn't, but we can do what Python 3 does,
+ # and use `assertMultiLineEqual` for comparing strings.
+ self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
+ self._delayed_assertions = None
+
+ @contextlib.contextmanager
+ def delayed_assertions(self):
+ """The context manager: collect assertion failures, and report them at the end."""
+ self._delayed_assertions = []
+ old_fail = self.fail
+ self.fail = self._delayed_fail
+ try:
+ yield
+ finally:
+ self.fail = old_fail
+ if self._delayed_assertions:
+ if len(self._delayed_assertions) == 1:
+ self.fail(self._delayed_assertions[0])
+ else:
+ self.fail(
+ "{0} failed assertions:\n{1}".format(
+ len(self._delayed_assertions),
+ "\n".join(self._delayed_assertions),
+ )
+ )
+
+ def _delayed_fail(self, msg=None):
+ """The stand-in for TestCase.fail during delayed_assertions."""
+ self._delayed_assertions.append(msg)
+
+
class TempDirMixin(SysPathAwareMixin, ModuleAwareMixin, TestCase):
"""A test case mixin that creates a temp directory and files in it.
diff --git a/lab/branches.py b/lab/branches.py
index 275eef4a..d1908d0f 100644
--- a/lab/branches.py
+++ b/lab/branches.py
@@ -21,7 +21,7 @@ def my_function(x):
# Notice that "while 1" also has this problem. Even though the compiler
# knows there's no computation at the top of the loop, it's still expressed
- # in byte code as a branch with two possibilities.
+ # in bytecode as a branch with two possibilities.
i = 0
while 1:
diff --git a/lab/disgen.py b/lab/disgen.py
index 4e4c6fa6..26bc56bc 100644
--- a/lab/disgen.py
+++ b/lab/disgen.py
@@ -1,4 +1,4 @@
-"""Disassembler of Python byte code into mnemonics."""
+"""Disassembler of Python bytecode into mnemonics."""
# Adapted from stdlib dis.py, but returns structured information
# instead of printing to stdout.
@@ -133,7 +133,7 @@ def byte_from_code(code, i):
return byte
def findlabels(code):
- """Detect all offsets in a byte code which are jump targets.
+ """Detect all offsets in a bytecode which are jump targets.
Return the list of offsets.
@@ -158,7 +158,7 @@ def findlabels(code):
return labels
def findlinestarts(code):
- """Find the offsets in a byte code which are start of lines in the source.
+ """Find the offsets in a bytecode which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
diff --git a/lab/parser.py b/lab/parser.py
index 1a679e8c..5e5b4b36 100644
--- a/lab/parser.py
+++ b/lab/parser.py
@@ -5,9 +5,13 @@
from __future__ import division
-import glob, os, sys
import collections
-from optparse import OptionParser
+import glob
+import optparse
+import os
+import re
+import sys
+import textwrap
import disgen
@@ -24,7 +28,7 @@ class ParserMain(object):
def main(self, args):
"""A main function for trying the code from the command line."""
- parser = OptionParser()
+ parser = optparse.OptionParser()
parser.add_option(
"-c", action="store_true", dest="chunks",
help="Show basic block chunks"
@@ -72,9 +76,21 @@ class ParserMain(object):
def one_file(self, options, filename):
"""Process just one file."""
+ # `filename` can have a line number suffix. In that case, extract those
+ # lines, dedent them, and use that. This is for trying test cases
+ # embedded in the test files.
+ match = re.search(r"^(.*):(\d+)-(\d+)$", filename)
+ if match:
+ filename, start, end = match.groups()
+ start, end = int(start), int(end)
+ else:
+ start = end = None
try:
text = get_python_source(filename)
+ if start is not None:
+ lines = text.splitlines(True)
+ text = textwrap.dedent("".join(lines[start-1:end]).replace("\\\\", "\\"))
bp = ByteParser(text, filename=filename)
except Exception as err:
print("%s" % (err,))
@@ -82,7 +98,7 @@ class ParserMain(object):
if options.dis:
print("Main code:")
- self.disassemble(bp, histogram=options.histogram)
+ self.disassemble(bp, chunks=options.chunks, histogram=options.histogram)
arcs = bp._all_arcs()
if options.chunks:
@@ -109,29 +125,37 @@ class ParserMain(object):
exit_counts = cp.exit_counts()
for lineno, ltext in enumerate(cp.lines, start=1):
- m0 = m1 = m2 = m3 = a = ' '
+ marks = [' ', ' ', ' ', ' ', ' ']
+ a = ' '
+ if lineno in cp.raw_statements:
+ marks[0] = '-'
if lineno in cp.statements:
- m0 = '='
- elif lineno in cp.raw_statements:
- m0 = '-'
+ marks[1] = '='
exits = exit_counts.get(lineno, 0)
if exits > 1:
- m1 = str(exits)
+ marks[2] = str(exits)
if lineno in cp.raw_docstrings:
- m2 = '"'
+ marks[3] = '"'
if lineno in cp.raw_classdefs:
- m2 = 'C'
+ marks[3] = 'C'
+ if lineno in cp.raw_funcdefs:
+ marks[3] = 'f'
if lineno in cp.raw_excluded:
- m3 = 'x'
- a = arc_chars[lineno].ljust(arc_width)
- print("%4d %s%s%s%s%s %s" % (lineno, m0, m1, m2, m3, a, ltext))
+ marks[4] = 'x'
+
+ if arc_chars:
+ a = arc_chars[lineno].ljust(arc_width)
+ else:
+ a = ""
- def disassemble(self, byte_parser, histogram=False):
+ print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))
+
+ def disassemble(self, byte_parser, chunks=False, histogram=False):
"""Disassemble code, for ad-hoc experimenting."""
for bp in byte_parser.child_parsers():
- chunks = bp._split_into_chunks()
- chunkd = dict((chunk.byte, chunk) for chunk in chunks)
+ if chunks:
+ chunkd = dict((chunk.byte, chunk) for chunk in bp._split_into_chunks())
if bp.text:
srclines = bp.text.splitlines()
else:
@@ -151,11 +175,11 @@ class ParserMain(object):
elif disline.offset > 0:
print("")
line = disgen.format_dis_line(disline)
- chunk = chunkd.get(disline.offset)
- if chunk:
- chunkstr = ":: %r" % chunk
- else:
- chunkstr = ""
+ chunkstr = ""
+ if chunks:
+ chunk = chunkd.get(disline.offset)
+ if chunk:
+ chunkstr = ":: %r" % chunk
print("%-70s%s" % (line, chunkstr))
print("")
@@ -168,6 +192,7 @@ class ParserMain(object):
"""
+ plus_ones = set()
arc_chars = collections.defaultdict(str)
for lfrom, lto in sorted(arcs):
if lfrom < 0:
@@ -176,13 +201,12 @@ class ParserMain(object):
arc_chars[lfrom] += '^'
else:
if lfrom == lto - 1:
- # Don't show obvious arcs.
+ plus_ones.add(lfrom)
continue
if lfrom < lto:
l1, l2 = lfrom, lto
else:
l1, l2 = lto, lfrom
- #w = max(len(arc_chars[l]) for l in range(l1, l2+1))
w = first_all_blanks(arc_chars[l] for l in range(l1, l2+1))
for l in range(l1, l2+1):
if l == lfrom:
@@ -193,6 +217,13 @@ class ParserMain(object):
ch = '|'
arc_chars[l] = set_char(arc_chars[l], w, ch)
+ # Add the plusses as the first character
+ for lineno, arcs in arc_chars.items():
+ arc_chars[lineno] = (
+ ("+" if lineno in plus_ones else " ") +
+ arcs
+ )
+
return arc_chars
diff --git a/pylintrc b/pylintrc
index 09ac1416..4dc9c8e1 100644
--- a/pylintrc
+++ b/pylintrc
@@ -134,7 +134,11 @@ required-attributes=
# Regular expression which should only match functions or classes name which do
# not require a docstring
-no-docstring-rgx=__.*__|test[A-Z_].*|setUp|tearDown
+# Special methods don't: __foo__
+# Test methods don't: testXXXX
+# TestCase overrides don't: setUp, tearDown
+# Dispatched methods don't: _xxx__Xxxx
+no-docstring-rgx=__.*__|test[A-Z_].*|setUp|tearDown|_.*__.*
# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
diff --git a/tests/coveragetest.py b/tests/coveragetest.py
index 3468b794..d79aee7f 100644
--- a/tests/coveragetest.py
+++ b/tests/coveragetest.py
@@ -20,6 +20,7 @@ from coverage.cmdline import CoverageScript
from coverage.debug import _TEST_NAME_FILE, DebugControl
from coverage.test_helpers import (
EnvironmentAwareMixin, StdStreamCapturingMixin, TempDirMixin,
+ DelayedAssertionMixin,
)
from nose.plugins.skip import SkipTest
@@ -35,6 +36,7 @@ class CoverageTest(
EnvironmentAwareMixin,
StdStreamCapturingMixin,
TempDirMixin,
+ DelayedAssertionMixin,
TestCase
):
"""A base class for coverage.py test cases."""
@@ -163,7 +165,7 @@ class CoverageTest(
def check_coverage(
self, text, lines=None, missing="", report="",
excludes=None, partials="",
- arcz=None, arcz_missing=None, arcz_unpredicted=None,
+ arcz=None, arcz_missing="", arcz_unpredicted="",
arcs=None, arcs_missing=None, arcs_unpredicted=None,
):
"""Check the coverage measurement of `text`.
@@ -175,10 +177,11 @@ class CoverageTest(
of the measurement report.
For arc measurement, `arcz` is a string that can be decoded into arcs
- in the code (see `arcz_to_arcs` for the encoding scheme),
+ in the code (see `arcz_to_arcs` for the encoding scheme).
`arcz_missing` are the arcs that are not executed, and
- `arcs_unpredicted` are the arcs executed in the code, but not deducible
- from the code.
+ `arcz_unpredicted` are the arcs executed in the code, but not deducible
+ from the code. These last two default to "", meaning we explicitly
+ check that there are no missing or unpredicted arcs.
Returns the Coverage object, in case you want to poke at it some more.
@@ -191,14 +194,13 @@ class CoverageTest(
if arcs is None and arcz is not None:
arcs = self.arcz_to_arcs(arcz)
- if arcs_missing is None and arcz_missing is not None:
+ if arcs_missing is None:
arcs_missing = self.arcz_to_arcs(arcz_missing)
- if arcs_unpredicted is None and arcz_unpredicted is not None:
+ if arcs_unpredicted is None:
arcs_unpredicted = self.arcz_to_arcs(arcz_unpredicted)
- branch = any(x is not None for x in [arcs, arcs_missing, arcs_unpredicted])
# Start up coverage.py.
- cov = coverage.Coverage(branch=branch)
+ cov = coverage.Coverage(branch=True)
cov.erase()
for exc in excludes or []:
cov.exclude(exc)
@@ -238,15 +240,17 @@ class CoverageTest(
self.fail("None of the missing choices matched %r" % missing_formatted)
if arcs is not None:
- self.assert_equal_args(analysis.arc_possibilities(), arcs, "Possible arcs differ")
+ with self.delayed_assertions():
+ self.assert_equal_args(
+ analysis.arc_possibilities(), arcs,
+ "Possible arcs differ",
+ )
- if arcs_missing is not None:
self.assert_equal_args(
analysis.arcs_missing(), arcs_missing,
"Missing arcs differ"
)
- if arcs_unpredicted is not None:
self.assert_equal_args(
analysis.arcs_unpredicted(), arcs_unpredicted,
"Unpredicted arcs differ"
diff --git a/tests/test_arcs.py b/tests/test_arcs.py
index 18b18fdb..c52bc8aa 100644
--- a/tests/test_arcs.py
+++ b/tests/test_arcs.py
@@ -83,7 +83,8 @@ class SimpleArcTest(CoverageTest):
if len([]) == 0: a = 2
assert a == 2
""",
- arcz=".1 12 23 3.", arcz_missing="")
+ arcz=".1 12 23 3.",
+ )
self.check_coverage("""\
def fn(x):
if x % 2: return True
@@ -102,7 +103,8 @@ class SimpleArcTest(CoverageTest):
b = \\
6
""",
- arcz=".1 15 5-2", arcz_missing="")
+ arcz=".1 15 5-2",
+ )
def test_if_return(self):
self.check_coverage("""\
@@ -114,8 +116,8 @@ class SimpleArcTest(CoverageTest):
x = if_ret(0) + if_ret(1)
assert x == 8
""",
- arcz=".1 16 67 7. .2 23 24 3. 45 5.", arcz_missing=""
- )
+ arcz=".1 16 67 7. .2 23 24 3. 45 5.",
+ )
def test_dont_confuse_exit_and_else(self):
self.check_coverage("""\
@@ -141,15 +143,21 @@ class SimpleArcTest(CoverageTest):
)
def test_unused_lambdas_are_confusing_bug_90(self):
- self.skip("Expected failure: bug 90")
self.check_coverage("""\
a = 1
fn = lambda x: x
b = 3
""",
- arcz=".1 12 .2 2-2 23 3."
+ arcz=".1 12 .2 2-2 23 3.", arcz_missing=".2 2-2",
)
+ def test_what_is_the_sound_of_no_lines_clapping(self):
+ self.check_coverage("""\
+ # __init__.py
+ """,
+ arcz=".1 1.",
+ )
+
class WithTest(CoverageTest):
"""Arc-measuring tests involving context managers."""
@@ -188,7 +196,8 @@ class LoopArcTest(CoverageTest):
a = i
assert a == 9
""",
- arcz=".1 12 21 13 3.", arcz_missing="")
+ arcz=".1 12 21 13 3.",
+ )
self.check_coverage("""\
a = -1
for i in range(0):
@@ -204,7 +213,8 @@ class LoopArcTest(CoverageTest):
a = i + j
assert a == 4
""",
- arcz=".1 12 23 32 21 14 4.", arcz_missing="")
+ arcz=".1 12 23 32 21 14 4.",
+ )
def test_break(self):
self.check_coverage("""\
@@ -267,7 +277,7 @@ class LoopArcTest(CoverageTest):
assert a == 4 and i == 3
""",
arcz=arcz,
- )
+ )
def test_for_if_else_for(self):
self.check_coverage("""\
@@ -311,6 +321,22 @@ class LoopArcTest(CoverageTest):
arcz=".1 .2 23 32 34 47 26 67 7. 18 89 9."
)
+ def test_while_else(self):
+ self.check_coverage("""\
+ def whileelse(seq):
+ while seq:
+ n = seq.pop()
+ if n > 4:
+ break
+ else:
+ n = 99
+ return n
+ assert whileelse([1, 2]) == 99
+ assert whileelse([1, 5]) == 5
+ """,
+ arcz=".1 19 9A A. .2 23 34 45 58 42 27 78 8.",
+ )
+
def test_confusing_for_loop_bug_175(self):
if env.PY3:
# Py3 counts the list comp as a separate code object.
@@ -324,7 +350,8 @@ class LoopArcTest(CoverageTest):
x = tup[0]
y = tup[1]
""",
- arcz=arcz, arcz_missing="", arcz_unpredicted="")
+ arcz=arcz,
+ )
if env.PY3:
arcz = ".1 12 .2 2-2 23 34 42 2."
else:
@@ -335,7 +362,92 @@ class LoopArcTest(CoverageTest):
x = tup[0]
y = tup[1]
""",
- arcz=arcz, arcz_missing="", arcz_unpredicted="")
+ arcz=arcz,
+ )
+
+ def test_generator_expression(self):
+ # Generator expression:
+ self.check_coverage("""\
+ o = ((1,2), (3,4))
+ o = (a for a in o)
+ for tup in o:
+ x = tup[0]
+ y = tup[1]
+ """,
+ arcz=".1 .2 2-2 12 23 34 45 53 3.",
+ )
+
+ def test_other_comprehensions(self):
+ if env.PYVERSION < (2, 7):
+ self.skip("Don't have set or dict comprehensions before 2.7")
+ # Set comprehension:
+ self.check_coverage("""\
+ o = ((1,2), (3,4))
+ o = {a for a in o}
+ for tup in o:
+ x = tup[0]
+ y = tup[1]
+ """,
+ arcz=".1 .2 2-2 12 23 34 45 53 3.",
+ )
+ # Dict comprehension:
+ self.check_coverage("""\
+ o = ((1,2), (3,4))
+ o = {a:1 for a in o}
+ for tup in o:
+ x = tup[0]
+ y = tup[1]
+ """,
+ arcz=".1 .2 2-2 12 23 34 45 53 3.",
+ )
+
+ def test_multiline_dict_comp(self):
+ if env.PYVERSION < (2, 7):
+ self.skip("Don't have set or dict comprehensions before 2.7")
+ if env.PYVERSION < (3, 5):
+ arcz = ".2 2B B-4 2-4"
+ else:
+ arcz = ".2 2B B-3 2-3"
+ # Multiline dict comp:
+ self.check_coverage("""\
+ # comment
+ d = \\
+ {
+ i:
+ str(i)
+ for
+ i
+ in
+ range(9)
+ }
+ x = 11
+ """,
+ arcz=arcz,
+ )
+ # Dict comprehension with multiple "for" clauses:
+ if env.PYVERSION < (3, 5):
+ arcz = ".2 2F F-4 2-4"
+ else:
+ arcz = ".2 2F F-3 2-3"
+ self.check_coverage("""\
+ # comment
+ d = \\
+ {
+ (i, j):
+ str(i+j)
+ for
+ i
+ in
+ range(9)
+ for
+ j
+ in
+ range(13)
+ }
+ x = 15
+ """,
+ arcz=arcz,
+ )
class ExceptionArcTest(CoverageTest):
@@ -361,44 +473,48 @@ class ExceptionArcTest(CoverageTest):
b = 7
assert a == 3 and b == 7
""",
- arcz=".1 12 23 34 58 67 78 8.",
- arcz_missing="58", arcz_unpredicted="46")
+ arcz=".1 12 23 34 46 58 67 78 8.",
+ arcz_missing="58",
+ )
def test_hidden_raise(self):
self.check_coverage("""\
a, b = 1, 1
def oops(x):
- if x % 2: raise Exception("odd")
+ if x % 2:
+ raise Exception("odd")
try:
- a = 5
+ a = 6
oops(1)
- a = 7
+ a = 8
except:
- b = 9
- assert a == 5 and b == 9
+ b = 10
+ assert a == 6 and b == 10
""",
- arcz=".1 12 .3 3-2 24 45 56 67 7A 89 9A A.",
- arcz_missing="67 7A", arcz_unpredicted="68")
+ arcz=".1 12 .3 34 3-2 4-2 25 56 67 78 8B 9A AB B.",
+ arcz_missing="3-2 78 8B", arcz_unpredicted="79",
+ )
def test_except_with_type(self):
self.check_coverage("""\
a, b = 1, 1
def oops(x):
- if x % 2: raise ValueError("odd")
+ if x % 2:
+ raise ValueError("odd")
def try_it(x):
try:
- a = 6
+ a = 7
oops(x)
- a = 8
+ a = 9
except ValueError:
- b = 10
+ b = 11
return a
- assert try_it(0) == 8 # C
- assert try_it(1) == 6 # D
+ assert try_it(0) == 9 # C
+ assert try_it(1) == 7 # D
""",
- arcz=".1 12 .3 3-2 24 4C CD D. .5 56 67 78 8B 9A AB B-4",
- arcz_missing="",
- arcz_unpredicted="79")
+ arcz=".1 12 .3 34 3-2 4-2 25 5D DE E. .6 67 78 89 9C AB BC C-5",
+ arcz_unpredicted="8A",
+ )
def test_try_finally(self):
self.check_coverage("""\
@@ -409,7 +525,8 @@ class ExceptionArcTest(CoverageTest):
c = 5
assert a == 3 and c == 5
""",
- arcz=".1 12 23 35 56 6.", arcz_missing="")
+ arcz=".1 12 23 35 56 6.",
+ )
self.check_coverage("""\
a, c, d = 1, 1, 1
try:
@@ -421,8 +538,9 @@ class ExceptionArcTest(CoverageTest):
d = 8
assert a == 4 and c == 6 and d == 1 # 9
""",
- arcz=".1 12 23 34 46 67 78 89 69 9.",
- arcz_missing="67 78 89", arcz_unpredicted="")
+ arcz=".1 12 23 34 46 78 89 69 9.",
+ arcz_missing="78 89",
+ )
self.check_coverage("""\
a, c, d = 1, 1, 1
try:
@@ -436,8 +554,9 @@ class ExceptionArcTest(CoverageTest):
d = 10 # A
assert a == 4 and c == 8 and d == 10 # B
""",
- arcz=".1 12 23 34 45 68 89 8B 9A AB B.",
- arcz_missing="68 8B", arcz_unpredicted="58")
+ arcz=".1 12 23 34 45 58 68 89 8B 9A AB B.",
+ arcz_missing="68 8B",
+ )
def test_finally_in_loop(self):
self.check_coverage("""\
@@ -455,8 +574,9 @@ class ExceptionArcTest(CoverageTest):
d = 12 # C
assert a == 5 and c == 10 and d == 12 # D
""",
- arcz=".1 12 23 34 3D 45 56 67 68 8A A3 AB BC CD D.",
- arcz_missing="3D", arcz_unpredicted="7A")
+ arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.",
+ arcz_missing="3D",
+ )
self.check_coverage("""\
a, c, d, i = 1, 1, 1, 99
try:
@@ -472,11 +592,12 @@ class ExceptionArcTest(CoverageTest):
d = 12 # C
assert a == 8 and c == 10 and d == 1 # D
""",
- arcz=".1 12 23 34 3D 45 56 67 68 8A A3 AB BC CD D.",
- arcz_missing="67 AB BC CD", arcz_unpredicted="")
+ arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.",
+ arcz_missing="67 7A AB BC CD",
+ )
- def test_break_in_finally(self):
+ def test_break_through_finally(self):
self.check_coverage("""\
a, c, d, i = 1, 1, 1, 99
try:
@@ -492,8 +613,29 @@ class ExceptionArcTest(CoverageTest):
d = 12 # C
assert a == 5 and c == 10 and d == 1 # D
""",
- arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.",
- arcz_missing="3D AB BC CD", arcz_unpredicted="AD")
+ arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AD BC CD D.",
+ arcz_missing="3D BC CD",
+ )
+
+ def test_continue_through_finally(self):
+ self.check_coverage("""\
+ a, b, c, d, i = 1, 1, 1, 1, 99
+ try:
+ for i in range(5):
+ try:
+ a = 5
+ if i > 0:
+ continue
+ b = 8
+ finally:
+ c = 10
+ except:
+ d = 12 # C
+ assert (a, b, c, d) == (5, 8, 10, 1) # D
+ """,
+ arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 BC CD D.",
+ arcz_missing="BC CD",
+ )
def test_finally_in_loop_bug_92(self):
self.check_coverage("""\
@@ -506,14 +648,10 @@ class ExceptionArcTest(CoverageTest):
h = 7
""",
arcz=".1 12 23 35 56 61 17 7.",
- arcz_missing="", arcz_unpredicted="")
+ )
# "except Exception as e" is crucial here.
def test_bug_212(self):
- # Run this test only on Py2 for now. I hope to fix it on Py3
- # eventually...
- if env.PY3:
- self.skip("This doesn't work on Python 3")
self.check_coverage("""\
def b(exc):
try:
@@ -530,8 +668,8 @@ class ExceptionArcTest(CoverageTest):
except:
pass
""",
- arcz=".1 .2 1A 23 34 56 67 68 8. AB BC C. DE E.",
- arcz_missing="C.", arcz_unpredicted="45 7. CD")
+ arcz=".1 .2 1A 23 34 45 56 67 68 7. 8. AB BC C. DE E.",
+ arcz_missing="C.", arcz_unpredicted="CD")
def test_except_finally(self):
self.check_coverage("""\
@@ -562,6 +700,89 @@ class ExceptionArcTest(CoverageTest):
arcz=".1 12 .3 3-2 24 45 56 67 7B 89 9B BC C.",
arcz_missing="67 7B", arcz_unpredicted="68")
+ def test_multiple_except_clauses(self):
+ self.check_coverage("""\
+ a, b, c = 1, 1, 1
+ try:
+ a = 3
+ except ValueError:
+ b = 5
+ except IndexError:
+ a = 7
+ finally:
+ c = 9
+ assert a == 3 and b == 1 and c == 9
+ """,
+ arcz=".1 12 23 45 46 39 59 67 79 69 9A A.",
+ arcz_missing="45 59 46 67 79 69",
+ )
+ self.check_coverage("""\
+ a, b, c = 1, 1, 1
+ try:
+ a = int("xyz") # ValueError
+ except ValueError:
+ b = 5
+ except IndexError:
+ a = 7
+ finally:
+ c = 9
+ assert a == 1 and b == 5 and c == 9
+ """,
+ arcz=".1 12 23 45 46 69 39 59 67 79 9A A.",
+ arcz_missing="39 46 67 79 69",
+ arcz_unpredicted="34",
+ )
+ self.check_coverage("""\
+ a, b, c = 1, 1, 1
+ try:
+ a = [1][3] # IndexError
+ except ValueError:
+ b = 5
+ except IndexError:
+ a = 7
+ finally:
+ c = 9
+ assert a == 7 and b == 1 and c == 9
+ """,
+ arcz=".1 12 23 45 46 39 59 67 79 69 9A A.",
+ arcz_missing="39 45 59 69",
+ arcz_unpredicted="34",
+ )
+ self.check_coverage("""\
+ a, b, c = 1, 1, 1
+ try:
+ try:
+ a = 4/0 # ZeroDivisionError
+ except ValueError:
+ b = 6
+ except IndexError:
+ a = 8
+ finally:
+ c = 10
+ except ZeroDivisionError:
+ pass
+ assert a == 1 and b == 1 and c == 10
+ """,
+ arcz=".1 12 23 34 4A 56 6A 57 78 8A 7A AD BC CD D.",
+ arcz_missing="4A 56 6A 78 8A AD",
+ arcz_unpredicted="45 AB",
+ )
+
+ def test_return_finally(self):
+ self.check_coverage("""\
+ a = [1]
+ def func():
+ try:
+ return 10
+ finally:
+ a.append(6)
+
+ assert func() == 10
+ assert a == [1, 6]
+ """,
+ arcz=".1 12 28 89 9. .3 34 46 6-2",
+ )
+
class YieldTest(CoverageTest):
"""Arc tests for generators."""
@@ -575,8 +796,7 @@ class YieldTest(CoverageTest):
list(gen([1,2,3]))
""",
arcz=".1 .2 23 2. 32 15 5.",
- arcz_missing="",
- arcz_unpredicted="")
+ )
def test_padded_yield_in_loop(self):
self.check_coverage("""\
@@ -591,8 +811,7 @@ class YieldTest(CoverageTest):
list(gen([1,2,3]))
""",
arcz=".1 19 9. .2 23 34 45 56 63 37 7.",
- arcz_missing="",
- arcz_unpredicted="")
+ )
def test_bug_308(self):
self.check_coverage("""\
@@ -604,8 +823,7 @@ class YieldTest(CoverageTest):
print(f())
""",
arcz=".1 15 56 65 5. .2 23 32 2. .3 3-3",
- arcz_missing="",
- arcz_unpredicted="")
+ )
self.check_coverage("""\
def run():
@@ -617,8 +835,7 @@ class YieldTest(CoverageTest):
print(f())
""",
arcz=".1 16 67 76 6. .2 23 34 43 3. 2-2 .4 4-4",
- arcz_missing="",
- arcz_unpredicted="")
+ )
self.check_coverage("""\
def run():
@@ -628,8 +845,7 @@ class YieldTest(CoverageTest):
print(f())
""",
arcz=".1 14 45 54 4. .2 2. 2-2",
- arcz_missing="",
- arcz_unpredicted="")
+ )
def test_bug_324(self):
# This code is tricky: the list() call pulls all the values from gen(),
@@ -647,12 +863,12 @@ class YieldTest(CoverageTest):
".2 23 32 2. " # The gen() function
".3 3-3", # The generator expression
arcz_missing=".3 3-3",
- arcz_unpredicted="")
+ )
def test_coroutines(self):
self.check_coverage("""\
def double_inputs():
- while [1]: # avoid compiler differences
+ while len([1]): # avoid compiler differences
x = yield
x *= 2
yield x
@@ -667,9 +883,26 @@ class YieldTest(CoverageTest):
".1 17 78 89 9A AB B. "
".2 23 34 45 52 2.",
arcz_missing="2.",
- arcz_unpredicted="")
+ )
self.assertEqual(self.stdout(), "20\n12\n")
+ def test_yield_from(self):
+ if env.PYVERSION < (3, 3):
+ self.skip("Python before 3.3 doesn't have 'yield from'")
+ self.check_coverage("""\
+ def gen(inp):
+ i = 2
+ for n in inp:
+ i = 4
+ yield from range(3)
+ i = 6
+ i = 7
+
+ list(gen([1,2,3]))
+ """,
+ arcz=".1 19 9. .2 23 34 45 56 5. 63 37 7.",
+ )
+
class MiscArcTest(CoverageTest):
"""Miscellaneous arc-measuring tests."""
@@ -691,29 +924,222 @@ class MiscArcTest(CoverageTest):
}
assert d
""",
- arcz=arcz)
+ arcz=arcz,
+ )
+ self.check_coverage("""\
+ d = \\
+ { 'a': 2,
+ 'b': 3,
+ 'c': {
+ 'd': 5,
+ 'e': 6,
+ }
+ }
+ assert d
+ """,
+ arcz=".1 19 9-2",
+ )
def test_pathologically_long_code_object(self):
# https://bitbucket.org/ned/coveragepy/issue/359
- # The structure of this file is such that an EXTENDED_ARG byte code is
+ # The structure of this file is such that an EXTENDED_ARG bytecode is
# needed to encode the jump at the end. We weren't interpreting those
# opcodes.
+ # Note that we no longer interpret bytecode at all, but it couldn't
+ # hurt to keep the test...
code = """\
data = [
""" + "".join("""\
- [{i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}],
+ [
+ {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}],
""".format(i=i) for i in range(2000)
) + """\
]
- if __name__ == "__main__":
- print(len(data))
+ print(len(data))
"""
self.check_coverage(
code,
- arcs=[(-1, 1), (1, 2004), (2004, -2), (2004, 2005), (2005, -2)],
+ arcs=[(-1, 1), (1, 4004), (4004, -3)],
+ arcs_missing=[], arcs_unpredicted=[],
)
+ def test_optimized_away_lines(self):
+ self.skip("TODO: fix this test")
+ self.check_coverage("""\
+ a = 1
+ if len([2]):
+ c = 3
+ if 0: # this line isn't in the compiled code.
+ if len([5]):
+ d = 6
+ e = 7
+ """,
+ lines=[1, 2, 3, 7],
+ arcz=".1 12 23 27 37 7.",
+ )
+
+
+class DecoractorArcTest(CoverageTest):
+ """Tests of arcs with decorators."""
+
+ def test_function_decorator(self):
+ self.check_coverage("""\
+ def decorator(arg):
+ def _dec(f):
+ return f
+ return _dec
+
+ @decorator(6)
+ @decorator(
+ len([8]),
+ )
+ def my_function(
+ a=len([11]),
+ ):
+ x = 13
+ a = 14
+ my_function()
+ """,
+ arcz=
+ ".1 16 67 7A AE EF F. " # main line
+ ".2 24 4. .3 3-2 " # decorators
+ ".D D-6 ", # my_function
+ )
+
+ def test_class_decorator(self):
+ self.check_coverage("""\
+ def decorator(arg):
+ def _dec(c):
+ return c
+ return _dec
+
+ @decorator(6)
+ @decorator(
+ len([8]),
+ )
+ class MyObject(
+ object
+ ):
+ X = 13
+ a = 14
+ """,
+ arcz=
+ ".1 16 67 6D 7A AE E. " # main line
+ ".2 24 4. .3 3-2 " # decorators
+ ".6 D-6 ", # MyObject
+ )
+
+
+class LambdaArcTest(CoverageTest):
+ """Tests of lambdas."""
+
+ def test_lambda(self):
+ self.check_coverage("""\
+ fn = (lambda x:
+ x + 2
+ )
+ assert fn(4) == 6
+ """,
+ arcz=".1 14 4-1 1-1",
+ )
+ self.check_coverage("""\
+
+ fn = \\
+ (
+ lambda
+ x:
+ x
+ +
+ 8
+ )
+ assert fn(10) == 18
+ """,
+ arcz=".2 2A A-4 2-4",
+ )
+
+
+class AsyncTest(CoverageTest):
+ """Tests of the new async and await keywords in Python 3.5"""
+
+ def setUp(self):
+ if env.PYVERSION < (3, 5):
+ self.skip("Async features are new in Python 3.5")
+ super(AsyncTest, self).setUp()
+
+ def test_async(self):
+ self.check_coverage("""\
+ import asyncio
+
+ async def compute(x, y): # 3
+ print("Compute %s + %s ..." % (x, y))
+ await asyncio.sleep(0.001)
+ return x + y # 6
+
+ async def print_sum(x, y): # 8
+ result = (0 +
+ await compute(x, y) # A
+ )
+ print("%s + %s = %s" % (x, y, result))
+
+ loop = asyncio.get_event_loop() # E
+ loop.run_until_complete(print_sum(1, 2))
+ loop.close() # G
+ """,
+ arcz=
+ ".1 13 38 8E EF FG G. "
+ ".4 45 56 5-3 6-3 "
+ ".9 9-8 9C C-8",
+ )
+ self.assertEqual(self.stdout(), "Compute 1 + 2 ...\n1 + 2 = 3\n")
+
+ def test_async_for(self):
+ self.check_coverage("""\
+ import asyncio
+
+ class AsyncIteratorWrapper: # 3
+ def __init__(self, obj): # 4
+ self._it = iter(obj)
+
+ async def __aiter__(self): # 7
+ return self
+
+ async def __anext__(self): # A
+ try:
+ return next(self._it)
+ except StopIteration:
+ raise StopAsyncIteration
+
+ async def doit(): # G
+ async for letter in AsyncIteratorWrapper("abc"):
+ print(letter)
+ print(".")
+
+ loop = asyncio.get_event_loop() # L
+ loop.run_until_complete(doit())
+ loop.close()
+ """,
+ arcz=
+ ".1 13 3G GL LM MN N. " # module main line
+ ".3 34 47 7A A-3 " # class definition
+ ".H HI IH HJ J-G " # doit
+ ".5 5-4 " # __init__
+ ".8 8-7 " # __aiter__
+ ".B BC C-A DE E-A ", # __anext__
+ arcz_unpredicted="CD",
+ )
+ self.assertEqual(self.stdout(), "a\nb\nc\n.\n")
+
+ def test_async_with(self):
+ self.check_coverage("""\
+ async def go():
+ async with x:
+ pass
+ """,
+ arcz=".1 1. .2 23 3.",
+ arcz_missing=".2 23 3.",
+ )
+
class ExcludeTest(CoverageTest):
"""Tests of exclusions to indicate known partial branches."""
@@ -732,7 +1158,8 @@ class ExcludeTest(CoverageTest):
f = 9
""",
[1,2,3,4,5,6,7,8,9],
- arcz=".1 12 23 24 34 45 56 57 67 78 89 9. 8.", arcz_missing="")
+ arcz=".1 12 23 24 34 45 56 57 67 78 89 9. 8.",
+ )
def test_custom_pragmas(self):
self.check_coverage("""\
@@ -744,7 +1171,8 @@ class ExcludeTest(CoverageTest):
""",
[1,2,3,4,5],
partials=["only some"],
- arcz=".1 12 23 34 45 25 5.", arcz_missing="")
+ arcz=".1 12 23 34 45 25 5.",
+ )
class LineDataTest(CoverageTest):
diff --git a/tests/test_backward.py b/tests/test_backward.py
index fbb9ad8b..bbecb780 100644
--- a/tests/test_backward.py
+++ b/tests/test_backward.py
@@ -4,7 +4,7 @@
"""Tests that our version shims in backward.py are working."""
from coverage.backunittest import TestCase
-from coverage.backward import iitems, binary_bytes, byte_to_int, bytes_to_ints
+from coverage.backward import iitems, binary_bytes, bytes_to_ints
class BackwardTest(TestCase):
@@ -20,4 +20,3 @@ class BackwardTest(TestCase):
bb = binary_bytes(byte_values)
self.assertEqual(len(bb), len(byte_values))
self.assertEqual(byte_values, list(bytes_to_ints(bb)))
- self.assertEqual(byte_values, [byte_to_int(b) for b in bb])
diff --git a/tests/test_coverage.py b/tests/test_coverage.py
index 081fe11b..227360e3 100644
--- a/tests/test_coverage.py
+++ b/tests/test_coverage.py
@@ -101,7 +101,7 @@ class BasicCoverageTest(CoverageTest):
# Nothing here
d = 6
""",
- [1,2,4,6], report="4 0 100%")
+ [1,2,4,6], report="4 0 0 0 100%")
def test_indentation_wackiness(self):
# Partial final lines are OK.
@@ -109,7 +109,7 @@ class BasicCoverageTest(CoverageTest):
import sys
if not sys.path:
a = 1
- """,
+ """, # indented last line
[1,2,3], "3")
def test_multiline_initializer(self):
@@ -198,6 +198,21 @@ class SimpleStatementTest(CoverageTest):
""",
[1,2,3], "")
+ def test_more_assignments(self):
+ self.check_coverage("""\
+ x = []
+ d = {}
+ d[
+ 4 + len(x)
+ + 5
+ ] = \\
+ d[
+ 8 ** 2
+ ] = \\
+ 9
+ """,
+ [1, 2, 3], "")
+
def test_attribute_assignment(self):
# Attribute assignment
self.check_coverage("""\
@@ -550,6 +565,15 @@ class SimpleStatementTest(CoverageTest):
""",
([1,3,6,7], [1,3,5,6,7], [1,3,4,5,6,7]), "")
+ def test_nonascii(self):
+ self.check_coverage("""\
+ # coding: utf8
+ a = 2
+ b = 3
+ """,
+ [2, 3]
+ )
+
class CompoundStatementTest(CoverageTest):
"""Testing coverage of multi-line compound statements."""
@@ -618,7 +642,8 @@ class CompoundStatementTest(CoverageTest):
z = 7
assert x == 3
""",
- [1,2,3,4,5,7,8], "4-7", report="7 3 57% 4-7")
+ [1,2,3,4,5,7,8], "4-7", report="7 3 4 1 45% 4-7, 2->4",
+ )
self.check_coverage("""\
a = 1; b = 2; c = 3;
if a != 1:
@@ -629,7 +654,8 @@ class CompoundStatementTest(CoverageTest):
z = 7
assert y == 5
""",
- [1,2,3,4,5,7,8], "3, 7", report="7 2 71% 3, 7")
+ [1,2,3,4,5,7,8], "3, 7", report="7 2 4 2 64% 3, 7, 2->3, 4->7",
+ )
self.check_coverage("""\
a = 1; b = 2; c = 3;
if a != 1:
@@ -640,7 +666,8 @@ class CompoundStatementTest(CoverageTest):
z = 7
assert z == 7
""",
- [1,2,3,4,5,7,8], "3, 5", report="7 2 71% 3, 5")
+ [1,2,3,4,5,7,8], "3, 5", report="7 2 4 2 64% 3, 5, 2->3, 4->5",
+ )
def test_elif_no_else(self):
self.check_coverage("""\
@@ -651,7 +678,8 @@ class CompoundStatementTest(CoverageTest):
y = 5
assert x == 3
""",
- [1,2,3,4,5,6], "4-5", report="6 2 67% 4-5")
+ [1,2,3,4,5,6], "4-5", report="6 2 4 1 50% 4-5, 2->4",
+ )
self.check_coverage("""\
a = 1; b = 2; c = 3;
if a != 1:
@@ -660,7 +688,8 @@ class CompoundStatementTest(CoverageTest):
y = 5
assert y == 5
""",
- [1,2,3,4,5,6], "3", report="6 1 83% 3")
+ [1,2,3,4,5,6], "3", report="6 1 4 2 70% 3, 2->3, 4->6",
+ )
def test_elif_bizarre(self):
self.check_coverage("""\
@@ -1008,7 +1037,10 @@ class CompoundStatementTest(CoverageTest):
a = 123
assert a == 123
""",
- [1,2,3,4,5,7,8], "4-5")
+ [1,2,3,4,5,7,8], "4-5",
+ arcz=".1 12 23 45 58 37 78 8.",
+ arcz_missing="45 58",
+ )
self.check_coverage("""\
a = 0
try:
@@ -1020,7 +1052,10 @@ class CompoundStatementTest(CoverageTest):
a = 123
assert a == 99
""",
- [1,2,3,4,5,6,8,9], "8")
+ [1,2,3,4,5,6,8,9], "8",
+ arcz=".1 12 23 34 45 56 69 89 9.",
+ arcz_missing="89",
+ )
def test_try_finally(self):
self.check_coverage("""\
@@ -1366,7 +1401,10 @@ class ExcludeTest(CoverageTest):
a = 123
assert a == 123
""",
- [1,2,3,7,8], "", excludes=['#pragma: NO COVER'])
+ [1,2,3,7,8], "", excludes=['#pragma: NO COVER'],
+ arcz=".1 12 23 37 45 58 78 8.",
+ arcz_missing="45 58",
+ )
self.check_coverage("""\
a = 0
try:
@@ -1378,7 +1416,10 @@ class ExcludeTest(CoverageTest):
a = 123
assert a == 99
""",
- [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'])
+ [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'],
+ arcz=".1 12 23 34 45 56 69 89 9.",
+ arcz_missing="89",
+ )
def test_excluding_try_except_pass(self):
self.check_coverage("""\
@@ -1412,7 +1453,10 @@ class ExcludeTest(CoverageTest):
a = 123
assert a == 123
""",
- [1,2,3,7,8], "", excludes=['#pragma: NO COVER'])
+ [1,2,3,7,8], "", excludes=['#pragma: NO COVER'],
+ arcz=".1 12 23 37 45 58 78 8.",
+ arcz_missing="45 58",
+ )
self.check_coverage("""\
a = 0
try:
@@ -1424,7 +1468,10 @@ class ExcludeTest(CoverageTest):
x = 2
assert a == 99
""",
- [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'])
+ [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'],
+ arcz=".1 12 23 34 45 56 69 89 9.",
+ arcz_missing="89",
+ )
def test_excluding_if_pass(self):
# From a comment on the coverage.py page by Michael McNeil Forbes:
@@ -1614,7 +1661,9 @@ class Py25Test(CoverageTest):
b = 2
assert a == 1 and b == 2
""",
- [1,2,3,4,5,7,8], "4-5")
+ [1,2,3,4,5,7,8], "4-5",
+ arcz=".1 12 23 37 45 57 78 8.", arcz_missing="45 57",
+ )
self.check_coverage("""\
a = 0; b = 0
try:
@@ -1626,7 +1675,9 @@ class Py25Test(CoverageTest):
b = 2
assert a == 99 and b == 2
""",
- [1,2,3,4,5,6,8,9], "")
+ [1,2,3,4,5,6,8,9], "",
+ arcz=".1 12 23 34 45 56 68 89 9.",
+ )
self.check_coverage("""\
a = 0; b = 0
try:
@@ -1640,7 +1691,9 @@ class Py25Test(CoverageTest):
b = 2
assert a == 123 and b == 2
""",
- [1,2,3,4,5,6,7,8,10,11], "6")
+ [1,2,3,4,5,6,7,8,10,11], "6",
+ arcz=".1 12 23 34 45 56 57 78 6A 8A AB B.", arcz_missing="56 6A",
+ )
self.check_coverage("""\
a = 0; b = 0
try:
@@ -1656,7 +1709,10 @@ class Py25Test(CoverageTest):
b = 2
assert a == 17 and b == 2
""",
- [1,2,3,4,5,6,7,8,9,10,12,13], "6, 9-10")
+ [1,2,3,4,5,6,7,8,9,10,12,13], "6, 9-10",
+ arcz=".1 12 23 34 45 56 6C 57 78 8C 79 9A AC CD D.",
+ arcz_missing="56 6C 79 9A AC",
+ )
self.check_coverage("""\
a = 0; b = 0
try:
@@ -1669,7 +1725,10 @@ class Py25Test(CoverageTest):
b = 2
assert a == 123 and b == 2
""",
- [1,2,3,4,5,7,9,10], "4-5")
+ [1,2,3,4,5,7,9,10], "4-5",
+ arcz=".1 12 23 37 45 59 79 9A A.",
+ arcz_missing="45 59",
+ )
self.check_coverage("""\
a = 0; b = 0
try:
@@ -1683,7 +1742,10 @@ class Py25Test(CoverageTest):
b = 2
assert a == 99 and b == 2
""",
- [1,2,3,4,5,6,8,10,11], "8")
+ [1,2,3,4,5,6,8,10,11], "8",
+ arcz=".1 12 23 34 45 56 6A 8A AB B.",
+ arcz_missing="8A",
+ )
class ModuleTest(CoverageTest):
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 2a4c88a7..c32fdc4d 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -72,7 +72,7 @@ class PythonParserTest(CoverageTest):
b = 9
""")
self.assertEqual(parser.exit_counts(), {
- 1: 1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1
+ 1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1
})
def test_excluded_classes(self):
diff --git a/tests/test_summary.py b/tests/test_summary.py
index 56c0b831..9d7a6fe7 100644
--- a/tests/test_summary.py
+++ b/tests/test_summary.py
@@ -607,7 +607,7 @@ class SummaryTest2(CoverageTest):
def test_empty_files(self):
# Shows that empty files like __init__.py are listed as having zero
# statements, not one statement.
- cov = coverage.Coverage()
+ cov = coverage.Coverage(branch=True)
cov.start()
import usepkgs # pragma: nested # pylint: disable=import-error,unused-variable
cov.stop() # pragma: nested
@@ -617,8 +617,8 @@ class SummaryTest2(CoverageTest):
report = repout.getvalue().replace('\\', '/')
report = re.sub(r"\s+", " ", report)
- self.assertIn("tests/modules/pkg1/__init__.py 2 0 100%", report)
- self.assertIn("tests/modules/pkg2/__init__.py 0 0 100%", report)
+ self.assertIn("tests/modules/pkg1/__init__.py 2 0 0 0 100%", report)
+ self.assertIn("tests/modules/pkg2/__init__.py 0 0 0 0 100%", report)
class ReportingReturnValueTest(CoverageTest):
diff --git a/tests/test_testing.py b/tests/test_testing.py
index 9fc7f11d..1dafdd0d 100644
--- a/tests/test_testing.py
+++ b/tests/test_testing.py
@@ -6,13 +6,15 @@
import datetime
import os
+import re
import sys
+import textwrap
import coverage
from coverage.backunittest import TestCase
from coverage.backward import to_bytes
from coverage.files import actual_path
-from coverage.test_helpers import EnvironmentAwareMixin, TempDirMixin
+from coverage.test_helpers import EnvironmentAwareMixin, TempDirMixin, DelayedAssertionMixin
from tests.coveragetest import CoverageTest
@@ -97,6 +99,45 @@ class EnvironmentAwareMixinTest(EnvironmentAwareMixin, TestCase):
self.assertNotIn("XYZZY_PLUGH", os.environ)
+class DelayedAssertionMixinTest(DelayedAssertionMixin, TestCase):
+ """Test the `delayed_assertions` method."""
+
+ def test_delayed_assertions(self):
+ # Two assertions can be shown at once:
+ msg = re.escape(textwrap.dedent("""\
+ 2 failed assertions:
+ 'x' != 'y'
+ - x
+ + y
+
+ 'w' != 'z'
+ - w
+ + z
+ """))
+ with self.assertRaisesRegex(AssertionError, msg):
+ with self.delayed_assertions():
+ self.assertEqual("x", "y")
+ self.assertEqual("w", "z")
+
+ # It's also OK if only one fails:
+ msg = re.escape(textwrap.dedent("""\
+ 'w' != 'z'
+ - w
+ + z
+ """))
+ with self.assertRaisesRegex(AssertionError, msg):
+ with self.delayed_assertions():
+ self.assertEqual("x", "x")
+ self.assertEqual("w", "z")
+
+ # If an error happens, it gets reported immediately, no special
+ # handling:
+ with self.assertRaises(ZeroDivisionError):
+ with self.delayed_assertions():
+ self.assertEqual("x", "y")
+ self.assertEqual("w", 1/0)
+
+
class CoverageTestTest(CoverageTest):
"""Test the methods in `CoverageTest`."""