 CHANGES.rst            |   5
 README.rst             |   6
 coverage/cmdline.py    |   7
 coverage/collector.py  |  22
 coverage/config.py     |   2
 coverage/control.py    |  21
 coverage/data.py       |  11
 coverage/sqldata.py    | 129
 doc/cmd.rst            |   6
 doc/config.rst         |   5
 doc/contexts.rst       |  51
 doc/index.rst          |   1
 tests/test_cmdline.py  |   2
 tests/test_context.py  | 104
 tests/test_debug.py    |   4
 tests/test_summary.py  |  58
 16 files changed, 354 insertions, 80 deletions
diff --git a/CHANGES.rst b/CHANGES.rst
index 8f8cd32b..089fec7f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -17,6 +17,11 @@ Change history for Coverage.py
Unreleased
----------
+- Context support: static contexts let you specify a label for a coverage run,
+ which is recorded in the data, and retained when you combine files. See
+ :ref:`contexts` for more information. Currently, only static contexts are
+ supported, with no reporting features.
+
- Environment variable substitution in configuration files now supports two
syntaxes for controlling the behavior of undefined variables: if ``VARNAME``
is not defined, ``${VARNAME?}`` will raise an error, and ``${VARNAME-default
diff --git a/README.rst b/README.rst
index b1b2e0ee..3ad446c5 100644
--- a/README.rst
+++ b/README.rst
@@ -19,7 +19,8 @@ library to determine which lines are executable, and which have been executed.
Coverage.py runs on many versions of Python:
-* CPython 2.7 and 3.4 through 3.7.
+* CPython 2.7.
+* CPython 3.4 through 3.7.
* PyPy2 6.0 and PyPy3 6.0.
* Jython 2.7.1, though not for reporting.
* IronPython 2.7.7, though not for reporting.
@@ -31,7 +32,8 @@ Documentation is on `Read the Docs`_. Code repository and issue tracker are on
.. _GitHub: https://github.com/nedbat/coveragepy
-**New in 5.0:** SQLite data storage, dropped support for Python 2.6 and 3.3.
+**New in 5.0:** SQLite data storage, contexts, dropped support for Python 2.6
+and 3.3.
New in 4.5: Configurator plug-ins.
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 8f6b0a90..7137852d 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -43,6 +43,10 @@ class Opts(object):
"Valid values are: %s."
) % ", ".join(CONCURRENCY_CHOICES),
)
+ context = optparse.make_option(
+ '', '--context', action='store', metavar="LABEL",
+ help="The context label to record for this coverage run",
+ )
debug = optparse.make_option(
'', '--debug', action='store', metavar="OPTS",
help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
@@ -160,6 +164,7 @@ class CoverageOptionParser(optparse.OptionParser, object):
append=None,
branch=None,
concurrency=None,
+ context=None,
debug=None,
directory=None,
fail_under=None,
@@ -358,6 +363,7 @@ CMDS = {
Opts.append,
Opts.branch,
Opts.concurrency,
+ Opts.context,
Opts.include,
Opts.module,
Opts.omit,
@@ -482,6 +488,7 @@ class CoverageScript(object):
debug=debug,
concurrency=options.concurrency,
check_preimported=True,
+ context=options.context,
)
if options.action == "debug":
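The new --context switch simply threads the label through to the Coverage constructor when the run command executes. A quick way to exercise it end to end is the library's own console entry point; the sketch below is only illustrative: it assumes a small prog.py exists in the current directory and that the SQLite data backend is in use (the JSON backend rejects contexts).

    # Hedged sketch: drive "coverage run --context=python3 prog.py" through the
    # console entry point. prog.py is a hypothetical target script and
    # "python3" is just an illustrative label.
    from coverage.cmdline import main

    status = main(["run", "--context", "python3", "prog.py"])
    print("coverage exited with status", status)
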
diff --git a/coverage/collector.py b/coverage/collector.py
index fa3eaaa4..e0144979 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -99,6 +99,9 @@ class Collector(object):
self.warn = warn
self.branch = branch
self.threading = None
+ self.covdata = None
+
+ self.static_context = None
self.origin = short_stack()
@@ -160,6 +163,12 @@ class Collector(object):
def __repr__(self):
return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
+ def use_data(self, covdata, context):
+ """Use `covdata` for recording data."""
+ self.covdata = covdata
+ self.static_context = context
+ self.covdata.set_context(self.static_context)
+
def tracer_name(self):
"""Return the class name of the tracer we're using."""
return self._trace_class.__name__
@@ -378,8 +387,11 @@ class Collector(object):
except KeyError:
return self.abs_file_cache.setdefault(key, abs_file(filename))
- def save_data(self, covdata):
- """Save the collected data to a `CoverageData`.
+ def flush_data(self):
+ """Save the collected data to our associated `CoverageData`.
+
+ Data may have also been saved along the way. This forces the
+ last of the data to be saved.
Returns True if there was data to save, False if not.
"""
@@ -406,10 +418,10 @@ class Collector(object):
return dict((self.cached_abs_file(k), v) for k, v in items)
if self.branch:
- covdata.add_arcs(abs_file_dict(self.data))
+ self.covdata.add_arcs(abs_file_dict(self.data))
else:
- covdata.add_lines(abs_file_dict(self.data))
- covdata.add_file_tracers(abs_file_dict(self.file_tracers))
+ self.covdata.add_lines(abs_file_dict(self.data))
+ self.covdata.add_file_tracers(abs_file_dict(self.file_tracers))
if self.wtw:
# Just a hack, so just hack it.
diff --git a/coverage/config.py b/coverage/config.py
index 69c929b4..9a11323d 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -175,6 +175,7 @@ class CoverageConfig(object):
# Defaults for [run]
self.branch = False
self.concurrency = None
+ self.context = None
self.cover_pylib = False
self.data_file = ".coverage"
self.debug = []
@@ -318,6 +319,7 @@ class CoverageConfig(object):
# [run]
('branch', 'run:branch', 'boolean'),
('concurrency', 'run:concurrency', 'list'),
+ ('context', 'run:context'),
('cover_pylib', 'run:cover_pylib', 'boolean'),
('data_file', 'run:data_file'),
('debug', 'run:debug', 'list'),
diff --git a/coverage/control.py b/coverage/control.py
index 5d42af77..f7d97cf6 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -57,7 +57,7 @@ class Coverage(object):
self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None, debug=None,
- concurrency=None, check_preimported=False,
+ concurrency=None, check_preimported=False, context=None,
):
"""
`data_file` is the base name of the data file to use, defaulting to
@@ -116,6 +116,8 @@ class Coverage(object):
by coverage. Importing measured files before coverage is started can
mean that code is missed.
+ `context` is a string to use as the context label for collected data.
+
.. versionadded:: 4.0
The `concurrency` parameter.
@@ -133,7 +135,7 @@ class Coverage(object):
branch=branch, parallel=bool_or_none(data_suffix),
source=source, run_omit=omit, run_include=include, debug=debug,
report_omit=omit, report_include=include,
- concurrency=concurrency,
+ concurrency=concurrency, context=context,
)
# This is injectable by tests.
@@ -333,6 +335,7 @@ class Coverage(object):
def _init_for_start(self):
"""Initialization for start()"""
+ # Construct the collector.
concurrency = self.config.concurrency or []
if "multiprocessing" in concurrency:
if not patch_multiprocessing:
@@ -363,6 +366,8 @@ class Coverage(object):
self._init_data(suffix)
+ self._collector.use_data(self._data, self.config.context)
+
# Early warning if we aren't going to be able to support plugins.
if self._plugins.file_tracers and not self._collector.supports_plugins:
self._warn(
@@ -562,7 +567,7 @@ class Coverage(object):
self._init_data(suffix=None)
self._post_init()
- if self._collector and self._collector.save_data(self._data):
+ if self._collector and self._collector.flush_data():
self._post_save_work()
return self._data
@@ -678,15 +683,11 @@ class Coverage(object):
if not morfs:
morfs = self._data.measured_files()
- # Be sure we have a list.
- if not isinstance(morfs, (list, tuple)):
+ # Be sure we have a collection.
+ if not isinstance(morfs, (list, tuple, set)):
morfs = [morfs]
- file_reporters = []
- for morf in morfs:
- file_reporter = self._get_file_reporter(morf)
- file_reporters.append(file_reporter)
-
+ file_reporters = [self._get_file_reporter(morf) for morf in morfs]
return file_reporters
def report(
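With the constructor now accepting `context`, the label can also be set from the Python API. A minimal sketch, following the pattern in tests/test_context.py below; it assumes the SQLite data backend (the JSON backend raises an exception for non-empty contexts), and main.py is created here only so there is something to measure.

    import coverage

    with open("main.py", "w") as f:
        f.write("a = 1\n")

    cov = coverage.Coverage(context="gooey")   # static context for this run
    cov.start()
    import main                                # noqa: F401  (the measured code)
    cov.stop()
    cov.save()

    print(cov.get_data().measured_contexts())  # expect {'gooey'}
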
diff --git a/coverage/data.py b/coverage/data.py
index f03e90ca..e6d56d84 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -252,8 +252,8 @@ class CoverageJsonData(object):
return self._runs
def measured_files(self):
- """A list of all files that had been measured."""
- return list(self._arcs or self._lines or {})
+ """A set of all files that had been measured."""
+ return set(self._arcs or self._lines or {})
def __nonzero__(self):
return bool(self._lines or self._arcs)
@@ -445,6 +445,11 @@ class CoverageJsonData(object):
self._validate()
+ def set_context(self, context):
+ """Set the context. Not implemented for JSON storage."""
+ if context:
+ raise CoverageException("JSON storage doesn't support contexts")
+
def write(self):
"""Write the collected coverage data to a file.
@@ -722,6 +727,8 @@ def combine_parallel_data(data, aliases=None, data_paths=None, strict=False):
files_combined = 0
for f in files_to_combine:
+ if data._debug and data._debug.should('dataio'):
+ data._debug.write("Combining data file %r" % (f,))
try:
new_data = CoverageData(f, debug=data._debug)
new_data.read()
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index f9598485..fb2279c9 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -12,6 +12,7 @@
# TODO: run_info
import glob
+import itertools
import os
import sqlite3
@@ -22,7 +23,11 @@ from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone
-SCHEMA_VERSION = 1
+# Schema versions:
+# 1: Released in 5.0a2
+# 2: Added contexts
+
+SCHEMA_VERSION = 2
SCHEMA = """
create table coverage_schema (
@@ -40,17 +45,25 @@ create table file (
unique(path)
);
+create table context (
+ id integer primary key,
+ context text,
+ unique(context)
+);
+
create table line (
file_id integer,
+ context_id integer,
lineno integer,
- unique(file_id, lineno)
+ unique(file_id, context_id, lineno)
);
create table arc (
file_id integer,
+ context_id integer,
fromno integer,
tono integer,
- unique(file_id, fromno, tono)
+ unique(file_id, context_id, fromno, tono)
);
create table tracer (
@@ -78,6 +91,8 @@ class CoverageSqliteData(SimpleReprMixin):
self._has_lines = False
self._has_arcs = False
+ self._current_context_id = None
+
def _choose_filename(self):
self.filename = self._basename
suffix = filename_suffix(self._suffix)
@@ -90,6 +105,7 @@ class CoverageSqliteData(SimpleReprMixin):
self._db = None
self._file_map = {}
self._have_used = False
+ self._current_context_id = None
def _create_db(self):
if self._debug and self._debug.should('dataio'):
@@ -97,7 +113,7 @@ class CoverageSqliteData(SimpleReprMixin):
self._db = Sqlite(self.filename, self._debug)
with self._db:
for stmt in SCHEMA.split(';'):
- stmt = stmt.strip()
+ stmt = " ".join(stmt.strip().split())
if stmt:
self._db.execute(stmt)
self._db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
@@ -151,6 +167,12 @@ class CoverageSqliteData(SimpleReprMixin):
__bool__ = __nonzero__
+ def dump(self): # pragma: debugging
+ """Write a dump of the database."""
+ if self._debug:
+ with self._connect() as con:
+ self._debug.write(con.dump())
+
def _file_id(self, filename, add=False):
"""Get the file id for `filename`.
@@ -164,6 +186,29 @@ class CoverageSqliteData(SimpleReprMixin):
self._file_map[filename] = cur.lastrowid
return self._file_map.get(filename)
+ def _context_id(self, context):
+ """Get the id for a context."""
+ assert context is not None
+ self._start_using()
+ with self._connect() as con:
+ row = con.execute("select id from context where context = ?", (context,)).fetchone()
+ if row is not None:
+ return row[0]
+ else:
+ return None
+
+ def set_context(self, context):
+ """Set the current context for future `add_lines` etc."""
+ self._start_using()
+ context = context or ""
+ with self._connect() as con:
+ row = con.execute("select id from context where context = ?", (context,)).fetchone()
+ if row is not None:
+ self._current_context_id = row[0]
+ else:
+ cur = con.execute("insert into context (context) values (?)", (context,))
+ self._current_context_id = cur.lastrowid
+
def add_lines(self, line_data):
"""Add measured line data.
@@ -178,12 +223,14 @@ class CoverageSqliteData(SimpleReprMixin):
))
self._start_using()
self._choose_lines_or_arcs(lines=True)
+ if self._current_context_id is None:
+ self.set_context("")
with self._connect() as con:
for filename, linenos in iitems(line_data):
file_id = self._file_id(filename, add=True)
- data = [(file_id, lineno) for lineno in linenos]
+ data = [(file_id, self._current_context_id, lineno) for lineno in linenos]
con.executemany(
- "insert or ignore into line (file_id, lineno) values (?, ?)",
+ "insert or ignore into line (file_id, context_id, lineno) values (?, ?, ?)",
data,
)
@@ -201,12 +248,14 @@ class CoverageSqliteData(SimpleReprMixin):
))
self._start_using()
self._choose_lines_or_arcs(arcs=True)
+ if self._current_context_id is None:
+ self.set_context("")
with self._connect() as con:
for filename, arcs in iitems(arc_data):
file_id = self._file_id(filename, add=True)
- data = [(file_id, fromno, tono) for fromno, tono in arcs]
+ data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
con.executemany(
- "insert or ignore into arc (file_id, fromno, tono) values (?, ?, ?)",
+ "insert or ignore into arc (file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
data,
)
@@ -276,23 +325,27 @@ class CoverageSqliteData(SimpleReprMixin):
aliases = aliases or PathAliases()
# See what we had already measured, for accurate conflict reporting.
- this_measured = set(self.measured_files())
+ this_measured = self.measured_files()
# lines
if other_data._has_lines:
- for filename in other_data.measured_files():
- lines = set(other_data.lines(filename))
- filename = aliases.map(filename)
- lines.update(self.lines(filename) or ())
- self.add_lines({filename: lines})
+ for context in other_data.measured_contexts():
+ self.set_context(context)
+ for filename in other_data.measured_files():
+ lines = set(other_data.lines(filename, context=context))
+ filename = aliases.map(filename)
+ lines.update(self.lines(filename, context=context) or ())
+ self.add_lines({filename: lines})
# arcs
if other_data._has_arcs:
- for filename in other_data.measured_files():
- arcs = set(other_data.arcs(filename))
- filename = aliases.map(filename)
- arcs.update(self.arcs(filename) or ())
- self.add_arcs({filename: arcs})
+ for context in other_data.measured_contexts():
+ self.set_context(context)
+ for filename in other_data.measured_files():
+ arcs = set(other_data.arcs(filename, context=context))
+ filename = aliases.map(filename)
+ arcs.update(self.arcs(filename, context=context) or ())
+ self.add_arcs({filename: arcs})
# file_tracers
for filename in other_data.measured_files():
@@ -353,8 +406,15 @@ class CoverageSqliteData(SimpleReprMixin):
return bool(self._has_arcs)
def measured_files(self):
- """A list of all files that had been measured."""
- return list(self._file_map)
+ """A set of all files that had been measured."""
+ return set(self._file_map)
+
+ def measured_contexts(self):
+ """A set of all contexts that have been measured."""
+ self._start_using()
+ with self._connect() as con:
+ contexts = set(row[0] for row in con.execute("select distinct(context) from context"))
+ return contexts
def file_tracer(self, filename):
"""Get the plugin name of the file tracer for a file.
@@ -374,12 +434,11 @@ class CoverageSqliteData(SimpleReprMixin):
return row[0] or ""
return "" # File was measured, but no tracer associated.
- def lines(self, filename):
+ def lines(self, filename, context=None):
self._start_using()
if self.has_arcs():
- arcs = self.arcs(filename)
+ arcs = self.arcs(filename, context=context)
if arcs is not None:
- import itertools
all_lines = itertools.chain.from_iterable(arcs)
return list(set(l for l in all_lines if l > 0))
@@ -388,18 +447,28 @@ class CoverageSqliteData(SimpleReprMixin):
if file_id is None:
return None
else:
- linenos = con.execute("select lineno from line where file_id = ?", (file_id,))
+ query = "select lineno from line where file_id = ?"
+ data = [file_id]
+ if context is not None:
+ query += " and context_id = ?"
+ data += [self._context_id(context)]
+ linenos = con.execute(query, data)
return [lineno for lineno, in linenos]
- def arcs(self, filename):
+ def arcs(self, filename, context=None):
self._start_using()
with self._connect() as con:
file_id = self._file_id(filename)
if file_id is None:
return None
else:
- arcs = con.execute("select fromno, tono from arc where file_id = ?", (file_id,))
- return [pair for pair in arcs]
+ query = "select fromno, tono from arc where file_id = ?"
+ data = [file_id]
+ if context is not None:
+ query += " and context_id = ?"
+ data += [self._context_id(context)]
+ arcs = con.execute(query, data)
+ return list(arcs)
def run_infos(self):
return [] # TODO
@@ -456,3 +525,7 @@ class Sqlite(SimpleReprMixin):
if self.debug:
self.debug.write("Executing many {!r} with {} rows".format(sql, len(data)))
return self.con.executemany(sql, data)
+
+ def dump(self): # pragma: debugging
+ """Return a multi-line string, the dump of the database."""
+ return "\n".join(self.con.iterdump())
diff --git a/doc/cmd.rst b/doc/cmd.rst
index 00a9a6c3..86a858e4 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -69,7 +69,7 @@ control, and can provide options that other invocation techniques (like test
runner plugins) may not offer. See :ref:`config` for more details.
-.. _cmd_execution:
+.. _cmd_run:
Execution
---------
@@ -122,6 +122,10 @@ If you are measuring coverage in a multi-process program, or across a number of
machines, you'll want the ``--parallel-mode`` switch to keep the data separate
during measurement. See :ref:`cmd_combining` below.
+You can specify a :ref:`static context <contexts>` for a coverage run with
+``--context``. This can be any label you want, and will be recorded with the
+data. See :ref:`contexts` for more information.
+
By default, coverage.py does not measure code installed with the Python
interpreter, for example, the standard library. If you want to measure that
code as well as your own, add the ``-L`` (or ``--pylib``) flag.
diff --git a/doc/config.rst b/doc/config.rst
index 666a1321..b8117a43 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -132,6 +132,11 @@ Before version 4.2, this option only accepted a single string.
.. versionadded:: 4.0
+``context`` (string): the static context to record for this coverage run. See
+:ref:`contexts` for more information.
+
+.. versionadded:: 5.0
+
``data_file`` (string, default ".coverage"): the name of the data file to use
for storing or reporting coverage. This value can include a path to another
directory.
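The same label can come from a configuration file instead of the command line. A minimal sketch, peeking at the internal `config` attribute only to show the value was picked up; the "python3" label is illustrative:

    import coverage

    with open(".coveragerc", "w") as rc:
        rc.write("[run]\ncontext = python3\n")

    cov = coverage.Coverage()       # .coveragerc is read automatically
    print(cov.config.context)       # expect "python3"
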
diff --git a/doc/contexts.rst b/doc/contexts.rst
new file mode 100644
index 00000000..c1d4a173
--- /dev/null
+++ b/doc/contexts.rst
@@ -0,0 +1,51 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+.. _contexts:
+
+====================
+Measurement Contexts
+====================
+
+.. :history: 20180921T085400, new for 5.0
+
+.. versionadded:: 5.0
+
+Coverage.py measures whether code was run, but it can also record the context
+in which it was run. This can provide more information to help you understand
+the behavior of your tests.
+
+There are two kinds of context: static and dynamic. Static contexts are fixed
+for an entire run, and are set explicitly with an option.
+
+Dynamic contexts are coming soon.
+
+
+Static contexts
+---------------
+
+A static context is set by an option when you run coverage.py. The value is
+fixed for the duration of a run, and can be any text you like, for example,
+"python3" or "with_numpy". The context is recorded with the data.
+
+When you :ref:`combine multiple data files <cmd_combining>` together, they can
+have differing contexts. All of the information is retained, so that the
+different contexts are correctly recorded in the combined file.
+
+A static context is specified with the ``--context=CONTEXT`` option to
+:ref:`coverage run <cmd_run>`.
+
+
+Dynamic contexts
+----------------
+
+Not implemented yet.
+
+
+Context reporting
+-----------------
+
+There is currently no support for using contexts during reporting. I'm
+interested to `hear your ideas`__ about what would be useful.
+
+__ https://nedbatchelder.com/site/aboutned.html
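Combining keeps each run's context, and the new `context` arguments on the data API let you query them apart afterwards. A sketch mirroring test_combining_line_contexts below: it assumes the SQLite backend and two data files, .coverage.r and .coverage.b, already written with contexts "red" and "blue" (all names illustrative).

    from coverage.data import CoverageData

    red = CoverageData(".coverage.r")
    red.read()
    blue = CoverageData(".coverage.b")
    blue.read()

    combined = CoverageData(".coverage.combined")
    combined.update(red)
    combined.update(blue)

    print(combined.measured_contexts())          # expect {'red', 'blue'}
    for path in sorted(combined.measured_files()):
        print(path, combined.lines(path, context="red"))
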
diff --git a/doc/index.rst b/doc/index.rst
index 0a795011..149eba07 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -206,6 +206,7 @@ More information
excluding
branch
subprocess
+ contexts
api
howitworks
plugins
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 59c76c73..db89137b 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -33,7 +33,7 @@ class BaseCmdLineTest(CoverageTest):
defaults.Coverage(
cover_pylib=None, data_suffix=None, timid=None, branch=None,
config_file=True, source=None, include=None, omit=None, debug=None,
- concurrency=None, check_preimported=True,
+ concurrency=None, check_preimported=True, context=None,
)
defaults.annotate(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
diff --git a/tests/test_context.py b/tests/test_context.py
new file mode 100644
index 00000000..a6be922d
--- /dev/null
+++ b/tests/test_context.py
@@ -0,0 +1,104 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Tests for context support."""
+
+import os.path
+
+import coverage
+from coverage.data import CoverageData
+
+from tests.coveragetest import CoverageTest
+
+
+class GlobalContextTest(CoverageTest):
+ """Tests of the global context."""
+
+ def setUp(self):
+ super(GlobalContextTest, self).setUp()
+ self.skip_unless_data_storage_is("sql")
+
+ def test_no_context(self):
+ self.make_file("main.py", "a = 1")
+ cov = coverage.Coverage()
+ self.start_import_stop(cov, "main")
+ data = cov.get_data()
+ self.assertCountEqual(data.measured_contexts(), [""])
+
+ def test_global_context(self):
+ self.make_file("main.py", "a = 1")
+ cov = coverage.Coverage(context="gooey")
+ self.start_import_stop(cov, "main")
+ data = cov.get_data()
+ self.assertCountEqual(data.measured_contexts(), ["gooey"])
+
+ SOURCE = """\
+ a = 1
+ if a > 2:
+ a = 3
+ assert a == 1
+ """
+
+ LINES = [1, 2, 4]
+ ARCS = [(-1, 1), (1, 2), (2, 4), (4, -1)]
+
+ def run_red_blue(self, **options):
+ """Run red.py and blue.py, and return their CoverageData objects."""
+ self.make_file("red.py", self.SOURCE)
+ red_cov = coverage.Coverage(context="red", data_suffix="r", source=["."], **options)
+ self.start_import_stop(red_cov, "red")
+ red_cov.save()
+ red_data = red_cov.get_data()
+
+ self.make_file("blue.py", self.SOURCE)
+ blue_cov = coverage.Coverage(context="blue", data_suffix="b", source=["."], **options)
+ self.start_import_stop(blue_cov, "blue")
+ blue_cov.save()
+ blue_data = blue_cov.get_data()
+
+ return red_data, blue_data
+
+ def test_combining_line_contexts(self):
+ red_data, blue_data = self.run_red_blue()
+ for datas in [[red_data, blue_data], [blue_data, red_data]]:
+ combined = CoverageData(suffix="combined")
+ for data in datas:
+ combined.update(data)
+
+ self.assertEqual(combined.measured_contexts(), {'red', 'blue'})
+
+ full_names = {os.path.basename(f): f for f in combined.measured_files()}
+ self.assertCountEqual(full_names, ['red.py', 'blue.py'])
+
+ fred = full_names['red.py']
+ fblue = full_names['blue.py']
+
+ self.assertEqual(combined.lines(fred, context='red'), self.LINES)
+ self.assertEqual(combined.lines(fred, context='blue'), [])
+ self.assertEqual(combined.lines(fblue, context='red'), [])
+ self.assertEqual(combined.lines(fblue, context='blue'), self.LINES)
+
+ def test_combining_arc_contexts(self):
+ red_data, blue_data = self.run_red_blue(branch=True)
+ for datas in [[red_data, blue_data], [blue_data, red_data]]:
+ combined = CoverageData(suffix="combined")
+ for data in datas:
+ combined.update(data)
+
+ self.assertEqual(combined.measured_contexts(), {'red', 'blue'})
+
+ full_names = {os.path.basename(f): f for f in combined.measured_files()}
+ self.assertCountEqual(full_names, ['red.py', 'blue.py'])
+
+ fred = full_names['red.py']
+ fblue = full_names['blue.py']
+
+ self.assertEqual(combined.lines(fred, context='red'), self.LINES)
+ self.assertEqual(combined.lines(fred, context='blue'), [])
+ self.assertEqual(combined.lines(fblue, context='red'), [])
+ self.assertEqual(combined.lines(fblue, context='blue'), self.LINES)
+
+ self.assertEqual(combined.arcs(fred, context='red'), self.ARCS)
+ self.assertEqual(combined.arcs(fred, context='blue'), [])
+ self.assertEqual(combined.arcs(fblue, context='red'), [])
+ self.assertEqual(combined.arcs(fblue, context='blue'), self.ARCS)
diff --git a/tests/test_debug.py b/tests/test_debug.py
index 284d9567..63edc84f 100644
--- a/tests/test_debug.py
+++ b/tests/test_debug.py
@@ -144,8 +144,8 @@ class DebugTraceTest(CoverageTest):
self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Writing data")
self.assertRegex(last_line, r"\s+_write_file : .*coverage[/\\]data.py @\d+$")
else:
- self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Creating data file")
- self.assertRegex(last_line, r"\s+_create_db : .*coverage[/\\]sqldata.py @\d+$")
+ self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Adding lines")
+ self.assertRegex(last_line, r"\s+add_lines : .*coverage[/\\]sqldata.py @\d+$")
def test_debug_config(self):
out_lines = self.f1_debug_output(["config"])
diff --git a/tests/test_summary.py b/tests/test_summary.py
index 936df7fa..c3b572d2 100644
--- a/tests/test_summary.py
+++ b/tests/test_summary.py
@@ -750,24 +750,31 @@ class ReportingReturnValueTest(CoverageTest):
class TestSummaryReporterConfiguration(CoverageTest):
"""Tests of SummaryReporter."""
- LINES_1 = {
- os.path.join(TESTS_DIR, "test_api.py"): dict.fromkeys(range(400)),
- os.path.join(TESTS_DIR, "test_backward.py"): dict.fromkeys(range(20)),
- os.path.join(TESTS_DIR, "test_coverage.py"): dict.fromkeys(range(15)),
- }
-
- def get_coverage_data(self, lines):
- """Get a CoverageData object that includes the requested lines."""
- data = CoverageData()
- data.add_lines(lines)
- return data
-
- def get_summary_text(self, coverage_data, options):
+ def make_rigged_file(self, filename, stmts, miss):
+ """Create a file that will have specific results.
+
+ `stmts` and `miss` are ints: the number of statements and missed
+ statements that should result.
+ """
+ run = stmts - miss - 1
+ dont_run = miss
+ source = ""
+ source += "a = 1\n" * run
+ source += "if a == 99:\n"
+ source += " a = 2\n" * dont_run
+ self.make_file(filename, source)
+
+ def get_summary_text(self, options):
"""Get text output from the SummaryReporter."""
- cov = Coverage()
+ self.make_rigged_file("file1.py", 339, 155)
+ self.make_rigged_file("file2.py", 13, 3)
+ self.make_rigged_file("file3.py", 234, 228)
+ self.make_file("doit.py", "import file1, file2, file3")
+
+ cov = Coverage(source=["."], omit=["doit.py"])
cov.start()
+ import doit # pragma: nested # pylint: disable=import-error, unused-variable
cov.stop() # pragma: nested
- cov._data = coverage_data
printer = SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
@@ -777,8 +784,7 @@ class TestSummaryReporterConfiguration(CoverageTest):
# We use our own test files as test data. Check that our assumptions
# about them are still valid. We want the three columns of numbers to
# sort in three different orders.
- data = self.get_coverage_data(self.LINES_1)
- report = self.get_summary_text(data, CoverageConfig())
+ report = self.get_summary_text(CoverageConfig())
print(report)
# Name Stmts Miss Cover
# --------------------------------------------
@@ -802,18 +808,16 @@ class TestSummaryReporterConfiguration(CoverageTest):
def test_defaults(self):
"""Run the report with no configuration options."""
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
- report = self.get_summary_text(data, opts)
+ report = self.get_summary_text(opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
opts.from_args(show_missing=True)
- report = self.get_summary_text(data, opts)
+ report = self.get_summary_text(opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
@@ -827,33 +831,29 @@ class TestSummaryReporterConfiguration(CoverageTest):
def test_sort_report_by_stmts(self):
# Sort the text report by the Stmts column.
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
opts.from_args(sort='Stmts')
- report = self.get_summary_text(data, opts)
+ report = self.get_summary_text(opts)
self.assert_ordering(report, "test_backward.py", "test_coverage.py", "test_api.py")
def test_sort_report_by_missing(self):
# Sort the text report by the Missing column.
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
opts.from_args(sort='Miss')
- report = self.get_summary_text(data, opts)
+ report = self.get_summary_text(opts)
self.assert_ordering(report, "test_backward.py", "test_api.py", "test_coverage.py")
def test_sort_report_by_cover(self):
# Sort the text report by the Cover column.
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
opts.from_args(sort='Cover')
- report = self.get_summary_text(data, opts)
+ report = self.get_summary_text(opts)
self.assert_ordering(report, "test_coverage.py", "test_api.py", "test_backward.py")
def test_sort_report_by_invalid_option(self):
# Sort the text report by a nonsense column.
- data = self.get_coverage_data(self.LINES_1)
opts = CoverageConfig()
opts.from_args(sort='Xyzzy')
msg = "Invalid sorting option: 'Xyzzy'"
with self.assertRaisesRegex(CoverageException, msg):
- self.get_summary_text(data, opts)
+ self.get_summary_text(opts)