 CHANGES.rst               |  15
 MANIFEST.in               |   5
 coverage/backward.py      |   6
 coverage/cmdline.py       |   2
 coverage/config.py        |   1
 coverage/ctracer/tracer.c |   2
 coverage/ctracer/util.h   |   8
 coverage/data.py          |   8
 coverage/sqldata.py       | 237
 doc/cmd.rst               |  50
 doc/config.rst            |  12
 lab/set_env.py            | 121
 tests/conftest.py         |   3
 tests/test_api.py         |  15
 tests/test_cmdline.py     |   4
 tests/test_data.py        |  16
 tox.ini                   |  17
 17 files changed, 276 insertions(+), 246 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 73fef237..e8dc45de 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,11 +23,26 @@ Unreleased
that missing branches are reported near the other lines they affect. The
values used to show all missing lines, and then all missing branches.
+- Access to the SQLite database used for data storage is now thread-safe.
+ Thanks, Stephan Richter. This closes `issue 702`_.
+
+- Combining data stored in SQLite now goes about twice as fast, fixing `issue
+ 761`_. Thanks, Stephan Richter.
+
+- The ``filename`` attribute on ``CoverageData`` objects has been made private.
+ You can use the ``filename`` method to get the actual file name being used
+ to store data, and the ``base_filename`` method to get the original filename
+ before parallelizing suffixes were added. This is part of fixing `issue
+ 708`_.
+
- Line numbers in the HTML report now align properly with source lines, even
when Chrome's minimum font size is set, fixing `issue 748`_. Thanks, Wen Ye.
+.. _issue 702: https://github.com/nedbat/coveragepy/issues/702
+.. _issue 708: https://github.com/nedbat/coveragepy/issues/708
.. _issue 746: https://github.com/nedbat/coveragepy/issues/746
.. _issue 748: https://github.com/nedbat/coveragepy/issues/748
+.. _issue 761: https://github.com/nedbat/coveragepy/issues/761
.. _changes_50a4:
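
The changelog entry above replaces the public ``filename`` attribute with two
accessor methods. A minimal sketch of the new calls, assuming the SQLite data
class introduced in this series is what backs ``Coverage.get_data()``::

    import coverage

    cov = coverage.Coverage(data_suffix=True)  # parallel-style suffixed data file
    data = cov.get_data()
    # The attribute is private now; use the accessor methods instead:
    print("path: %s" % data.filename())        # the actual file storing the data
    print("base: %s" % data.base_filename())   # the name before the suffix was added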
diff --git a/MANIFEST.in b/MANIFEST.in
index a06c0562..ba46f3db 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,11 @@
# MANIFEST.in file for coverage.py
+# This file includes everything needed to recreate the entire project, even
+# though many of these files are not installed by setup.py. Unpacking the
+# .tar.gz source distribution would give you everything needed to continue
+# developing the project. "pip install" will not install many of these files.
+
include CONTRIBUTORS.txt
include CHANGES.rst
include LICENSE.txt
diff --git a/coverage/backward.py b/coverage/backward.py
index b43e35f3..2d0494a7 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -44,6 +44,12 @@ try:
except NameError:
range = range
+# Where do we get the thread id from?
+try:
+ from thread import get_ident as get_thread_id
+except ImportError:
+ from threading import get_ident as get_thread_id
+
# shlex.quote is new, but there's an undocumented implementation in "pipes",
# who knew!?
try:
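
This shim provides a single ``get_thread_id`` name for Python 2
(``thread.get_ident``) and Python 3 (``threading.get_ident``). sqldata.py uses
it below to keep one SQLite connection per thread, since sqlite3 connections
can't be shared across threads by default. A minimal sketch of that
per-thread pattern (the resource factory here is illustrative)::

    from coverage.backward import get_thread_id

    _per_thread = {}

    def resource_for_this_thread(make_resource):
        # Create at most one resource per thread, keyed by thread id.
        tid = get_thread_id()
        if tid not in _per_thread:
            _per_thread[tid] = make_resource()
        return _per_thread[tid]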
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 4af48170..f2c36745 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -667,7 +667,7 @@ class CoverageScript(object):
self.coverage.load()
data = self.coverage.get_data()
print(info_header("data"))
- print("path: %s" % self.coverage.get_data().filename)
+ print("path: %s" % self.coverage.get_data().filename())
if data:
print("has_arcs: %r" % data.has_arcs())
summary = line_counts(data, fullpath=True)
diff --git a/coverage/config.py b/coverage/config.py
index f61d6951..f7e1605b 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -486,6 +486,7 @@ def read_coverage_config(config_file, **kwargs):
if specified_file:
raise CoverageException("Couldn't read '%s' as a config file" % fname)
+ # $set_env.py: COVERAGE_DEBUG - Options for --debug.
# 3) from environment variables:
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
diff --git a/coverage/ctracer/tracer.c b/coverage/ctracer/tracer.c
index 7d639112..d497a94d 100644
--- a/coverage/ctracer/tracer.c
+++ b/coverage/ctracer/tracer.c
@@ -541,7 +541,7 @@ CTracer_handle_call(CTracer *self, PyFrameObject *frame)
/* Make the frame right in case settrace(gettrace()) happens. */
Py_INCREF(self);
- My_XSETREF(frame->f_trace, (PyObject*)self);
+ Py_XSETREF(frame->f_trace, (PyObject*)self);
/* A call event is really a "start frame" event, and can happen for
* re-entering a generator also. f_lasti is -1 for a true call, and a
diff --git a/coverage/ctracer/util.h b/coverage/ctracer/util.h
index 96d2e51c..cb8aceb9 100644
--- a/coverage/ctracer/util.h
+++ b/coverage/ctracer/util.h
@@ -44,14 +44,6 @@
#endif /* Py3k */
-// Undocumented, and not in 2.6, so our own copy of it.
-#define My_XSETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_XDECREF(_py_tmp); \
- } while (0)
-
/* The values returned to indicate ok or error. */
#define RET_OK 0
#define RET_ERROR -1
diff --git a/coverage/data.py b/coverage/data.py
index d6061293..f78628a5 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -710,7 +710,7 @@ def combine_parallel_data(data, aliases=None, data_paths=None, strict=False):
"""
# Because of the os.path.abspath in the constructor, data_dir will
# never be an empty string.
- data_dir, local = os.path.split(data.filename)
+ data_dir, local = os.path.split(data.base_filename())
localdot = local + '.*'
data_paths = data_paths or [data_dir]
@@ -729,6 +729,12 @@ def combine_parallel_data(data, aliases=None, data_paths=None, strict=False):
files_combined = 0
for f in files_to_combine:
+ if f == data.filename():
+ # Sometimes we are combining into a file which is one of the
+ # parallel files. Skip that file.
+ if data._debug.should('dataio'):
+ data._debug.write("Skipping combining ourself: %r" % (f,))
+ continue
if data._debug.should('dataio'):
data._debug.write("Combining data file %r" % (f,))
try:
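
The new check above matters when a suffixed ``Coverage`` object combines in
place: the search for ``.coverage.*`` files can match the very file the
combining object itself is writing, so that file must be skipped rather than
combined into itself. An illustrative rendering of the selection, with
made-up file names::

    import fnmatch

    base = ".coverage"                       # data.base_filename()
    own = ".coverage.host.88335.316857"      # data.filename() of the combiner
    found = [".coverage.host.88335.316857", ".coverage.host.8044.799674"]
    to_combine = [f for f in fnmatch.filter(found, base + ".*") if f != own]
    print(to_combine)                        # ['.coverage.host.8044.799674']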
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index 8bfb04be..893f620d 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -17,7 +17,7 @@ import os
import sqlite3
import sys
-from coverage.backward import iitems
+from coverage.backward import get_thread_id, iitems
from coverage.data import filename_suffix
from coverage.debug import NoDebugging, SimpleReprMixin
from coverage.files import PathAliases
@@ -84,7 +84,7 @@ class CoverageSqliteData(SimpleReprMixin):
self._choose_filename()
self._file_map = {}
- self._db = None
+ self._dbs = {}
self._pid = os.getpid()
# Are we in sync with the data file?
@@ -97,71 +97,72 @@ class CoverageSqliteData(SimpleReprMixin):
self._current_context_id = None
def _choose_filename(self):
- self.filename = self._basename
+ self._filename = self._basename
suffix = filename_suffix(self._suffix)
if suffix:
- self.filename += "." + suffix
+ self._filename += "." + suffix
def _reset(self):
- if self._db is not None:
- self._db.close()
- self._db = None
+ if self._dbs:
+ for db in self._dbs.values():
+ db.close()
+ self._dbs = {}
self._file_map = {}
self._have_used = False
self._current_context_id = None
def _create_db(self):
if self._debug.should('dataio'):
- self._debug.write("Creating data file {!r}".format(self.filename))
- self._db = Sqlite(self.filename, self._debug)
- with self._db:
+ self._debug.write("Creating data file {!r}".format(self._filename))
+ self._dbs[get_thread_id()] = Sqlite(self._filename, self._debug)
+ with self._dbs[get_thread_id()] as db:
for stmt in SCHEMA.split(';'):
stmt = " ".join(stmt.strip().split())
if stmt:
- self._db.execute(stmt)
- self._db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
- self._db.execute(
+ db.execute(stmt)
+ db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
+ db.execute(
"insert into meta (has_lines, has_arcs, sys_argv) values (?, ?, ?)",
(self._has_lines, self._has_arcs, str(getattr(sys, 'argv', None)))
)
def _open_db(self):
if self._debug.should('dataio'):
- self._debug.write("Opening data file {!r}".format(self.filename))
- self._db = Sqlite(self.filename, self._debug)
- with self._db:
+ self._debug.write("Opening data file {!r}".format(self._filename))
+ self._dbs[get_thread_id()] = Sqlite(self._filename, self._debug)
+ with self._dbs[get_thread_id()] as db:
try:
- schema_version, = self._db.execute("select version from coverage_schema").fetchone()
+ schema_version, = db.execute("select version from coverage_schema").fetchone()
except Exception as exc:
raise CoverageException(
"Data file {!r} doesn't seem to be a coverage data file: {}".format(
- self.filename, exc
+ self._filename, exc
)
)
else:
if schema_version != SCHEMA_VERSION:
raise CoverageException(
"Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
- self.filename, schema_version, SCHEMA_VERSION
+ self._filename, schema_version, SCHEMA_VERSION
)
)
- for row in self._db.execute("select has_lines, has_arcs from meta"):
+ for row in db.execute("select has_lines, has_arcs from meta"):
self._has_lines, self._has_arcs = row
- for path, id in self._db.execute("select path, id from file"):
+ for path, id in db.execute("select path, id from file"):
self._file_map[path] = id
def _connect(self):
- if self._db is None:
- if os.path.exists(self.filename):
+ if get_thread_id() not in self._dbs:
+ if os.path.exists(self._filename):
self._open_db()
else:
self._create_db()
- return self._db
+ return self._dbs[get_thread_id()]
def __nonzero__(self):
- if self._db is None and not os.path.exists(self.filename):
+ if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)):
return False
try:
with self._connect() as con:
@@ -181,13 +182,13 @@ class CoverageSqliteData(SimpleReprMixin):
def _file_id(self, filename, add=False):
"""Get the file id for `filename`.
- If filename is not in the database yet, add if it `add` is True.
+ If filename is not in the database yet, add it if `add` is True.
If `add` is not True, return None.
"""
if filename not in self._file_map:
if add:
with self._connect() as con:
- cur = con.execute("insert into file (path) values (?)", (filename,))
+ cur = con.execute("insert or replace into file (path) values (?)", (filename,))
self._file_map[filename] = cur.lastrowid
return self._file_map.get(filename)
@@ -220,6 +221,14 @@ class CoverageSqliteData(SimpleReprMixin):
cur = con.execute("insert into context (context) values (?)", (context,))
self._current_context_id = cur.lastrowid
+ def base_filename(self):
+ """The base filename for storing data."""
+ return self._basename
+
+ def filename(self):
+ """Where is the data stored?"""
+ return self._filename
+
def add_lines(self, line_data):
"""Add measured line data.
@@ -326,6 +335,11 @@ class CoverageSqliteData(SimpleReprMixin):
self.add_file_tracers({filename: plugin_name})
def update(self, other_data, aliases=None):
+ """Update this data with data from several other `CoverageData` instances.
+
+ If `aliases` is provided, it's a `PathAliases` object that is used to
+ re-map paths to match the local machine's.
+ """
if self._has_lines and other_data._has_arcs:
raise CoverageException("Can't combine arc data with line data")
if self._has_arcs and other_data._has_lines:
@@ -333,57 +347,134 @@ class CoverageSqliteData(SimpleReprMixin):
aliases = aliases or PathAliases()
- # See what we had already measured, for accurate conflict reporting.
- this_measured = self.measured_files()
-
- other_files = set()
-
# Force the database we're writing to to exist before we start nesting
# contexts.
self._start_using()
- # Start a single transaction in each file.
- with self._connect(), other_data._connect():
- # lines
- if other_data._has_lines:
- for context in other_data.measured_contexts():
- self.set_context(context)
- for filename in other_data.measured_files():
- lines = set(other_data.lines(filename, context=context))
- if lines:
- other_files.add(filename)
- filename = aliases.map(filename)
- lines.update(self.lines(filename, context=context) or ())
- self.add_lines({filename: lines})
-
- # arcs
- if other_data._has_arcs:
- for context in other_data.measured_contexts():
- self.set_context(context)
- for filename in other_data.measured_files():
- arcs = set(other_data.arcs(filename, context=context))
- if arcs:
- other_files.add(filename)
- filename = aliases.map(filename)
- arcs.update(self.arcs(filename, context=context) or ())
- self.add_arcs({filename: arcs})
-
- # file_tracers
- for filename in other_files:
- other_plugin = other_data.file_tracer(filename)
- filename = aliases.map(filename)
- if filename in this_measured:
- this_plugin = self.file_tracer(filename)
- else:
- this_plugin = None
- if this_plugin is None:
- self.add_file_tracers({filename: other_plugin})
- elif this_plugin != other_plugin:
+ # Collect all the arcs, lines, and tracers from the other data file.
+ other_data.read()
+ with other_data._connect() as conn:
+ # Get files data.
+ cur = conn.execute('select path from file')
+ files = {path: aliases.map(path) for (path,) in cur}
+ cur.close()
+
+ # Get contexts data.
+ cur = conn.execute('select context from context')
+ contexts = [context for (context,) in cur]
+ cur.close()
+
+ # Get arc data.
+ cur = conn.execute(
+ 'select file.path, context.context, arc.fromno, arc.tono '
+ 'from arc '
+ 'inner join file on file.id = arc.file_id '
+ 'inner join context on context.id = arc.context_id'
+ )
+ arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
+ cur.close()
+
+ # Get line data.
+ cur = conn.execute(
+ 'select file.path, context.context, line.lineno '
+ 'from line '
+ 'inner join file on file.id = line.file_id '
+ 'inner join context on context.id = line.context_id'
+ )
+ lines = [(files[path], context, lineno) for (path, context, lineno) in cur]
+ cur.close()
+
+ # Get tracer data.
+ cur = conn.execute(
+ 'select file.path, tracer '
+ 'from tracer '
+ 'inner join file on file.id = tracer.file_id'
+ )
+ tracers = {files[path]: tracer for (path, tracer) in cur}
+ cur.close()
+
+ with self._connect() as conn:
+ conn.isolation_level = 'IMMEDIATE'
+
+ # Get all tracers in the DB. Files without a tracer row are assumed
+ # to have an empty-string tracer. Since SQLite does not support
+ # full outer joins, we have to make two queries to fill the
+ # dictionary.
+ this_tracers = {path: '' for path, in conn.execute('select path from file')}
+ this_tracers.update({
+ aliases.map(path): tracer
+ for path, tracer in conn.execute(
+ 'select file.path, tracer from tracer '
+ 'inner join file on file.id = tracer.file_id'
+ )
+ })
+
+ # Create all file and context rows in the DB.
+ conn.executemany(
+ 'insert or ignore into file (path) values (?)',
+ ((file,) for file in files.values())
+ )
+ file_ids = {
+ path: id
+ for id, path in conn.execute('select id, path from file')
+ }
+ conn.executemany(
+ 'insert or ignore into context (context) values (?)',
+ ((context,) for context in contexts)
+ )
+ context_ids = {
+ context: id
+ for id, context in conn.execute('select id, context from context')
+ }
+
+ # Prepare tracers, failing if a conflict is found. tracer_map
+ # tracks the tracers to be inserted, keyed by the mapped file path.
+ tracer_map = {}
+ for path in files.values():
+ this_tracer = this_tracers.get(path)
+ other_tracer = tracers.get(path, '')
+ # A file not measured in this data yet maps to None, which can't conflict.
+ if this_tracer is not None and this_tracer != other_tracer:
raise CoverageException(
"Conflicting file tracer name for '%s': %r vs %r" % (
- filename, this_plugin, other_plugin,
+ path, this_tracer, other_tracer
)
)
+ tracer_map[path] = other_tracer
+
+ # Prepare the arc and line rows to be inserted, replacing the file
+ # and context strings with their integer ids. Then use the efficient
+ # `executemany()` to insert all rows at once.
+ arc_rows = (
+ (file_ids[file], context_ids[context], fromno, tono)
+ for file, context, fromno, tono in arcs
+ )
+ line_rows = (
+ (file_ids[file], context_ids[context], lineno)
+ for file, context, lineno in lines
+ )
+
+ self._choose_lines_or_arcs(arcs=bool(arcs), lines=bool(lines))
+
+ conn.executemany(
+ 'insert or ignore into arc '
+ '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
+ arc_rows
+ )
+ conn.executemany(
+ 'insert or ignore into line '
+ '(file_id, context_id, lineno) values (?, ?, ?)',
+ line_rows
+ )
+ conn.executemany(
+ 'insert or ignore into tracer (file_id, tracer) values (?, ?)',
+ ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
+ )
+
+ # Update all internal cache data.
+ self._reset()
+ self.read()
def erase(self, parallel=False):
"""Erase the data in this object.
@@ -394,10 +485,10 @@ class CoverageSqliteData(SimpleReprMixin):
"""
self._reset()
if self._debug.should('dataio'):
- self._debug.write("Erasing data file {!r}".format(self.filename))
- file_be_gone(self.filename)
+ self._debug.write("Erasing data file {!r}".format(self._filename))
+ file_be_gone(self._filename)
if parallel:
- data_dir, local = os.path.split(self.filename)
+ data_dir, local = os.path.split(self._filename)
localdot = local + '.*'
pattern = os.path.join(os.path.abspath(data_dir), localdot)
for filename in glob.glob(pattern):
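
The rewritten ``update()`` above is where the combine speedup comes from:
instead of re-querying per file and per context, it reads the other database
with a few bulk SELECTs, remaps the file and context strings to integer ids,
and writes everything with ``executemany()`` using ``insert or ignore`` so
that duplicate rows are dropped by the unique constraints. A self-contained
sketch of that pattern with a toy schema (not coverage.py's actual tables)::

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute(
        "create table line (file_id int, lineno int, unique (file_id, lineno))"
    )
    rows = [(1, 10), (1, 11), (1, 10)]     # the duplicate (1, 10) is ignored
    con.executemany(
        "insert or ignore into line (file_id, lineno) values (?, ?)", rows
    )
    print(con.execute("select count(*) from line").fetchone()[0])   # 2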
diff --git a/doc/cmd.rst b/doc/cmd.rst
index 1344d20e..3551e90d 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -219,8 +219,24 @@ To erase the collected data, use the **erase** command::
Combining data files
--------------------
-If you need to collect coverage data from different machines or processes,
-coverage.py can combine multiple files into one for reporting.
+Often test suites are run under different conditions, for example, with
+different versions of Python, or dependencies, or on different operating
+systems. In these cases, you can collect coverage data for each test run, and
+then combine all the separate data files into one combined file for reporting.
+
+The **combine** command knows how to read a number of separate data files,
+match the data by source file name, and write a combined data file with all of
+the data.
+
+Coverage.py normally writes data to a file named ".coverage". The ``run
+--parallel-mode`` switch (or ``[run] parallel=True`` configuration option)
+tells coverage.py to expand the file name to include machine name, process id,
+and a random number so that every data file is distinct::
+
+ .coverage.Neds-MacBook-Pro.local.88335.316857
+ .coverage.Geometer.8044.799674
+
+You can also define a new data file name with the ``[run] data_file`` option.
Once you have created a number of these files, you can copy them all to a
single directory, and use the **combine** command to combine them into one
@@ -237,10 +253,9 @@ current directory isn't searched if you use command-line arguments. If you
also want data from the current directory, name it explicitly on the command
line.
-When coverage.py looks in directories for data files to combine, even the
-current directory, it only reads files with certain names. It looks for files
-named the same as the data file (defaulting to ".coverage"), with a dotted
-suffix. Here are some examples of data files that can be combined::
+When coverage.py combines data files, it looks for files named the same as the
+data file (defaulting to ".coverage"), with a dotted suffix. Here are some
+examples of data files that can be combined::
.coverage.machine1
.coverage.20120807T212300
@@ -251,20 +266,19 @@ An existing combined data file is ignored and re-written. If you want to use
runs, use the ``--append`` switch on the **combine** command. This behavior
was the default before version 4.2.
-The ``run --parallel-mode`` switch automatically creates separate data files
-for each run which can be combined later. The file names include the machine
-name, the process id, and a random number::
-
- .coverage.Neds-MacBook-Pro.local.88335.316857
- .coverage.Geometer.8044.799674
+To combine data for a source file, coverage.py has to find its data
+in each of the data files. Different test runs may run the same source file
+from different locations. For example, different operating systems will use
+different paths for the same file, or perhaps each Python version is run from a
+different subdirectory. Coverage needs to know that different file paths are
+actually the same source file for reporting purposes.
-If the different machines run your code from different places in their file
-systems, coverage.py won't know how to combine the data. You can tell
-coverage.py how the different locations correlate with a ``[paths]`` section in
-your configuration file. See :ref:`config_paths` for details.
+You can tell coverage.py how different source locations relate with a
+``[paths]`` section in your configuration file. See :ref:`config_paths` for
+details.
-If any data files can't be read, coverage.py will print a warning indicating
-the file and the problem.
+If any of the data files can't be read, coverage.py will print a warning
+indicating the file and the problem.
.. _cmd_reporting:
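
The workflow the documentation describes can also be driven through the Python
API, much as the new ``test_combining_with_a_used_coverage`` test later in
this diff does. A sketch, where ``mymodule`` stands in for whatever code is
being measured::

    import coverage

    cov = coverage.Coverage(data_suffix=True)   # like ``run --parallel-mode``
    cov.start()
    import mymodule                             # hypothetical module under test
    cov.stop()
    cov.save()          # writes .coverage.<machine>.<pid>.<random>

    cov.combine()       # reads all matching .coverage.* files
    cov.report()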
diff --git a/doc/config.rst b/doc/config.rst
index 0b668351..dfff813a 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -198,16 +198,20 @@ equivalent when combining data from different machines::
/jenkins/build/*/src
c:\myproj\src
-The names of the entries are ignored, you may choose any name that you like.
-The value is a list of strings. When combining data with the ``combine``
-command, two file paths will be combined if they start with paths from the same
-list.
+The names of the entries ("source" in this example) are ignored; you may choose
+any name that you like. The value is a list of strings. When combining data
+with the ``combine`` command, two file paths will be combined if they start
+with paths from the same list.
The first value must be an actual file path on the machine where the reporting
will happen, so that source code can be found. The other values can be file
patterns to match against the paths of collected data, or they can be absolute
or relative file paths on the current machine.
+In this example, data collected for "/jenkins/build/1234/src/module.py" will be
+combined with data for "c:\myproj\src\module.py", and will be reported against
+the source file found at "src/module.py".
+
See :ref:`cmd_combining` for more information.
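
The ``[paths]`` remapping described here is what ``aliases.map()`` performs
inside ``update()`` in sqldata.py above. A minimal sketch, assuming the
``add()``/``map()`` interface of ``PathAliases``::

    from coverage.files import PathAliases

    aliases = PathAliases()
    aliases.add("/jenkins/build/*/src", "src")
    aliases.add(r"c:\myproj\src", "src")
    # Both collected paths map onto the local source tree:
    print(aliases.map("/jenkins/build/1234/src/module.py"))  # src/module.py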
diff --git a/lab/set_env.py b/lab/set_env.py
deleted file mode 100755
index fe0a4d49..00000000
--- a/lab/set_env.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python3
-#
-# Run this like:
-#
-# $ $(lab/set_env.py)
-#
-
-import functools
-import glob
-import itertools
-import os
-import re
-import sys
-
-# Some other environment variables that could be useful:
-# $set_env.py: PYTEST_ADDOPTS - Extra arguments to pytest.
-
-pstderr = functools.partial(print, file=sys.stderr)
-
-SETTINGS = []
-
-def find_settings():
- line_pattern = r"\$set_env.py: (\w+) - (.*)"
- globs = "*/*.py *.py"
-
- filenames = itertools.chain.from_iterable(glob.glob(g) for g in globs.split())
- files = 0
- for filename in filenames:
- files += 1
- with open(filename) as f:
- for line in f:
- m = re.search(line_pattern, line)
- if m:
- SETTINGS.append(m.groups())
- SETTINGS.sort()
- pstderr("Read {} files".format(files))
-
-def read_them():
- values = {}
- for name, _ in SETTINGS:
- values[name] = os.environ.get(name)
- return values
-
-def show_them(values):
- for i, (name, description) in enumerate(SETTINGS, start=1):
- value = values[name]
- if value is None:
- eq = ' '
- value = ''
- else:
- eq = '='
- value = repr(value)
- pstderr("{:2d}: {:>30s} {} {:12s} {}".format(i, name, eq, value, description))
-
-def set_by_num(values, n, value):
- setting_name = SETTINGS[int(n)-1][0]
- values[setting_name] = value
-
-PROMPT = "(# value | x # | q) ::> "
-
-def get_new_values(values):
- show = True
- while True:
- if show:
- show_them(values)
- show = False
- pstderr("")
- pstderr(PROMPT, end='')
- sys.stderr.flush()
- try:
- cmd = input("").strip().split()
- except EOFError:
- pstderr("\n")
- break
- if not cmd:
- continue
- if cmd[0] == 'q':
- break
- if cmd[0] == 'x':
- if len(cmd) < 2:
- pstderr("Need numbers of entries to delete")
- continue
- try:
- nums = map(int, cmd[1:])
- except ValueError:
- pstderr("Need numbers of entries to delete")
- continue
- else:
- for num in nums:
- set_by_num(values, num, None)
- else:
- try:
- num = int(cmd[0])
- except ValueError:
- pstderr("Don't understand option {!r}".format(cmd[0]))
- continue
- else:
- if len(cmd) >= 2:
- set_by_num(values, num, " ".join(cmd[1:]))
- else:
- pstderr("Need a value to set")
- continue
- show = True
-
- return values
-
-def as_exports(values):
- exports = []
- for name, value in values.items():
- if value is None:
- exports.append("export -n {}".format(name))
- else:
- exports.append("export {}={!r}".format(name, value))
- return "eval " + "; ".join(exports)
-
-def main():
- find_settings()
- print(as_exports(get_new_values(read_them())))
-
-if __name__ == '__main__':
- main()
diff --git a/tests/conftest.py b/tests/conftest.py
index c883ef7b..d0572dca 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,6 +16,9 @@ import pytest
from coverage import env
+# Pytest can take additional options:
+# $set_env.py: PYTEST_ADDOPTS - Extra arguments to pytest.
+
@pytest.fixture(autouse=True)
def set_warnings():
"""Enable DeprecationWarnings during all tests."""
diff --git a/tests/test_api.py b/tests/test_api.py
index 755a89a2..24fe8776 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -420,6 +420,21 @@ class ApiTest(CoverageTest):
self.assertEqual(statements, [1, 2])
self.assertEqual(missing, [1, 2])
+ def test_combining_with_a_used_coverage(self):
+ # Can you use a coverage object to run one shard of a parallel suite,
+ # and then also combine the data?
+ self.make_code1_code2()
+ cov = coverage.Coverage(data_suffix=True)
+ self.start_import_stop(cov, "code1")
+ cov.save()
+
+ cov = coverage.Coverage(data_suffix=True)
+ self.start_import_stop(cov, "code2")
+ cov.save()
+
+ cov.combine()
+ self.check_code1_code2(cov)
+
def test_warnings(self):
self.make_file("hello.py", """\
import sys, os
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 51112602..2a7fba57 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -691,7 +691,7 @@ class CmdLineWithFilesTest(BaseCmdLineTest):
2 files:
file1.py: 17 lines [a_plugin]
file2.py: 23 lines
- """).replace("FILENAME", data.filename))
+ """).replace("FILENAME", data.filename()))
def test_debug_data_with_no_data(self):
data = CoverageData()
@@ -700,7 +700,7 @@ class CmdLineWithFilesTest(BaseCmdLineTest):
-- data ------------------------------------------------------
path: FILENAME
No data collected
- """).replace("FILENAME", data.filename))
+ """).replace("FILENAME", data.filename()))
class CmdLineStdoutTest(BaseCmdLineTest):
diff --git a/tests/test_data.py b/tests/test_data.py
index 3f96288f..417f9771 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -9,6 +9,7 @@ import os
import os.path
import re
import sqlite3
+import threading
import mock
@@ -455,6 +456,21 @@ class CoverageDataTest(DataTestHelpers, CoverageTest):
covdata2.read()
self.assert_arcs3_data(covdata2)
+ def test_thread_stress(self):
+ covdata = CoverageData()
+
+ def thread_main():
+ """Every thread will try to add the same data."""
+ covdata.add_lines(LINES_1)
+
+ threads = [threading.Thread(target=thread_main) for _ in range(10)]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ self.assert_lines1_data(covdata)
+
class CoverageDataTestInTempDir(DataTestHelpers, CoverageTest):
"""Tests of CoverageData that need a temporary directory to make files."""
diff --git a/tox.ini b/tox.ini
index 83ab9edb..eb8e898b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -45,23 +45,6 @@ commands =
python setup.py --quiet build_ext --inplace
python igor.py test_with_tracer c {posargs}
-[testenv:py38]
-basepython = python3.8
-
-[testenv:pypy]
-# The "pypy" environment is for Travis. Probably can make Travis use one of
-# the other environments...
-basepython = pypy
-
-[testenv:pypy2]
-basepython = pypy2
-
-[testenv:pypy3]
-basepython = pypy3
-
-[testenv:jython]
-basepython = jython
-
[testenv:doc]
# Build the docs so we know if they are successful. We build twice: once with
# -q to get all warnings, and once with -QW to get a success/fail status