-rw-r--r--   Makefile                             5
-rw-r--r--   coverage/annotate.py                 3
-rw-r--r--   coverage/collector.py                2
-rw-r--r--   coverage/ctracer/tracer.c            4
-rw-r--r--   coverage/ctracer/tracer.h            2
-rw-r--r--   coverage/env.py                      4
-rw-r--r--   coverage/files.py                   14
-rw-r--r--   coverage/html.py                    23
-rw-r--r--   coverage/htmlfiles/index.html        8
-rw-r--r--   coverage/htmlfiles/pyfile.html       2
-rw-r--r--   coverage/misc.py                    25
-rw-r--r--   coverage/parser.py                  50
-rw-r--r--   coverage/phystokens.py               2
-rw-r--r--   coverage/plugin.py                 116
-rw-r--r--   coverage/plugin_support.py          19
-rw-r--r--   coverage/python.py                  11
-rw-r--r--   coverage/pytracer.py                 8
-rw-r--r--   coverage/report.py                   2
-rw-r--r--   coverage/results.py                 25
-rw-r--r--   coverage/summary.py                  7
-rw-r--r--   coverage/xmlreport.py               14
-rw-r--r--   doc/_static/coverage.css             7
-rw-r--r--   doc/api_coverage.rst                 2
-rw-r--r--   doc/api_coveragedata.rst             2
-rw-r--r--   doc/conf.py                         41
-rw-r--r--   doc/howitworks.rst                  98
-rw-r--r--   doc/index.rst                        1
-rw-r--r--   igor.py                              5
-rw-r--r--   tests/coveragetest.py               28
-rw-r--r--   tests/modules/plugins/a_plugin.py    2
-rw-r--r--   tests/modules/plugins/another.py     2
-rw-r--r--   tests/plugin1.py                     9
-rw-r--r--   tests/plugin2.py                     6
-rw-r--r--   tests/test_filereporter.py          12
-rw-r--r--   tests/test_files.py                  6
-rw-r--r--   tests/test_parser.py                20
-rw-r--r--   tests/test_plugins.py                2
-rw-r--r--   tests/test_summary.py               14
-rw-r--r--   tox.ini                              4
39 files changed, 367 insertions, 240 deletions
diff --git a/Makefile b/Makefile
index 67fd6f00..8f244d01 100644
--- a/Makefile
+++ b/Makefile
@@ -38,7 +38,7 @@ spell:
-pylint --disable=all --enable=spelling $(LINTABLE)
pep8:
- pep8 --filename=*.py --ignore=E401,E301 --repeat $(LINTABLE)
+ pep8 --filename=*.py --repeat $(LINTABLE)
test:
tox -e py27 $(ARGS)
@@ -65,6 +65,9 @@ winkit:
kit_local:
cp -v dist/* `awk -F "=" '/find-links/ {print $$2}' ~/.pip/pip.conf`
+ # pip caches wheels of things it has installed. Clean them out so we
+ # don't go crazy trying to figure out why our new code isn't installing.
+ find ~/Library/Caches/pip/wheels -name 'coverage-*' -delete
pypi:
python setup.py register
diff --git a/coverage/annotate.py b/coverage/annotate.py
index 4b4966e9..60772656 100644
--- a/coverage/annotate.py
+++ b/coverage/annotate.py
@@ -7,6 +7,7 @@ import io
import os
import re
+from coverage.files import flat_rootname
from coverage.report import Reporter
class AnnotateReporter(Reporter):
@@ -57,7 +58,7 @@ class AnnotateReporter(Reporter):
excluded = sorted(analysis.excluded)
if self.directory:
- dest_file = os.path.join(self.directory, fr.flat_rootname())
+ dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
if dest_file.endswith("_py"):
dest_file = dest_file[:-3] + ".py"
dest_file += ",cover"
diff --git a/coverage/collector.py b/coverage/collector.py
index ff7400e8..48b017ce 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -190,7 +190,7 @@ class Collector(object):
"""Start a new Tracer object, and store it in self.tracers."""
tracer = self._trace_class()
tracer.data = self.data
- tracer.arcs = self.branch
+ tracer.trace_arcs = self.branch
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn
diff --git a/coverage/ctracer/tracer.c b/coverage/ctracer/tracer.c
index 586bddc0..933f3cfd 100644
--- a/coverage/ctracer/tracer.c
+++ b/coverage/ctracer/tracer.c
@@ -911,7 +911,7 @@ CTracer_start(CTracer *self, PyObject *args_unused)
{
PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
self->started = 1;
- self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs);
+ self->tracing_arcs = self->trace_arcs && PyObject_IsTrue(self->trace_arcs);
self->cur_entry.last_line = -1;
/* start() returns a trace function usable with sys.settrace() */
@@ -976,7 +976,7 @@ CTracer_members[] = {
{ "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
PyDoc_STR("Dictionary caching should_trace results.") },
- { "arcs", T_OBJECT, offsetof(CTracer, arcs), 0,
+ { "trace_arcs", T_OBJECT, offsetof(CTracer, trace_arcs), 0,
PyDoc_STR("Should we trace arcs, or just lines?") },
{ NULL }
diff --git a/coverage/ctracer/tracer.h b/coverage/ctracer/tracer.h
index e6c0d669..9f17e03e 100644
--- a/coverage/ctracer/tracer.h
+++ b/coverage/ctracer/tracer.h
@@ -24,7 +24,7 @@ typedef struct CTracer {
PyObject * data;
PyObject * file_tracers;
PyObject * should_trace_cache;
- PyObject * arcs;
+ PyObject * trace_arcs;
/* Has the tracer been started? */
int started;
diff --git a/coverage/env.py b/coverage/env.py
index 1d2846c6..4cd02c04 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -27,4 +27,6 @@ C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
# Are we running our test suite?
-TESTING = os.getenv('COVERAGE_TESTING', '') != ''
+# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
+# test-specific behavior like contracts.
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
diff --git a/coverage/files.py b/coverage/files.py
index e3ebd6ce..d2742a39 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -67,6 +67,20 @@ def canonical_filename(filename):
return CANONICAL_FILENAME_CACHE[filename]
+def flat_rootname(filename):
+ """A base for a flat filename to correspond to this file.
+
+ Useful for writing files about the code where you want all the files in
+ the same directory, but need to differentiate same-named files from
+ different directories.
+
+ For example, the file a/b/c.py will return 'a_b_c_py'
+
+ """
+ name = os.path.splitdrive(filename)[1]
+ return re.sub(r"[\\/.:]", "_", name)
+
+
if env.WINDOWS:
_ACTUAL_PATH_CACHE = {}
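The new flat_rootname() helper replaces the FileReporter method of the same name (removed in coverage/plugin.py below). A minimal usage sketch, with values taken from its docstring and from the new test in tests/test_files.py:

    from coverage.files import flat_rootname

    # Slashes, backslashes, dots, and colons all become underscores, so
    # same-named files from different directories stay distinguishable.
    flat_rootname("a/b/c.py")            # -> 'a_b_c_py'
    flat_rootname(r"c:\foo\bar.html")    # -> 'c__foo_bar_html'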
diff --git a/coverage/html.py b/coverage/html.py
index 3c5e3b46..6d1bb434 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -12,6 +12,7 @@ import shutil
import coverage
from coverage import env
from coverage.backward import iitems
+from coverage.files import flat_rootname
from coverage.misc import CoverageException, Hasher
from coverage.report import Reporter
from coverage.results import Numbers
@@ -92,7 +93,7 @@ class HtmlReporter(Reporter):
self.coverage = cov
self.files = []
- self.arcs = self.coverage.data.has_arcs()
+ self.has_arcs = self.coverage.data.has_arcs()
self.status = HtmlStatus()
self.extra_css = None
self.totals = Numbers()
@@ -166,20 +167,20 @@ class HtmlReporter(Reporter):
source = fr.source()
# Find out if the file on disk is already correct.
- flat_rootname = fr.flat_rootname()
+ rootname = flat_rootname(fr.relative_filename())
this_hash = self.file_hash(source.encode('utf-8'), fr)
- that_hash = self.status.file_hash(flat_rootname)
+ that_hash = self.status.file_hash(rootname)
if this_hash == that_hash:
# Nothing has changed to require the file to be reported again.
- self.files.append(self.status.index_info(flat_rootname))
+ self.files.append(self.status.index_info(rootname))
return
- self.status.set_file_hash(flat_rootname, this_hash)
+ self.status.set_file_hash(rootname, this_hash)
# Get the numbers for this file.
nums = analysis.numbers
- if self.arcs:
+ if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
# These classes determine which lines are highlighted by default.
@@ -201,7 +202,7 @@ class HtmlReporter(Reporter):
line_class.append(c_exc)
elif lineno in analysis.missing:
line_class.append(c_mis)
- elif self.arcs and lineno in missing_branch_arcs:
+ elif self.has_arcs and lineno in missing_branch_arcs:
line_class.append(c_par)
shorts = []
longs = []
@@ -250,13 +251,13 @@ class HtmlReporter(Reporter):
# Write the HTML page for this file.
template_values = {
'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
- 'arcs': self.arcs, 'extra_css': self.extra_css,
+ 'has_arcs': self.has_arcs, 'extra_css': self.extra_css,
'fr': fr, 'nums': nums, 'lines': lines,
'time_stamp': self.time_stamp,
}
html = spaceless(self.source_tmpl.render(template_values))
- html_filename = flat_rootname + ".html"
+ html_filename = rootname + ".html"
html_path = os.path.join(self.directory, html_filename)
self.write_html(html_path, html)
@@ -267,7 +268,7 @@ class HtmlReporter(Reporter):
'relative_filename': fr.relative_filename(),
}
self.files.append(index_info)
- self.status.set_index_info(flat_rootname, index_info)
+ self.status.set_index_info(rootname, index_info)
def index_file(self):
"""Write the index.html file for this report."""
@@ -276,7 +277,7 @@ class HtmlReporter(Reporter):
self.totals = sum(f['nums'] for f in self.files)
html = index_tmpl.render({
- 'arcs': self.arcs,
+ 'has_arcs': self.has_arcs,
'extra_css': self.extra_css,
'files': self.files,
'totals': self.totals,
diff --git a/coverage/htmlfiles/index.html b/coverage/htmlfiles/index.html
index 25ced0eb..ee2deab0 100644
--- a/coverage/htmlfiles/index.html
+++ b/coverage/htmlfiles/index.html
@@ -44,7 +44,7 @@
<span class="key">s</span>
<span class="key">m</span>
<span class="key">x</span>
- {% if arcs %}
+ {% if has_arcs %}
<span class="key">b</span>
<span class="key">p</span>
{% endif %}
@@ -62,7 +62,7 @@
<th class="shortkey_s">statements</th>
<th class="shortkey_m">missing</th>
<th class="shortkey_x">excluded</th>
- {% if arcs %}
+ {% if has_arcs %}
<th class="shortkey_b">branches</th>
<th class="shortkey_p">partial</th>
{% endif %}
@@ -76,7 +76,7 @@
<td>{{totals.n_statements}}</td>
<td>{{totals.n_missing}}</td>
<td>{{totals.n_excluded}}</td>
- {% if arcs %}
+ {% if has_arcs %}
<td>{{totals.n_branches}}</td>
<td>{{totals.n_partial_branches}}</td>
{% endif %}
@@ -90,7 +90,7 @@
<td>{{file.nums.n_statements}}</td>
<td>{{file.nums.n_missing}}</td>
<td>{{file.nums.n_excluded}}</td>
- {% if arcs %}
+ {% if has_arcs %}
<td>{{file.nums.n_branches}}</td>
<td>{{file.nums.n_partial_branches}}</td>
{% endif %}
diff --git a/coverage/htmlfiles/pyfile.html b/coverage/htmlfiles/pyfile.html
index 7bf9f554..ad7969db 100644
--- a/coverage/htmlfiles/pyfile.html
+++ b/coverage/htmlfiles/pyfile.html
@@ -37,7 +37,7 @@
<span class="{{c_mis}} shortkey_m button_toggle_mis">{{nums.n_missing}} missing</span>
<span class="{{c_exc}} shortkey_x button_toggle_exc">{{nums.n_excluded}} excluded</span>
- {% if arcs %}
+ {% if has_arcs %}
<span class="{{c_par}} shortkey_p button_toggle_par">{{nums.n_partial_branches}} partial</span>
{% endif %}
</h2>
diff --git a/coverage/misc.py b/coverage/misc.py
index 50396d61..44f89772 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -83,19 +83,24 @@ def format_lines(statements, lines):
def expensive(fn):
- """A decorator to cache the result of an expensive operation.
+ """A decorator to indicate that a method shouldn't be called more than once.
- Only applies to methods with no arguments.
+ Normally, this does nothing. During testing, this raises an exception if
+ called more than once.
"""
- attr = "_cache_" + fn.__name__
-
- def _wrapped(self):
- """Inner function that checks the cache."""
- if not hasattr(self, attr):
- setattr(self, attr, fn(self))
- return getattr(self, attr)
- return _wrapped
+ if env.TESTING:
+ attr = "_once_" + fn.__name__
+
+ def _wrapped(self):
+ """Inner function that checks the cache."""
+ if hasattr(self, attr):
+ raise Exception("Shouldn't have called %s more than once" % fn.__name__)
+ setattr(self, attr, True)
+ return fn(self)
+ return _wrapped
+ else:
+ return fn
def bool_or_none(b):
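For readers skimming the patch, a self-contained sketch of the pattern the reworked decorator now enforces (an illustration only, with a local TESTING flag standing in for coverage.env.TESTING):

    TESTING = True  # coverage.py reads this from coverage.env

    def expensive(fn):
        """Outside of testing: a no-op. Under test: assert at most one call."""
        if not TESTING:
            return fn
        attr = "_once_" + fn.__name__

        def _wrapped(self):
            if hasattr(self, attr):
                raise Exception("Shouldn't have called %s more than once" % fn.__name__)
            setattr(self, attr, True)
            return fn(self)
        return _wrapped

    class Thing(object):
        @expensive
        def compute(self):
            return 42

    t = Thing()
    t.compute()   # fine
    t.compute()   # raises: callers are expected to hold on to the result

The point of the change: methods such as PythonFileReporter.lines() (below) now do their own caching, and @expensive merely verifies during the test suite that nothing calls them twice.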
diff --git a/coverage/parser.py b/coverage/parser.py
index 497ddeb4..014b4ab5 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -38,7 +38,7 @@ class PythonParser(object):
except IOError as err:
raise NoSource(
"No source for code: '%s': %s" % (self.filename, err)
- )
+ )
self.exclude = exclude
@@ -62,8 +62,9 @@ class PythonParser(object):
# The line numbers that start statements.
self.statement_starts = set()
- # Lazily-created ByteParser
+ # Lazily-created ByteParser and arc data.
self._byte_parser = None
+ self._all_arcs = None
@property
def byte_parser(self):
@@ -112,7 +113,7 @@ class PythonParser(object):
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
- ))
+ ))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
@@ -191,6 +192,7 @@ class PythonParser(object):
for (a, b) in arcs
]
+ @expensive
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
@@ -203,12 +205,16 @@ class PythonParser(object):
"""
try:
self._raw_parse()
- except (tokenize.TokenError, IndentationError) as tokerr:
- msg, lineno = tokerr.args # pylint: disable=unpacking-non-sequence
+ except (tokenize.TokenError, IndentationError) as err:
+ if hasattr(err, "lineno"):
+ lineno = err.lineno # IndentationError
+ else:
+ lineno = err.args[1][0] # TokenError
raise NotPython(
- "Couldn't parse '%s' as Python source: '%s' at %s" %
- (self.filename, msg, lineno)
+ "Couldn't parse '%s' as Python source: '%s' at line %d" % (
+ self.filename, err.args[0], lineno
)
+ )
excluded_lines = self.first_lines(self.excluded)
ignore = set()
@@ -220,23 +226,22 @@ class PythonParser(object):
return lines, excluded_lines
- @expensive
def arcs(self):
"""Get information about the arcs available in the code.
- Returns a sorted list of line number pairs. Line numbers have been
+ Returns a list of line number pairs. Line numbers have been
normalized to the first line of multi-line statements.
"""
- all_arcs = []
- for l1, l2 in self.byte_parser._all_arcs():
- fl1 = self.first_line(l1)
- fl2 = self.first_line(l2)
- if fl1 != fl2:
- all_arcs.append((fl1, fl2))
- return sorted(all_arcs)
+ if self._all_arcs is None:
+ self._all_arcs = []
+ for l1, l2 in self.byte_parser._all_arcs():
+ fl1 = self.first_line(l1)
+ fl2 = self.first_line(l2)
+ if fl1 != fl2:
+ self._all_arcs.append((fl1, fl2))
+ return self._all_arcs
- @expensive
def exit_counts(self):
"""Get a mapping from line numbers to count of exits from that line.
@@ -290,7 +295,7 @@ OPS_CODE_END = _opcode_set('RETURN_VALUE')
OPS_CHUNK_END = _opcode_set(
'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
'BREAK_LOOP', 'CONTINUE_LOOP',
- )
+)
# Opcodes that unconditionally begin a new code chunk. By starting new chunks
# with unconditional jump instructions, we neatly deal with jumps to jumps
@@ -300,7 +305,7 @@ OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD')
# Opcodes that push a block on the block stack.
OPS_PUSH_BLOCK = _opcode_set(
'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH'
- )
+)
# Block types for exception handling.
OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
@@ -343,10 +348,9 @@ class ByteParser(object):
for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']:
if not hasattr(self.code, attr):
raise CoverageException(
- "This implementation of Python doesn't support code "
- "analysis.\n"
+ "This implementation of Python doesn't support code analysis.\n"
"Run coverage.py under CPython for this command."
- )
+ )
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
@@ -664,4 +668,4 @@ class Chunk(object):
"!" if self.first else "",
"v" if self.entrance else "",
list(self.exits),
- )
+ )
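The arcs() caching above sits on top of ByteParser, which digs executable-line and arc information out of compiled code objects. As background, a minimal standard-library illustration of the line-table primitive that the statement analysis builds on (this is not coverage.py's own code):

    import dis

    source = "a = 1\n\nif a:\n    b = 2\n"
    code = compile(source, "<sample>", "exec")

    # dis.findlinestarts() yields (bytecode offset, line number) pairs: the raw
    # material for "which lines could execute", before any docstring tweaks.
    line_starts = sorted(set(line for _, line in dis.findlinestarts(code) if line))
    print(line_starts)   # [1, 3, 4] -- the blank line 2 never appears

(The exact numbers can vary slightly across CPython versions, but the idea is the same; see the new doc/howitworks.rst page later in this changeset.)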
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 92da8d32..7092d39e 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -109,7 +109,7 @@ def source_token_lines(source):
mark_end = False
else:
if mark_start and scol > col:
- line.append(("ws", " " * (scol - col)))
+ line.append(("ws", u" " * (scol - col)))
mark_start = False
tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
if ttype == token.NAME and keyword.iskeyword(ttext):
diff --git a/coverage/plugin.py b/coverage/plugin.py
index f4182b0f..1b098364 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -3,11 +3,8 @@
"""Plugin interfaces for coverage.py"""
-import os
-import re
-
from coverage import files
-from coverage.misc import _needs_to_implement
+from coverage.misc import contract, _needs_to_implement
class CoveragePlugin(object):
@@ -154,42 +151,42 @@ class FileTracer(object):
class FileReporter(object):
"""Support needed for files during the reporting phase."""
+
def __init__(self, filename):
- # TODO: document that this init happens.
+ """Simple initialization of a `FileReporter`.
+
+ The `filename` argument is the path to the file being reported. This
+ will be available as the `.filename` attribute on the object. Other
+ method implementations on this base class rely on this attribute.
+
+ """
self.filename = filename
def __repr__(self):
return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
def relative_filename(self):
- return files.relative_filename(self.filename)
-
- # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
- # of them defined.
-
- def __eq__(self, other):
- return isinstance(other, FileReporter) and self.filename == other.filename
+ """Return the relative filename for this file.
- def __ne__(self, other):
- return not (self == other)
+ This file path will be displayed in reports. You only need to supply
+ this method if you have an unusual syntax for file paths. The default
+ implementation will supply the actual project-relative file path.
- def __lt__(self, other):
- return self.filename < other.filename
-
- def __le__(self, other):
- return self.filename <= other.filename
+ """
+ return files.relative_filename(self.filename)
- def __gt__(self, other):
- return self.filename > other.filename
+ def lines(self):
+ """Return a set of executable lines"""
+ _needs_to_implement(self, "lines")
- def __ge__(self, other):
- return self.filename >= other.filename
+ def excluded_lines(self):
+ return set()
- def statements(self):
- _needs_to_implement(self, "statements")
+ def arcs(self):
+ return []
- def excluded_statements(self):
- return set([])
+ def no_branch_lines(self):
+ return set()
def translate_lines(self, lines):
return set(lines)
@@ -197,47 +194,60 @@ class FileReporter(object):
def translate_arcs(self, arcs):
return arcs
- def no_branch_lines(self):
- return set()
-
def exit_counts(self):
return {}
- def arcs(self):
- return []
-
+ @contract(returns='unicode')
def source(self):
"""Return the source for the code, a Unicode string."""
# A generic implementation: read the text of self.filename
- with open(self.filename) as f:
- return f.read()
+ with open(self.filename, "rb") as f:
+ return f.read().decode("utf8")
def source_token_lines(self):
- """Return the 'tokenized' text for the code."""
+ """Generate a series of tokenized lines, one for each line in `source`.
+
+ These tokens are used for syntax-colored reports.
+
+ Each line is a list of pairs, each pair is a token::
+
+ [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+
+ Each pair has a token class, and the token text. The token classes are:
+
+ * `'com'`: a comment
+ * `'key'`: a keyword
+ * `'nam'`: a name, or identifier
+ * `'num'`: a number
+ * `'op'`: an operator
+ * `'str'`: a string literal
+ * `'txt'`: some other kind of text
+
+ If you concatenate all the token texts, and then join them with newlines,
+ you should have your original `source` back.
+
+ """
# A generic implementation, each line is one "txt" token.
for line in self.source().splitlines():
yield [('txt', line)]
- def should_be_python(self):
- """Does it seem like this file should contain Python?
+ # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
+ # of them defined.
- This is used to decide if a file reported as part of the execution of
- a program was really likely to have contained Python in the first
- place.
- """
- return False
+ def __eq__(self, other):
+ return isinstance(other, FileReporter) and self.filename == other.filename
- def flat_rootname(self):
- """A base for a flat filename to correspond to this file.
+ def __ne__(self, other):
+ return not (self == other)
- Useful for writing files about the code where you want all the files in
- the same directory, but need to differentiate same-named files from
- different directories.
+ def __lt__(self, other):
+ return self.filename < other.filename
- For example, the file a/b/c.py will return 'a_b_c_py'
+ def __le__(self, other):
+ return self.filename <= other.filename
- You should not need to override this method.
+ def __gt__(self, other):
+ return self.filename > other.filename
- """
- name = os.path.splitdrive(self.relative_filename())[1]
- return re.sub(r"[\\/.:]", "_", name)
+ def __ge__(self, other):
+ return self.filename >= other.filename
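To make the reshaped FileReporter API concrete, a hedged sketch of a minimal third-party reporter (the class and the file format it pretends to handle are invented for illustration). lines() is the only method that must be supplied; source(), source_token_lines(), excluded_lines(), arcs(), and the comparison operators all fall back to the base-class defaults shown above:

    import coverage

    class TemplateFileReporter(coverage.FileReporter):
        """Treat every non-blank, non-comment line of a template as executable."""

        def lines(self):
            executable = set()
            for lineno, line in enumerate(self.source().splitlines(), start=1):
                stripped = line.strip()
                if stripped and not stripped.startswith("#"):
                    executable.add(lineno)
            return executable

A plugin would return an instance of this from its file-reporter hook, just as tests/plugin1.py and tests/plugin2.py below return coverage.FileReporter subclasses.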
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index 23c1bc1a..f88342e9 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -160,7 +160,7 @@ class DebugFileTracerWrapper(FileTracer):
return "%s@%d" % (
os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
- )
+ )
def source_filename(self):
sfilename = self.tracer.source_filename()
@@ -198,14 +198,14 @@ class DebugFileReporterWrapper(FileReporter):
self.debug.write("relative_filename() --> %r" % (ret,))
return ret
- def statements(self):
- ret = self.reporter.statements()
- self.debug.write("statements() --> %r" % (ret,))
+ def lines(self):
+ ret = self.reporter.lines()
+ self.debug.write("lines() --> %r" % (ret,))
return ret
- def excluded_statements(self):
- ret = self.reporter.excluded_statements()
- self.debug.write("excluded_statements() --> %r" % (ret,))
+ def excluded_lines(self):
+ ret = self.reporter.excluded_lines()
+ self.debug.write("excluded_lines() --> %r" % (ret,))
return ret
def translate_lines(self, lines):
@@ -242,8 +242,3 @@ class DebugFileReporterWrapper(FileReporter):
ret = list(self.reporter.source_token_lines())
self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
return ret
-
- def should_be_python(self):
- ret = self.reporter.should_be_python()
- self.debug.write("should_be_python() --> %r" % (ret,))
- return ret
diff --git a/coverage/python.py b/coverage/python.py
index 94d20fd8..33e6ec01 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -7,7 +7,7 @@ import os.path
import zipimport
from coverage import env, files
-from coverage.misc import contract, NoSource, join_regex
+from coverage.misc import contract, expensive, NoSource, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
@@ -126,13 +126,15 @@ class PythonFileReporter(FileReporter):
)
return self._parser
- def statements(self):
+ @expensive
+ def lines(self):
"""Return the line numbers of statements in the file."""
if self._statements is None:
self._statements, self._excluded = self.parser.parse_source()
return self._statements
- def excluded_statements(self):
+ @expensive
+ def excluded_lines(self):
"""Return the line numbers of statements in the file."""
if self._excluded is None:
self._statements, self._excluded = self.parser.parse_source()
@@ -144,6 +146,7 @@ class PythonFileReporter(FileReporter):
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
+ @expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
@@ -151,9 +154,11 @@ class PythonFileReporter(FileReporter):
)
return no_branch
+ @expensive
def arcs(self):
return self.parser.arcs()
+ @expensive
def exit_counts(self):
return self.parser.exit_counts()
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index c657ad01..94b8231e 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -36,7 +36,7 @@ class PyTracer(object):
def __init__(self):
# Attributes set from the collector:
self.data = None
- self.arcs = False
+ self.trace_arcs = False
self.should_trace = None
self.should_trace_cache = None
self.warn = None
@@ -68,7 +68,7 @@ class PyTracer(object):
if self.last_exc_back:
if frame == self.last_exc_back:
# Someone forgot a return event.
- if self.arcs and self.cur_file_dict:
+ if self.trace_arcs and self.cur_file_dict:
pair = (self.last_line, -self.last_exc_firstlineno)
self.cur_file_dict[pair] = None
self.cur_file_dict, self.last_line = self.data_stack.pop()
@@ -99,13 +99,13 @@ class PyTracer(object):
# Record an executed line.
if self.cur_file_dict is not None:
lineno = frame.f_lineno
- if self.arcs:
+ if self.trace_arcs:
self.cur_file_dict[(self.last_line, lineno)] = None
else:
self.cur_file_dict[lineno] = None
self.last_line = lineno
elif event == 'return':
- if self.arcs and self.cur_file_dict:
+ if self.trace_arcs and self.cur_file_dict:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
bytecode = frame.f_code.co_code[frame.f_lasti]
diff --git a/coverage/report.py b/coverage/report.py
index fa081862..1be4155d 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -87,5 +87,7 @@ class Reporter(object):
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
+ # NotPython is only raised by PythonFileReporter, which has a
+ # should_be_python() method.
if fr.should_be_python() and not self.config.ignore_errors:
raise
diff --git a/coverage/results.py b/coverage/results.py
index f15fae74..9627373d 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -12,12 +12,12 @@ from coverage.misc import format_lines
class Analysis(object):
"""The results of analyzing a FileReporter."""
- def __init__(self, data, file_reporters):
+ def __init__(self, data, file_reporter):
self.data = data
- self.file_reporter = file_reporters
+ self.file_reporter = file_reporter
self.filename = self.file_reporter.filename
- self.statements = self.file_reporter.statements()
- self.excluded = self.file_reporter.excluded_statements()
+ self.statements = self.file_reporter.lines()
+ self.excluded = self.file_reporter.excluded_lines()
# Identify missing statements.
executed = self.data.lines(self.filename) or []
@@ -25,6 +25,8 @@ class Analysis(object):
self.missing = self.statements - executed
if self.data.has_arcs():
+ self._arc_possibilities = sorted(self.file_reporter.arcs())
+ self.exit_counts = self.file_reporter.exit_counts()
self.no_branch = self.file_reporter.no_branch_lines()
n_branches = self.total_branches()
mba = self.missing_branch_arcs()
@@ -33,8 +35,10 @@ class Analysis(object):
)
n_missing_branches = sum(len(v) for k,v in iitems(mba))
else:
- n_branches = n_partial_branches = n_missing_branches = 0
+ self._arc_possibilities = []
+ self.exit_counts = {}
self.no_branch = set()
+ n_branches = n_partial_branches = n_missing_branches = 0
self.numbers = Numbers(
n_files=1,
@@ -60,7 +64,7 @@ class Analysis(object):
def arc_possibilities(self):
"""Returns a sorted list of the arcs in the code."""
- return self.file_reporter.arcs()
+ return self._arc_possibilities
def arcs_executed(self):
"""Returns a sorted list of the arcs actually executed in the code."""
@@ -116,13 +120,11 @@ class Analysis(object):
def branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
- exit_counts = self.file_reporter.exit_counts()
- return [l1 for l1,count in iitems(exit_counts) if count > 1]
+ return [l1 for l1,count in iitems(self.exit_counts) if count > 1]
def total_branches(self):
"""How many total branches are there?"""
- exit_counts = self.file_reporter.exit_counts()
- return sum(count for count in exit_counts.values() if count > 1)
+ return sum(count for count in self.exit_counts.values() if count > 1)
def missing_branch_arcs(self):
"""Return arcs that weren't executed from branch lines.
@@ -145,11 +147,10 @@ class Analysis(object):
(total_exits, taken_exits).
"""
- exit_counts = self.file_reporter.exit_counts()
missing_arcs = self.missing_branch_arcs()
stats = {}
for lnum in self.branch_lines():
- exits = exit_counts[lnum]
+ exits = self.exit_counts[lnum]
try:
missing = len(missing_arcs[lnum])
except KeyError:
diff --git a/coverage/summary.py b/coverage/summary.py
index 03270c04..46aa4f5c 100644
--- a/coverage/summary.py
+++ b/coverage/summary.py
@@ -61,10 +61,7 @@ class SummaryReporter(Reporter):
if self.config.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
- if self.branches:
- no_missing_branches = (nums.n_partial_branches == 0)
- else:
- no_missing_branches = True
+ no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
continue
@@ -87,6 +84,8 @@ class SummaryReporter(Reporter):
report_it = not self.config.ignore_errors
if report_it:
typ, msg = sys.exc_info()[:2]
+ # NotPython is only raised by PythonFileReporter, which has a
+ # should_be_python() method.
if typ is NotPython and not fr.should_be_python():
report_it = False
if report_it:
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index b60cecd2..d547559c 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -35,7 +35,7 @@ class XmlReporter(Reporter):
self.source_paths = set()
self.packages = {}
self.xml_out = None
- self.arcs = coverage.data.has_arcs()
+ self.has_arcs = coverage.data.has_arcs()
def report(self, morfs, outfile=None):
"""Generate a Cobertura-compatible XML report for `morfs`.
@@ -92,7 +92,7 @@ class XmlReporter(Reporter):
xclasses.appendChild(class_elts[class_name])
xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
xpackage.setAttribute("line-rate", rate(lhits, lnum))
- if self.arcs:
+ if self.has_arcs:
branch_rate = rate(bhits, bnum)
else:
branch_rate = "0"
@@ -105,7 +105,7 @@ class XmlReporter(Reporter):
bhits_tot += bhits
xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
- if self.arcs:
+ if self.has_arcs:
branch_rate = rate(bhits_tot, bnum_tot)
else:
branch_rate = "0"
@@ -127,7 +127,7 @@ class XmlReporter(Reporter):
# Create the 'lines' and 'package' XML elements, which
# are populated later. Note that a package == a directory.
- filename = files.relative_filename(fr.filename)
+ filename = fr.relative_filename()
filename = filename.replace("\\", "/")
dirname = os.path.dirname(filename) or "."
parts = dirname.split("/")
@@ -161,7 +161,7 @@ class XmlReporter(Reporter):
# executed? If so, that should be recorded here.
xline.setAttribute("hits", str(int(line not in analysis.missing)))
- if self.arcs:
+ if self.has_arcs:
if line in branch_stats:
total, taken = branch_stats[line]
xline.setAttribute("branch", "true")
@@ -177,7 +177,7 @@ class XmlReporter(Reporter):
class_lines = len(analysis.statements)
class_hits = class_lines - len(analysis.missing)
- if self.arcs:
+ if self.has_arcs:
class_branches = sum(t for t, k in branch_stats.values())
missing_branches = sum(t - k for t, k in branch_stats.values())
class_br_hits = class_branches - missing_branches
@@ -187,7 +187,7 @@ class XmlReporter(Reporter):
# Finalize the statistics that are collected in the XML DOM.
xclass.setAttribute("line-rate", rate(class_hits, class_lines))
- if self.arcs:
+ if self.has_arcs:
branch_rate = rate(class_br_hits, class_branches)
else:
branch_rate = "0"
diff --git a/doc/_static/coverage.css b/doc/_static/coverage.css
new file mode 100644
index 00000000..d32132ef
--- /dev/null
+++ b/doc/_static/coverage.css
@@ -0,0 +1,7 @@
+body {
+ font-family: Georgia;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ font-family: Helvetica;
+}
diff --git a/doc/api_coverage.rst b/doc/api_coverage.rst
index 4aa71719..ddbe9a9f 100644
--- a/doc/api_coverage.rst
+++ b/doc/api_coverage.rst
@@ -8,7 +8,7 @@
The Coverage class
------------------
-.. automodule:: coverage
+.. module:: coverage
.. autoclass:: Coverage
:members:
diff --git a/doc/api_coveragedata.rst b/doc/api_coveragedata.rst
index 7e7a9a5f..bd33dd55 100644
--- a/doc/api_coveragedata.rst
+++ b/doc/api_coveragedata.rst
@@ -10,7 +10,7 @@ The CoverageData class
.. versionadded:: 4.0
-.. automodule:: coverage
+.. module:: coverage
.. autoclass:: CoverageData
:members:
diff --git a/doc/conf.py b/doc/conf.py
index 98c24238..8b2bb24c 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -108,44 +108,14 @@ pygments_style = 'sphinx'
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
-if 0:
- if not on_rtd: # only import and set the theme if we're building docs locally
- import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
- html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+if not on_rtd: # only import and set the theme if we're building docs locally
+ import sphinx_rtd_theme
+ html_theme = 'sphinx_rtd_theme'
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
-import alabaster
-html_theme = 'alabaster'
-html_theme_path = ['.', alabaster.get_path()]
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
- 'description':
- 'A quick guide to Python for experienced programmers',
- 'show_related': True,
- 'github_button': False,
- 'show_powered_by': False,
-
- 'font_family':
- 'Georgia Pro, Georgia, '
- 'serif',
- 'head_font_family':
- 'Franklin Gothic Medium, Franklin Gothic, ITC Franklin Gothic, '
- 'Helvetica, Arial, '
- 'sans-serif',
- 'code_font_family':
- 'Consolas, '
- 'Menlo, '
- 'monospace',
-}
-
-
-
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
@@ -234,5 +204,6 @@ autoclass_content = "class"
prerelease = bool(max(release).isalpha())
def setup(app):
+ app.add_stylesheet('coverage.css')
app.add_config_value('prerelease', False, 'env')
- print "** Prerelease = %r" % prerelease
+ app.info("** Prerelease = %r" % prerelease)
diff --git a/doc/howitworks.rst b/doc/howitworks.rst
new file mode 100644
index 00000000..08b19cba
--- /dev/null
+++ b/doc/howitworks.rst
@@ -0,0 +1,98 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+.. _howitworks:
+
+=====================
+How Coverage.py works
+=====================
+
+.. :history: 20150812T071000, new page.
+
+For advanced use of coverage.py, or just because you are curious, it helps to
+understand what's happening behind the scenes. Coverage.py works in three
+phases:
+
+* **Execution**: your code is run, and monitored to see what lines were executed.
+
+* **Analysis**: your code is examined to determine what lines could have run.
+
+* **Reporting**: the results of execution and analysis are combined to produce
+ a coverage number and an indication of missing execution.
+
+The execution phase is handled by the ``coverage run`` command. The analysis
+and reporting phases are handled by the reporting commands like ``coverage
+report`` or ``coverage html``.
+
+Let's look at each phase in more detail.
+
+
+Execution
+---------
+
+At the heart of the execution phase is a Python trace function. This is a
+function that Python will invoke for each line executed in a program.
+Coverage.py implements a trace function that records each file and line number
+as it is executed.
+
+Executing a function for every line in your program can make execution very
+slow. Coverage.py's trace function is implemented in C to reduce that
+slowdown, and also takes care to not trace code that you aren't interested in.
+
+When measuring branch coverage, the same trace function is used, but instead of
+recording line numbers, coverage.py records pairs of line numbers. Each
+invocation of the trace function remembers the line number, then the next
+invocation records the pair `(prev, this)` to indicate that execution
+transitioned from the previous line to this line. Internally, these are called
+arcs.
+
+For more details of trace functions, see the Python docs for `sys.settrace`_,
+or if you are really brave, `How C trace functions really work`_.
+
+At the end of execution, coverage.py writes the data it collected to a data
+file, usually named ``.coverage``. This is a JSON-based file containing all of
+the recorded file names and line numbers executed.
+
+.. _sys.settrace: https://docs.python.org/3/library/sys.html#sys.settrace
+.. _How C trace functions really work: http://nedbatchelder.com/text/trace-function.html
+
+
+Analysis
+--------
+
+After your program has been executed and the line numbers recorded, coverage.py
+needs to determine what lines could have been executed. Luckily, compiled
+Python files (.pyc files) have a table of line numbers in them. Coverage.py
+reads this table to get the set of executable lines.
+
+The table isn't used directly, because it records line numbers for docstrings,
+for example, and we don't want to consider them executable. A few tweaks are
+made for considerations like this, and we have a set of lines that could have
+been executed.
+
+The data file is read to get the set of lines that were executed. The
+difference between those two sets is the set of lines that were not executed.
+
+The same principle applies for branch measurement, though the process for
+determining possible branches is more involved. Coverage.py reads the bytecode
+of the compiled Python file, and decides on a set of possible branches.
+Unfortunately, this process is inexact, and there are some `well-known cases`__
+that it gets wrong.
+
+.. __: https://bitbucket.org/ned/coveragepy/issues?status=new&status=open&component=branch
+
+
+Reporting
+---------
+
+Once we have the set of executed lines and missing lines, reporting is just a
+matter of formatting that information in a useful way. Each reporting method
+(text, html, annotated source, xml) has a different output format, but the
+process is the same: write out the information in the particular format,
+possibly including the source code itself.
+
+
+Plugins
+-------
+
+Plugins interact with these phases.
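The Execution section of the new page describes the trace function; the same idea can be sketched in a few lines of pure Python with sys.settrace. This is a toy illustration of line and arc recording, not the real PyTracer or CTracer:

    import sys

    lines = set()    # {(filename, lineno)}
    arcs = set()     # {(filename, (prev_lineno, lineno))}
    last_line = {}   # previous line per frame, keyed by id(frame)

    def tracer(frame, event, arg):
        if event == 'call':
            last_line[id(frame)] = -1      # -1 marks "entered the function"
        elif event == 'line':
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            lines.add((filename, lineno))
            arcs.add((filename, (last_line.get(id(frame), -1), lineno)))
            last_line[id(frame)] = lineno
        return tracer                      # keep tracing this frame

    def demo(x):
        if x:
            return "yes"
        return "no"

    sys.settrace(tracer)
    demo(True)
    sys.settrace(None)
    print(sorted(arcs))   # an arc from -1 into the function, then if-line -> return-line

The real tracers also handle 'return' and exception events, swap in per-file dictionaries chosen by should_trace(), and only record pairs when trace_arcs is set (the attribute renamed in this changeset).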
diff --git a/doc/index.rst b/doc/index.rst
index fef9af7f..f5e134cd 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -172,6 +172,7 @@ More information
branch
subprocess
api
+ howitworks
plugins
contributing
trouble
diff --git a/igor.py b/igor.py
index 4ea65fad..8d6ff1f7 100644
--- a/igor.py
+++ b/igor.py
@@ -79,7 +79,8 @@ def run_tests(tracer, *nose_args):
print(msg)
return
- os.environ['COVERAGE_TESTING'] = "True"
+ if 'COVERAGE_TESTING' not in os.environ:
+ os.environ['COVERAGE_TESTING'] = "True"
print_banner(label)
nose_args = ["nosetests"] + list(nose_args)
nose.core.main(argv=nose_args)
@@ -220,7 +221,7 @@ def do_check_eol():
'*.egg-info',
'_build',
]
- checked = set([])
+ checked = set()
def check_file(fname, crlf=True, trail_white=True):
"""Check a single file for whitespace abuse."""
diff --git a/tests/coveragetest.py b/tests/coveragetest.py
index 9e0bb26e..fdb27e6e 100644
--- a/tests/coveragetest.py
+++ b/tests/coveragetest.py
@@ -108,9 +108,9 @@ class CoverageTest(
# Map chars to numbers for arcz_to_arcs
_arcz_map = {'.': -1}
- _arcz_map.update(dict((c, ord(c)-ord('0')) for c in '123456789'))
+ _arcz_map.update(dict((c, ord(c) - ord('0')) for c in '123456789'))
_arcz_map.update(dict(
- (c, 10+ord(c)-ord('A')) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ (c, 10 + ord(c) - ord('A')) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
))
def arcz_to_arcs(self, arcz):
@@ -141,7 +141,7 @@ class CoverageTest(
assert pair[1] == '-'
a, _, b = pair
bsgn = -1
- arcs.append((asgn*self._arcz_map[a], bsgn*self._arcz_map[b]))
+ arcs.append((asgn * self._arcz_map[a], bsgn * self._arcz_map[b]))
return sorted(arcs)
def assert_equal_args(self, a1, a2, msg=None):
@@ -178,7 +178,7 @@ class CoverageTest(
# Coverage.py wants to deal with things as modules with file names.
modname = self.get_module_name()
- self.make_file(modname+".py", text)
+ self.make_file(modname + ".py", text)
if arcs is None and arcz is not None:
arcs = self.arcz_to_arcs(arcz)
@@ -186,9 +186,10 @@ class CoverageTest(
arcs_missing = self.arcz_to_arcs(arcz_missing)
if arcs_unpredicted is None and arcz_unpredicted is not None:
arcs_unpredicted = self.arcz_to_arcs(arcz_unpredicted)
+ branch = any(x is not None for x in [arcs, arcs_missing, arcs_unpredicted])
# Start up coverage.py.
- cov = coverage.Coverage(branch=(arcs_missing is not None))
+ cov = coverage.Coverage(branch=branch)
cov.erase()
for exc in excludes or []:
cov.exclude(exc)
@@ -215,9 +216,7 @@ class CoverageTest(
if statements == line_list:
break
else:
- self.fail(
- "None of the lines choices matched %r" % statements
- )
+ self.fail("None of the lines choices matched %r" % statements)
missing_formatted = analysis.missing_formatted()
if isinstance(missing, string_class):
@@ -227,27 +226,22 @@ class CoverageTest(
if missing_formatted == missing_list:
break
else:
- self.fail(
- "None of the missing choices matched %r" %
- missing_formatted
- )
+ self.fail("None of the missing choices matched %r" % missing_formatted)
if arcs is not None:
- self.assert_equal_args(
- analysis.arc_possibilities(), arcs, "Possible arcs differ"
- )
+ self.assert_equal_args(analysis.arc_possibilities(), arcs, "Possible arcs differ")
if arcs_missing is not None:
self.assert_equal_args(
analysis.arcs_missing(), arcs_missing,
"Missing arcs differ"
- )
+ )
if arcs_unpredicted is not None:
self.assert_equal_args(
analysis.arcs_unpredicted(), arcs_unpredicted,
"Unpredicted arcs differ"
- )
+ )
if report:
frep = StringIO()
diff --git a/tests/modules/plugins/a_plugin.py b/tests/modules/plugins/a_plugin.py
index 2a9910d0..0cc96e5a 100644
--- a/tests/modules/plugins/a_plugin.py
+++ b/tests/modules/plugins/a_plugin.py
@@ -2,8 +2,10 @@
from coverage import CoveragePlugin
+
class Plugin(CoveragePlugin):
pass
+
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
diff --git a/tests/modules/plugins/another.py b/tests/modules/plugins/another.py
index 096d3b9d..80902d34 100644
--- a/tests/modules/plugins/another.py
+++ b/tests/modules/plugins/another.py
@@ -5,8 +5,10 @@
from coverage import CoveragePlugin
+
class Plugin(CoveragePlugin):
pass
+
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
diff --git a/tests/plugin1.py b/tests/plugin1.py
index c28b886f..af4dfc52 100644
--- a/tests/plugin1.py
+++ b/tests/plugin1.py
@@ -20,7 +20,7 @@ class Plugin(coverage.CoveragePlugin):
return FileReporter(filename)
-class FileTracer(coverage.plugin.FileTracer):
+class FileTracer(coverage.FileTracer):
"""A FileTracer emulating a simple static plugin."""
def __init__(self, filename):
@@ -40,14 +40,11 @@ class FileTracer(coverage.plugin.FileTracer):
return lineno*100+5, lineno*100+7
-class FileReporter(coverage.plugin.FileReporter):
+class FileReporter(coverage.FileReporter):
"""Dead-simple FileReporter."""
- def statements(self):
+ def lines(self):
return set([105, 106, 107, 205, 206, 207])
- def excluded_statements(self):
- return set([])
-
def coverage_init(reg, options): # pylint: disable=unused-argument
"""Called by coverage to initialize the plugins here."""
diff --git a/tests/plugin2.py b/tests/plugin2.py
index cbd2fc11..3bdfbdfd 100644
--- a/tests/plugin2.py
+++ b/tests/plugin2.py
@@ -18,7 +18,7 @@ class Plugin(coverage.CoveragePlugin):
return FileReporter(filename)
-class RenderFileTracer(coverage.plugin.FileTracer):
+class RenderFileTracer(coverage.FileTracer):
"""A FileTracer using information from the caller."""
def has_dynamic_source_filename(self):
@@ -35,9 +35,9 @@ class RenderFileTracer(coverage.plugin.FileTracer):
return lineno, lineno+1
-class FileReporter(coverage.plugin.FileReporter):
+class FileReporter(coverage.FileReporter):
"""A goofy file reporter."""
- def statements(self):
+ def lines(self):
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
diff --git a/tests/test_filereporter.py b/tests/test_filereporter.py
index 380d92d3..a348a844 100644
--- a/tests/test_filereporter.py
+++ b/tests/test_filereporter.py
@@ -38,9 +38,6 @@ class FileReporterTest(CoverageTest):
self.assertEqual(acu.relative_filename(), "aa/afile.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.py")
self.assertEqual(ccu.relative_filename(), "aa/bb/cc/cfile.py")
- self.assertEqual(acu.flat_rootname(), "aa_afile_py")
- self.assertEqual(bcu.flat_rootname(), "aa_bb_bfile_py")
- self.assertEqual(ccu.flat_rootname(), "aa_bb_cc_cfile_py")
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
@@ -52,9 +49,6 @@ class FileReporterTest(CoverageTest):
self.assertEqual(acu.relative_filename(), "aa/afile.odd.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.odd.py")
self.assertEqual(b2cu.relative_filename(), "aa/bb.odd/bfile.py")
- self.assertEqual(acu.flat_rootname(), "aa_afile_odd_py")
- self.assertEqual(bcu.flat_rootname(), "aa_bb_bfile_odd_py")
- self.assertEqual(b2cu.flat_rootname(), "aa_bb_odd_bfile_py")
self.assertEqual(acu.source(), "# afile.odd.py\n")
self.assertEqual(bcu.source(), "# bfile.odd.py\n")
self.assertEqual(b2cu.source(), "# bfile.py\n")
@@ -70,9 +64,6 @@ class FileReporterTest(CoverageTest):
self.assertEqual(acu.relative_filename(), native("aa.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc.py"))
- self.assertEqual(acu.flat_rootname(), "aa_py")
- self.assertEqual(bcu.flat_rootname(), "aa_bb_py")
- self.assertEqual(ccu.flat_rootname(), "aa_bb_cc_py")
self.assertEqual(acu.source(), "# aa\n")
self.assertEqual(bcu.source(), "# bb\n")
self.assertEqual(ccu.source(), "") # yes, empty
@@ -88,9 +79,6 @@ class FileReporterTest(CoverageTest):
self.assertEqual(acu.relative_filename(), native("aa/afile.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb/bfile.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc/cfile.py"))
- self.assertEqual(acu.flat_rootname(), "aa_afile_py")
- self.assertEqual(bcu.flat_rootname(), "aa_bb_bfile_py")
- self.assertEqual(ccu.flat_rootname(), "aa_bb_cc_cfile_py")
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
diff --git a/tests/test_files.py b/tests/test_files.py
index b658853a..813f8612 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -9,7 +9,7 @@ import os.path
from coverage import files
from coverage.files import (
TreeMatcher, FnmatchMatcher, ModuleMatcher, PathAliases,
- find_python_files, abs_file, actual_path
+ find_python_files, abs_file, actual_path, flat_rootname,
)
from coverage.misc import CoverageException
from coverage import env
@@ -54,6 +54,10 @@ class FilesTest(CoverageTest):
rel = os.path.join('sub', trick, 'file1.py')
self.assertEqual(files.relative_filename(abs_file(rel)), rel)
+ def test_flat_rootname(self):
+ self.assertEqual(flat_rootname("a/b/c.py"), "a_b_c_py")
+ self.assertEqual(flat_rootname(r"c:\foo\bar.html"), "c__foo_bar_html")
+
class MatcherTest(CoverageTest):
"""Tests of file matchers."""
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 18621d15..84b9a214 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -8,6 +8,7 @@ import textwrap
from tests.coveragetest import CoverageTest
from coverage import env
+from coverage.misc import NotPython
from coverage.parser import PythonParser
@@ -116,6 +117,25 @@ class PythonParserTest(CoverageTest):
""")
self.assertEqual(parser.exit_counts(), { 1:1, 2:1, 3:1, 6:1 })
+ def test_indentation_error(self):
+ msg = (
+ "Couldn't parse '<code>' as Python source: "
+ "'unindent does not match any outer indentation level' at line 3"
+ )
+ with self.assertRaisesRegex(NotPython, msg):
+ _ = self.parse_source("""\
+ 0 spaces
+ 2
+ 1
+ """)
+
+ def test_token_error(self):
+ msg = "Couldn't parse '<code>' as Python source: 'EOF in multi-line string' at line 1"
+ with self.assertRaisesRegex(NotPython, msg):
+ _ = self.parse_source("""\
+ '''
+ """)
+
class ParserFileTest(CoverageTest):
"""Tests for coverage.py's code parsing from files."""
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index 5218f6c9..686dcf99 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -459,7 +459,7 @@ class GoodPluginTest(FileTracerTest):
for snip in [
'filename="bar_4.html" line-rate="0.5" name="bar_4.html"',
'filename="foo_7.html" line-rate="0.2857" name="foo_7.html"',
- ]:
+ ]:
self.assertIn(snip, xml)
def test_defer_to_python(self):
diff --git a/tests/test_summary.py b/tests/test_summary.py
index 850f4dfd..fda44ee7 100644
--- a/tests/test_summary.py
+++ b/tests/test_summary.py
@@ -142,8 +142,7 @@ class SummaryTest(CoverageTest):
self.assertEqual(self.line_count(report), 3)
self.assertIn("mybranch.py ", report)
- self.assertEqual(self.last_line_squeezed(report),
- "mybranch.py 5 0 2 1 86%")
+ self.assertEqual(self.last_line_squeezed(report), "mybranch.py 5 0 2 1 86%")
def test_report_show_missing(self):
self.make_file("mymissing.py", """\
@@ -342,7 +341,7 @@ class SummaryTest(CoverageTest):
# pylint: disable=line-too-long
# Name Stmts Miss Cover
# ----------------------------
- # mycode NotPython: Couldn't parse '/tmp/test_cover/63354509363/mycode.py' as Python source: 'invalid syntax' at line 1
+ # mycode NotPython: Couldn't parse '...' as Python source: 'invalid syntax' at line 1
# No data to report.
last = self.squeezed_lines(report)[-2]
@@ -350,11 +349,10 @@ class SummaryTest(CoverageTest):
last = re.sub(r"parse '.*mycode.py", "parse 'mycode.py", last)
# The actual error message varies version to version
last = re.sub(r": '.*' at", ": 'error' at", last)
- self.assertEqual(last,
- "mycode.py NotPython: "
- "Couldn't parse 'mycode.py' as Python source: "
- "'error' at line 1"
- )
+ self.assertEqual(
+ last,
+ "mycode.py NotPython: Couldn't parse 'mycode.py' as Python source: 'error' at line 1"
+ )
def test_dotpy_not_python_ignored(self):
# We run a .py file, and when reporting, we can't parse it as Python,
diff --git a/tox.ini b/tox.ini
index 7e55bc42..355ca7de 100644
--- a/tox.ini
+++ b/tox.ini
@@ -49,6 +49,8 @@ basepython = pypy3-2.4
# Yes, pep8 will read its settings from tox.ini!
[pep8]
# E265: block comment should start with '# '
+# E301 expected 1 blank line, found 0
+# E401 multiple imports on one line
# The rest are the default ignored warnings.
-ignore = E265,E123,E133,E226,E241,E242
+ignore = E265,E123,E133,E226,E241,E242,E301,E401
max-line-length = 100