Diffstat:
-rw-r--r--  .hgignore | 2
-rw-r--r--  .travis.yml | 4
-rw-r--r--  AUTHORS.txt | 5
-rw-r--r--  CHANGES.txt | 51
-rw-r--r--  Makefile | 7
-rw-r--r--  TODO.txt | 3
-rw-r--r--  coverage/__init__.py | 6
-rw-r--r--  coverage/annotate.py | 79
-rw-r--r--  coverage/backunittest.py | 36
-rw-r--r--  coverage/backward.py | 88
-rw-r--r--  coverage/bytecode.py | 1
-rw-r--r--  coverage/cmdline.py | 25
-rw-r--r--  coverage/codeunit.py | 213
-rw-r--r--  coverage/collector.py | 247
-rw-r--r--  coverage/config.py | 80
-rw-r--r--  coverage/control.py | 210
-rw-r--r--  coverage/data.py | 19
-rw-r--r--  coverage/debug.py | 2
-rw-r--r--  coverage/django.py | 61
-rw-r--r--  coverage/execfile.py | 126
-rw-r--r--  coverage/files.py | 20
-rw-r--r--  coverage/html.py | 13
-rw-r--r--  coverage/misc.py | 11
-rw-r--r--  coverage/parser.py | 75
-rw-r--r--  coverage/phystokens.py | 2
-rw-r--r--  coverage/plugin.py | 108
-rw-r--r--  coverage/pytracer.py | 163
-rw-r--r--  coverage/report.py | 20
-rw-r--r--  coverage/results.py | 71
-rw-r--r--  coverage/summary.py | 11
-rw-r--r--  coverage/templite.py | 180
-rw-r--r--  coverage/test_helpers.py | 258
-rw-r--r--  coverage/tracer.c | 333
-rw-r--r--  coverage/xmlreport.py | 20
-rw-r--r--  doc/api.rst | 9
-rw-r--r--  doc/config.rst | 2
-rw-r--r--  doc/faq.rst | 13
-rw-r--r--  howto.txt | 2
-rw-r--r--  igor.py | 15
-rw-r--r--  lab/parser.py | 28
-rw-r--r--  metacov.ini | 27
-rw-r--r--  pylintrc | 9
-rw-r--r--  requirements.txt | 2
-rw-r--r--  setup.py | 2
-rw-r--r--  tests/backtest.py | 50
-rw-r--r--  tests/backunittest.py | 26
-rw-r--r--  tests/coveragetest.py | 318
-rw-r--r--  tests/farm/annotate/annotate_dir.py | 4
-rw-r--r--  tests/farm/annotate/run.py | 4
-rw-r--r--  tests/farm/annotate/run_multi.py | 4
-rw-r--r--  tests/farm/html/gold_x_xml/coverage.xml | 3
-rw-r--r--  tests/farm/html/gold_y_xml_branch/coverage.xml | 3
-rw-r--r--  tests/farm/html/run_a_xml_1.py | 5
-rw-r--r--  tests/farm/html/run_a_xml_2.py | 5
-rw-r--r--  tests/farm/html/run_unicode.py | 14
-rw-r--r--  tests/farm/html/run_y_xml_branch.py | 5
-rw-r--r--  tests/farm/html/src/coverage.xml | 3
-rw-r--r--  tests/farm/run/run_chdir.py | 2
-rw-r--r--  tests/farm/run/run_timid.py | 8
-rw-r--r--  tests/farm/run/run_xxx.py | 4
-rw-r--r--  tests/modules/pkg1/p1a.py | 2
-rw-r--r--  tests/modules/plugins/__init__.py | 0
-rw-r--r--  tests/modules/plugins/a_plugin.py | 6
-rw-r--r--  tests/modules/plugins/another.py | 6
-rw-r--r--  tests/test_api.py | 8
-rw-r--r--  tests/test_backward.py | 6
-rw-r--r--  tests/test_cmdline.py | 4
-rw-r--r--  tests/test_codeunit.py | 28
-rw-r--r--  tests/test_config.py | 168
-rw-r--r--  tests/test_coroutine.py | 208
-rw-r--r--  tests/test_coverage.py | 14
-rw-r--r--  tests/test_data.py | 14
-rw-r--r--  tests/test_execfile.py | 11
-rw-r--r--  tests/test_farm.py | 66
-rw-r--r--  tests/test_files.py | 77
-rw-r--r--  tests/test_html.py | 42
-rw-r--r--  tests/test_oddball.py | 66
-rw-r--r--  tests/test_parser.py | 54
-rw-r--r--  tests/test_phystokens.py | 5
-rw-r--r--  tests/test_plugins.py | 217
-rw-r--r--  tests/test_process.py | 94
-rw-r--r--  tests/test_summary.py | 148
-rw-r--r--  tests/test_templite.py | 70
-rw-r--r--  tests/test_testing.py | 42
-rw-r--r--  tests/test_xml.py | 7
-rw-r--r--  tox.ini | 26
86 files changed, 3173 insertions(+), 1333 deletions(-)
diff --git a/.hgignore b/.hgignore
index 34fa8a7b..1a6022d5 100644
--- a/.hgignore
+++ b/.hgignore
@@ -9,6 +9,8 @@ syntax: glob
*.bak
.coverage
.coverage.*
+.metacov
+.metacov.*
*.swp
# Stuff generated by editors.
diff --git a/.travis.yml b/.travis.yml
index 232d9430..42051dec 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,20 +4,22 @@
language: python
python:
- - 2.5
- 2.6
- 2.7
- 3.2
- 3.3
+ - 3.4
- pypy
install:
+ - if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install unittest2; fi
- python setup.py clean develop
before_script:
- pwd
- ls -l
- coverage debug sys
+ - if [[ $TRAVIS_PYTHON_VERSION == 'pypy' ]]; then export COVERAGE_NO_EXTENSION=1; fi
script:
- python igor.py zip_mods install_egg
diff --git a/AUTHORS.txt b/AUTHORS.txt
index 5ea7e040..d374c2ac 100644
--- a/AUTHORS.txt
+++ b/AUTHORS.txt
@@ -21,8 +21,10 @@ Imri Goldberg
Bill Hart
Christian Heimes
Roger Hu
+Stan Hu
Devin Jeanpierre
Ross Lawley
+Steve Leonard
Edward Loper
Sandra Martocchia
Patrick Mezard
@@ -30,11 +32,14 @@ Noel O'Boyle
Detlev Offenbach
JT Olds
George Paci
+Peter Portante
Catherine Proulx
Brandon Rhodes
Adi Roiban
Greg Rogers
+Chris Rose
George Song
+Anthony Sottile
David Stanek
Joseph Tate
Sigve Tjora
diff --git a/CHANGES.txt b/CHANGES.txt
index 8819a98f..b034744f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -5,7 +5,53 @@ Change history for Coverage.py
4.0
---
-- Python versions supported are now 2.6, 2.7, 3.2, 3.3.
+- Python versions supported are now CPython 2.6, 2.7, 3.2, 3.3, and 3.4, and
+ PyPy 2.2.
+
+- Gevent, eventlet, and greenlet are now supported, closing `issue 149`_. Huge
+ thanks to Peter Portante for initial implementation, and to Joe Jevnik for
+ the final insight that completed the work.
+
+- Options are now also read from a setup.cfg file, if any. Sections are
+ prefixed with "coverage:", so the ``[run]`` options will be read from the
+ ``[coverage:run]`` section of setup.cfg. Finishes `issue 304`_.
+
+- The ``report`` command can now show missing branches when reporting on branch
+ coverage. Thanks, Steve Leonard. Closes `issue 230`_.
+
+- The XML report now contains a <source> element, fixing `issue 94`_. Thanks
+ Stan Hu.
+
+- The class defined in the coverage module is now called ``Coverage`` instead
+ of ``coverage``, though the old name still works, for backward compatibility.
+
+- The ``fail-under`` value is now rounded the same as reported results,
+ preventing paradoxical results, fixing `issue 284`_.
+
+- The XML report will now create the output directory if need be, fixing
+ `issue 285`_. Thanks Chris Rose.
+
+- HTML reports no longer raise UnicodeDecodeError if a Python file has
+ undecodable characters, fixing `issue 303`_.
+
+- The annotate command will now annotate all files, not just ones relative to
+ the current directory, fixing `issue 57`_.
+
+- The coverage module no longer causes deprecation warnings on Python 3.4 by
+ importing the imp module, fixing `issue 305`_.
+
+- Encoding declarations in source files are only considered if they are truly
+ comments. Thanks, Anthony Sottile.
+
+.. _issue 57: https://bitbucket.org/ned/coveragepy/issue/57/annotate-command-fails-to-annotate-many
+.. _issue 94: https://bitbucket.org/ned/coveragepy/issue/94/coverage-xml-doesnt-produce-sources
+.. _issue 149: https://bitbucket.org/ned/coveragepy/issue/149/coverage-gevent-looks-broken
+.. _issue 230: https://bitbucket.org/ned/coveragepy/issue/230/show-line-no-for-missing-branches-in
+.. _issue 284: https://bitbucket.org/ned/coveragepy/issue/284/fail-under-should-show-more-precision
+.. _issue 285: https://bitbucket.org/ned/coveragepy/issue/285/xml-report-fails-if-output-file-directory
+.. _issue 303: https://bitbucket.org/ned/coveragepy/issue/303/unicodedecodeerror
+.. _issue 304: https://bitbucket.org/ned/coveragepy/issue/304/attempt-to-get-configuration-from-setupcfg
+.. _issue 305: https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
3.7.1 -- 13 December 2013
@@ -204,7 +250,6 @@ Version 3.6b1 --- 28 November 2012
.. _issue 60: https://bitbucket.org/ned/coveragepy/issue/60/incorrect-path-to-orphaned-pyc-files
.. _issue 67: https://bitbucket.org/ned/coveragepy/issue/67/xml-report-filenames-may-be-generated
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
.. _issue 89: https://bitbucket.org/ned/coveragepy/issue/89/on-windows-all-packages-are-reported-in
.. _issue 97: https://bitbucket.org/ned/coveragepy/issue/97/allow-environment-variables-to-be
.. _issue 100: https://bitbucket.org/ned/coveragepy/issue/100/source-directive-doesnt-work-for-packages
@@ -256,7 +301,6 @@ Version 3.5.3 --- 29 September 2012
- Testing is now done with `tox`_, thanks, Marc Abramowitz.
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
.. _issue 147: https://bitbucket.org/ned/coveragepy/issue/147/massive-memory-usage-by-ctracer
.. _issue 179: https://bitbucket.org/ned/coveragepy/issue/179/htmlreporter-fails-when-source-file-is
.. _issue 183: https://bitbucket.org/ned/coveragepy/issue/183/install-fails-for-python-23
@@ -309,7 +353,6 @@ Version 3.5.2b1 --- 29 April 2012
- When installing into pypy, we no longer attempt (and fail) to compile
the C tracer function, closing `issue 166`_.
-.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
.. _issue 142: https://bitbucket.org/ned/coveragepy/issue/142/executing-python-file-syspath-is-replaced
.. _issue 155: https://bitbucket.org/ned/coveragepy/issue/155/cant-use-coverage-run-m-unittest-discover
.. _issue 157: https://bitbucket.org/ned/coveragepy/issue/157/chokes-on-source-files-with-non-utf-8
diff --git a/Makefile b/Makefile
index 71cd5fea..fdab74eb 100644
--- a/Makefile
+++ b/Makefile
@@ -3,9 +3,6 @@
default:
@echo "* No default action *"
-TEST_ZIP = tests/zipmods.zip
-TEST_EGG = tests/eggsrc/dist/covtestegg1-0.0.0-py*.egg
-
clean:
-rm -f *.pyd */*.pyd
-rm -f *.so */*.so
@@ -18,8 +15,8 @@ clean:
-rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__
-rm -f coverage/*,cover
-rm -f MANIFEST
- -rm -f .coverage .coverage.* coverage.xml
- -rm -f $(TEST_ZIP)
+ -rm -f .coverage .coverage.* coverage.xml .metacov*
+ -rm -f tests/zipmods.zip
-rm -rf tests/eggsrc/build tests/eggsrc/dist tests/eggsrc/*.egg-info
-rm -f setuptools-*.egg distribute-*.egg distribute-*.tar.gz
-rm -rf doc/_build
diff --git a/TODO.txt b/TODO.txt
index 36b7c29a..438bcee1 100644
--- a/TODO.txt
+++ b/TODO.txt
@@ -22,9 +22,10 @@ Key:
- .format() ?
+ try/except/finally
+ with assertRaises
- - addCleaup instead of tearDown
+ - addCleaup instead of tearDown (only in 2.7!)
+ exec statement can look like a function in py2 (since when?)
- runpy ?
+ - we can use "except ExcClass as e:"
+ Remove code only run on <2.6
- Change data file to json
diff --git a/coverage/__init__.py b/coverage/__init__.py
index 193b7a10..5ae32aba 100644
--- a/coverage/__init__.py
+++ b/coverage/__init__.py
@@ -7,10 +7,14 @@ http://nedbatchelder.com/code/coverage
from coverage.version import __version__, __url__
-from coverage.control import coverage, process_startup
+from coverage.control import Coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
+from coverage.plugin import CoveragePlugin
+
+# Backward compatibility.
+coverage = Coverage
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
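Since ``coverage`` is now just another name bound to the ``Coverage`` class, code written against either spelling keeps working. A minimal sketch (assumes coverage.py at this revision is installed):

    import coverage

    cov_new = coverage.Coverage()    # new, preferred class name
    cov_old = coverage.coverage()    # old lower-case name, kept as an alias

    # Both spellings refer to the same class object.
    assert coverage.coverage is coverage.Coverage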
diff --git a/coverage/annotate.py b/coverage/annotate.py
index 19777eaf..5b96448a 100644
--- a/coverage/annotate.py
+++ b/coverage/annotate.py
@@ -47,55 +47,44 @@ class AnnotateReporter(Reporter):
`cu` is the CodeUnit for the file to annotate.
"""
- if not cu.relative:
- return
+ statements = sorted(analysis.statements)
+ missing = sorted(analysis.missing)
+ excluded = sorted(analysis.excluded)
- filename = cu.filename
- source = cu.source_file()
if self.directory:
dest_file = os.path.join(self.directory, cu.flat_rootname())
dest_file += ".py,cover"
else:
- dest_file = filename + ",cover"
- dest = open(dest_file, 'w')
-
- statements = sorted(analysis.statements)
- missing = sorted(analysis.missing)
- excluded = sorted(analysis.excluded)
-
- lineno = 0
- i = 0
- j = 0
- covered = True
- while True:
- line = source.readline()
- if line == '':
- break
- lineno += 1
- while i < len(statements) and statements[i] < lineno:
- i += 1
- while j < len(missing) and missing[j] < lineno:
- j += 1
- if i < len(statements) and statements[i] == lineno:
- covered = j >= len(missing) or missing[j] > lineno
- if self.blank_re.match(line):
- dest.write(' ')
- elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
- if i >= len(statements) and j >= len(missing):
- dest.write('! ')
- elif i >= len(statements) or j >= len(missing):
+ dest_file = cu.filename + ",cover"
+
+ with open(dest_file, 'w') as dest:
+ i = 0
+ j = 0
+ covered = True
+ source = cu.source()
+ for lineno, line in enumerate(source.splitlines(True), start=1):
+ while i < len(statements) and statements[i] < lineno:
+ i += 1
+ while j < len(missing) and missing[j] < lineno:
+ j += 1
+ if i < len(statements) and statements[i] == lineno:
+ covered = j >= len(missing) or missing[j] > lineno
+ if self.blank_re.match(line):
+ dest.write(' ')
+ elif self.else_re.match(line):
+ # Special logic for lines containing only 'else:'.
+ if i >= len(statements) and j >= len(missing):
+ dest.write('! ')
+ elif i >= len(statements) or j >= len(missing):
+ dest.write('> ')
+ elif statements[i] == missing[j]:
+ dest.write('! ')
+ else:
+ dest.write('> ')
+ elif lineno in excluded:
+ dest.write('- ')
+ elif covered:
dest.write('> ')
- elif statements[i] == missing[j]:
- dest.write('! ')
else:
- dest.write('> ')
- elif lineno in excluded:
- dest.write('- ')
- elif covered:
- dest.write('> ')
- else:
- dest.write('! ')
- dest.write(line)
- source.close()
- dest.close()
+ dest.write('! ')
+ dest.write(line)
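The rewritten loop emits one of four prefixes per source line: ``> `` for an executed statement, ``! `` for a statement that never ran, ``- `` for an excluded line, and bare spaces for blank lines, with extra handling for bare ``else:`` lines. A hypothetical ``example.py,cover`` for a run that never reached the final ``return`` might look like this (illustrative sketch, not captured tool output):

    > def greet(flag):
    >     if flag:
    >         return "hi"
    !     return "bye"
    > print(greet(True))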
diff --git a/coverage/backunittest.py b/coverage/backunittest.py
new file mode 100644
index 00000000..b2b7ca2f
--- /dev/null
+++ b/coverage/backunittest.py
@@ -0,0 +1,36 @@
+"""Implementations of unittest features from the future."""
+
+# Use unittest2 if it's available, otherwise unittest. This gives us
+# backported features for 2.6.
+try:
+ import unittest2 as unittest # pylint: disable=F0401
+except ImportError:
+ import unittest
+
+
+def unittest_has(method):
+ """Does `unitttest.TestCase` have `method` defined?"""
+ return hasattr(unittest.TestCase, method)
+
+
+class TestCase(unittest.TestCase):
+ """Just like unittest.TestCase, but with assert methods added.
+
+ Designed to be compatible with 3.1 unittest. Methods are only defined if
+ `unittest` doesn't have them.
+
+ """
+ # pylint: disable=missing-docstring
+
+ if not unittest_has('assertCountEqual'):
+ def assertCountEqual(self, s1, s2):
+ """Assert these have the same elements, regardless of order."""
+ self.assertEqual(set(s1), set(s2))
+
+ if not unittest_has('assertRaisesRegex'):
+ def assertRaisesRegex(self, *args, **kwargs):
+ return self.assertRaisesRegexp(*args, **kwargs)
+
+ if not unittest_has('assertRegex'):
+ def assertRegex(self, *args, **kwargs):
+ return self.assertRegexpMatches(*args, **kwargs)
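The pattern throughout this new module is feature detection: each helper is defined only when the running ``unittest`` lacks it, so test code can always call the Python 3 names. A standalone sketch of the same idea (not the project's own test suite):

    import unittest

    def unittest_has(method):
        """Report whether unittest.TestCase already defines `method`."""
        return hasattr(unittest.TestCase, method)

    class CompatTestCase(unittest.TestCase):
        # Backfill a modern assert name only when it is missing (e.g. Python 2.6).
        if not unittest_has('assertIn'):
            def assertIn(self, member, container):
                self.assertTrue(member in container)

    class ExampleTest(CompatTestCase):
        def test_membership(self):
            # Works whether assertIn is native or the backfill above.
            self.assertIn(3, [1, 2, 3])

    if __name__ == "__main__":
        unittest.main()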
diff --git a/coverage/backward.py b/coverage/backward.py
index a0dc9027..9597449c 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -1,14 +1,15 @@
"""Add things to old Pythons so I can pretend they are newer."""
# This file does lots of tricky stuff, so disable a bunch of lintisms.
-# pylint: disable=F0401,W0611,W0622
-# F0401: Unable to import blah
-# W0611: Unused import blah
-# W0622: Redefining built-in blah
+# pylint: disable=redefined-builtin
+# pylint: disable=import-error
+# pylint: disable=no-member
+# pylint: disable=unused-import
+# pylint: disable=no-name-in-module
import os, re, sys
-# Pythons 2 and 3 differ on where to get StringIO
+# Pythons 2 and 3 differ on where to get StringIO.
try:
from cStringIO import StringIO
BytesIO = StringIO
@@ -49,24 +50,9 @@ else:
if sys.version_info >= (3, 0):
# Python 3.2 provides `tokenize.open`, the best way to open source files.
import tokenize
- try:
- open_source = tokenize.open # pylint: disable=E1101
- except AttributeError:
- from io import TextIOWrapper
- detect_encoding = tokenize.detect_encoding # pylint: disable=E1101
- # Copied from the 3.2 stdlib:
- def open_source(fname):
- """Open a file in read only mode using the encoding detected by
- detect_encoding().
- """
- buffer = open(fname, 'rb')
- encoding, _ = detect_encoding(buffer.readline)
- buffer.seek(0)
- text = TextIOWrapper(buffer, encoding, line_buffering=True)
- text.mode = 'r'
- return text
+ open_python_source = tokenize.open # pylint: disable=E1101
else:
- def open_source(fname):
+ def open_python_source(fname):
"""Open a source file the best way."""
return open(fname, "rU")
@@ -117,10 +103,58 @@ else:
for byte in bytes_value:
yield ord(byte)
-# Md5 is available in different places.
+
+try:
+ # In Py 2.x, the builtins were in __builtin__
+ BUILTINS = sys.modules['__builtin__']
+except KeyError:
+ # In Py 3.x, they're in builtins
+ BUILTINS = sys.modules['builtins']
+
+
+# imp was deprecated in Python 3.3
try:
- import hashlib
- md5 = hashlib.md5
+ import importlib, importlib.util
+ imp = None
except ImportError:
- import md5
- md5 = md5.new
+ importlib = None
+
+# we only want to use importlib if it has everything we need.
+try:
+ importlib_util_find_spec = importlib.util.find_spec
+except Exception:
+ import imp
+ importlib_util_find_spec = None
+
+try:
+ PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
+except AttributeError:
+ PYC_MAGIC_NUMBER = imp.get_magic()
+
+
+def import_local_file(modname):
+ """Import a local file as a module.
+
+ Opens a file in the current directory named `modname`.py, imports it
+ as `modname`, and returns the module object.
+
+ """
+ try:
+ from importlib.machinery import SourceFileLoader
+ except ImportError:
+ SourceFileLoader = None
+
+ modfile = modname + '.py'
+ if SourceFileLoader:
+ mod = SourceFileLoader(modname, modfile).load_module()
+ else:
+ for suff in imp.get_suffixes():
+ if suff[0] == '.py':
+ break
+
+ with open(modfile, 'r') as f:
+ # pylint: disable=W0631
+ # (Using possibly undefined loop variable 'suff')
+ mod = imp.load_module(modname, f, modfile, suff)
+
+ return mod
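``import_local_file`` prefers ``importlib.machinery.SourceFileLoader`` and only falls back to the deprecated ``imp`` module on interpreters that lack it. A usage sketch against this internal helper (assumes the module layout in this diff, and creates a throwaway ``helper.py`` in the current directory):

    import os

    from coverage.backward import import_local_file

    with open("helper.py", "w") as f:
        f.write("ANSWER = 42\n")

    helper = import_local_file("helper")   # imports ./helper.py as module "helper"
    print(helper.ANSWER)                   # -> 42

    os.remove("helper.py")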
diff --git a/coverage/bytecode.py b/coverage/bytecode.py
index 85360638..3f62dfaf 100644
--- a/coverage/bytecode.py
+++ b/coverage/bytecode.py
@@ -29,7 +29,6 @@ class ByteCodes(object):
Returns `ByteCode` objects.
"""
- # pylint: disable=R0924
def __init__(self, code):
self.code = code
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 9b807040..c723fa95 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -1,6 +1,6 @@
"""Command-line support for Coverage."""
-import optparse, os, sys, time, traceback
+import glob, optparse, os, sys, time, traceback
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
@@ -449,7 +449,7 @@ class CoverageScript(object):
# Remaining actions are reporting, with some common options.
report_args = dict(
- morfs = args,
+ morfs = unglob_args(args),
ignore_errors = options.ignore_errors,
omit = omit,
include = include,
@@ -470,6 +470,14 @@ class CoverageScript(object):
total = self.coverage.xml_report(outfile=outfile, **report_args)
if options.fail_under is not None:
+ # Total needs to be rounded, but be careful of 0 and 100.
+ if 0 < total < 1:
+ total = 1
+ elif 99 < total < 100:
+ total = 99
+ else:
+ total = round(total)
+
if total >= options.fail_under:
return OK
else:
@@ -633,6 +641,19 @@ def unshell_list(s):
return s.split(',')
+def unglob_args(args):
+ """Interpret shell wildcards for platforms that need it."""
+ if sys.platform == 'win32':
+ globbed = []
+ for arg in args:
+ if '?' in arg or '*' in arg:
+ globbed.extend(glob.glob(arg))
+ else:
+ globbed.append(arg)
+ args = globbed
+ return args
+
+
HELP_TOPICS = {
# -------------------------
'classic':
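The rounding special-cases keep ``--fail-under`` honest at the extremes: any coverage above zero must not report as 0, and anything short of 100 must not report as 100. A standalone check of the same rule (mirroring the hunk above rather than importing coverage):

    def round_total(total):
        """Round a coverage percentage the way the fail-under check does."""
        # Total needs to be rounded, but be careful of 0 and 100.
        if 0 < total < 1:
            return 1
        elif 99 < total < 100:
            return 99
        return round(total)

    assert round_total(0.4) == 1     # never rounds a nonzero result down to 0
    assert round_total(99.9) == 99   # never rounds an incomplete result up to 100
    assert round_total(87.6) == 88   # everything else rounds normally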
diff --git a/coverage/codeunit.py b/coverage/codeunit.py
index c58e237b..c9ab2622 100644
--- a/coverage/codeunit.py
+++ b/coverage/codeunit.py
@@ -1,18 +1,24 @@
"""Code unit (module) handling for Coverage."""
-import glob, os
+import os
-from coverage.backward import open_source, string_class, StringIO
-from coverage.misc import CoverageException
+from coverage.backward import open_python_source, string_class
+from coverage.misc import CoverageException, NoSource
+from coverage.parser import CodeParser, PythonParser
+from coverage.phystokens import source_token_lines, source_encoding
-def code_unit_factory(morfs, file_locator):
+def code_unit_factory(morfs, file_locator, get_plugin=None):
"""Construct a list of CodeUnits from polymorphic inputs.
`morfs` is a module or a filename, or a list of same.
`file_locator` is a FileLocator that can help resolve filenames.
+ `get_plugin` is a function taking a filename, and returning a plugin
+ responsible for the file. It can also return None if there is no plugin
+ claiming the file.
+
Returns a list of CodeUnit objects.
"""
@@ -20,16 +26,30 @@ def code_unit_factory(morfs, file_locator):
if not isinstance(morfs, (list, tuple)):
morfs = [morfs]
- # On Windows, the shell doesn't expand wildcards. Do it here.
- globbed = []
+ code_units = []
for morf in morfs:
- if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
- globbed.extend(glob.glob(morf))
+ plugin = None
+ if isinstance(morf, string_class) and get_plugin:
+ plugin = get_plugin(morf)
+ if plugin:
+ klass = plugin.code_unit_class(morf)
+ #klass = DjangoTracer # NOT REALLY! TODO
+ # Hacked-in Mako support. Define COVERAGE_MAKO_PATH as a fragment of
+ # the path that indicates the Python file is actually a compiled Mako
+ # template. THIS IS TEMPORARY!
+ #MAKO_PATH = os.environ.get('COVERAGE_MAKO_PATH')
+ #if MAKO_PATH and isinstance(morf, string_class) and MAKO_PATH in morf:
+ # # Super hack! Do mako both ways!
+ # if 0:
+ # cu = PythonCodeUnit(morf, file_locator)
+ # cu.name += '_fako'
+ # code_units.append(cu)
+ # klass = MakoCodeUnit
+ #elif isinstance(morf, string_class) and morf.endswith(".html"):
+ # klass = DjangoCodeUnit
else:
- globbed.append(morf)
- morfs = globbed
-
- code_units = [CodeUnit(morf, file_locator) for morf in morfs]
+ klass = PythonCodeUnit
+ code_units.append(klass(morf, file_locator))
return code_units
@@ -44,6 +64,7 @@ class CodeUnit(object):
`relative` is a boolean.
"""
+
def __init__(self, morf, file_locator):
self.file_locator = file_locator
@@ -51,11 +72,7 @@ class CodeUnit(object):
f = morf.__file__
else:
f = morf
- # .pyc files should always refer to a .py instead.
- if f.endswith(('.pyc', '.pyo')):
- f = f[:-1]
- elif f.endswith('$py.class'): # Jython
- f = f[:-9] + ".py"
+ f = self._adjust_filename(f)
self.filename = self.file_locator.canonical_filename(f)
if hasattr(morf, '__name__'):
@@ -73,9 +90,15 @@ class CodeUnit(object):
self.name = n
self.modname = modname
+ self._source = None
+
def __repr__(self):
return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
+ def _adjust_filename(self, f):
+ # TODO: This shouldn't be in the base class, right?
+ return f
+
# Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
# of them defined.
@@ -99,7 +122,7 @@ class CodeUnit(object):
the same directory, but need to differentiate same-named files from
different directories.
- For example, the file a/b/c.py might return 'a_b_c'
+ For example, the file a/b/c.py will return 'a_b_c'
"""
if self.modname:
@@ -108,26 +131,105 @@ class CodeUnit(object):
root = os.path.splitdrive(self.name)[1]
return root.replace('\\', '_').replace('/', '_').replace('.', '_')
- def source_file(self):
- """Return an open file for reading the source of the code unit."""
+ def source(self):
+ if self._source is None:
+ self._source = self.get_source()
+ return self._source
+
+ def get_source(self):
+ """Return the source code, as a string."""
if os.path.exists(self.filename):
# A regular text file: open it.
- return open_source(self.filename)
+ with open_python_source(self.filename) as f:
+ return f.read()
# Maybe it's in a zip file?
source = self.file_locator.get_zip_data(self.filename)
if source is not None:
- return StringIO(source)
+ return source
# Couldn't find source.
raise CoverageException(
"No source for code '%s'." % self.filename
)
+ def source_token_lines(self):
+ """Return the 'tokenized' text for the code."""
+ for line in self.source().splitlines():
+ yield [('txt', line)]
+
def should_be_python(self):
"""Does it seem like this file should contain Python?
- This is used to decide if a file reported as part of the exection of
+ This is used to decide if a file reported as part of the execution of
+ a program was really likely to have contained Python in the first
+ place.
+ """
+ return False
+
+ def get_parser(self, exclude=None):
+ raise NotImplementedError
+
+
+class PythonCodeUnit(CodeUnit):
+ """Represents a Python file."""
+
+ def _adjust_filename(self, fname):
+ # .pyc files should always refer to a .py instead.
+ if fname.endswith(('.pyc', '.pyo')):
+ fname = fname[:-1]
+ elif fname.endswith('$py.class'): # Jython
+ fname = fname[:-9] + ".py"
+ return fname
+
+ def get_parser(self, exclude=None):
+ actual_filename, source = self._find_source(self.filename)
+ return PythonParser(
+ text=source, filename=actual_filename, exclude=exclude,
+ )
+
+ def _find_source(self, filename):
+ """Find the source for `filename`.
+
+ Returns two values: the actual filename, and the source.
+
+ The source returned depends on which of these cases holds:
+
+ * The filename seems to be a non-source file: returns None
+
+ * The filename is a source file, and actually exists: returns None.
+
+ * The filename is a source file, and is in a zip file or egg:
+ returns the source.
+
+ * The filename is a source file, but couldn't be found: raises
+ `NoSource`.
+
+ """
+ source = None
+
+ base, ext = os.path.splitext(filename)
+ TRY_EXTS = {
+ '.py': ['.py', '.pyw'],
+ '.pyw': ['.pyw'],
+ }
+ try_exts = TRY_EXTS.get(ext)
+ if not try_exts:
+ return filename, None
+
+ for try_ext in try_exts:
+ try_filename = base + try_ext
+ if os.path.exists(try_filename):
+ return try_filename, None
+ source = self.file_locator.get_zip_data(try_filename)
+ if source:
+ return try_filename, source
+ raise NoSource("No source for code: '%s'" % filename)
+
+ def should_be_python(self):
+ """Does it seem like this file should contain Python?
+
+ This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
@@ -143,3 +245,68 @@ class CodeUnit(object):
return True
# Everything else is probably not Python.
return False
+
+ def source_token_lines(self):
+ return source_token_lines(self.source())
+
+ def source_encoding(self):
+ return source_encoding(self.source())
+
+
+class MakoParser(CodeParser):
+ def __init__(self, metadata):
+ self.metadata = metadata
+
+ def parse_source(self):
+ """Returns executable_line_numbers, excluded_line_numbers"""
+ executable = set(self.metadata['line_map'].values())
+ return executable, set()
+
+ def translate_lines(self, lines):
+ tlines = set()
+ for l in lines:
+ try:
+ tlines.add(self.metadata['full_line_map'][l])
+ except IndexError:
+ pass
+ return tlines
+
+
+class MakoCodeUnit(CodeUnit):
+ def __init__(self, *args, **kwargs):
+ super(MakoCodeUnit, self).__init__(*args, **kwargs)
+ from mako.template import ModuleInfo
+ py_source = open(self.filename).read()
+ self.metadata = ModuleInfo.get_module_source_metadata(py_source, full_line_map=True)
+
+ def get_source(self):
+ return open(self.metadata['filename']).read()
+
+ def get_parser(self, exclude=None):
+ return MakoParser(self.metadata)
+
+ def source_encoding(self):
+ return self.metadata['source_encoding']
+
+
+class DjangoCodeUnit(CodeUnit):
+ def get_source(self):
+ with open(self.filename) as f:
+ return f.read()
+
+ def get_parser(self, exclude=None):
+ return DjangoParser(self.filename)
+
+ def source_encoding(self):
+ return "utf8"
+
+
+class DjangoParser(CodeParser):
+ def __init__(self, filename):
+ self.filename = filename
+
+ def parse_source(self):
+ with open(self.filename) as f:
+ source = f.read()
+ executable = set(range(1, len(source.splitlines())+1))
+ return executable, set()
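``code_unit_factory`` now accepts an optional ``get_plugin`` callback: return ``None`` and the morf is handled by ``PythonCodeUnit``; return a plugin and its ``code_unit_class`` is used instead. Since the plugin API is still marked TODO in this diff, only the ``None`` case is sketched here (paths assume a checkout of this repository):

    from coverage.codeunit import code_unit_factory
    from coverage.files import FileLocator

    def get_plugin(filename):
        """Claim no files, so everything falls through to PythonCodeUnit."""
        return None

    units = code_unit_factory("coverage/codeunit.py", FileLocator(), get_plugin)
    print(type(units[0]).__name__)   # PythonCodeUnit, since no plugin claimed the file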
diff --git a/coverage/collector.py b/coverage/collector.py
index 94af5df5..97b45deb 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -1,6 +1,9 @@
"""Raw data collector for Coverage."""
-import collections, os, sys, threading
+import os, sys
+
+from coverage.misc import CoverageException
+from coverage.pytracer import PyTracer
try:
# Use the C extension code when we can, for speed.
@@ -21,147 +24,6 @@ except ImportError:
CTracer = None
-class PyTracer(object):
- """Python implementation of the raw data tracer."""
-
- # Because of poor implementations of trace-function-manipulating tools,
- # the Python trace function must be kept very simple. In particular, there
- # must be only one function ever set as the trace function, both through
- # sys.settrace, and as the return value from the trace function. Put
- # another way, the trace function must always return itself. It cannot
- # swap in other functions, or return None to avoid tracing a particular
- # frame.
- #
- # The trace manipulator that introduced this restriction is DecoratorTools,
- # which sets a trace function, and then later restores the pre-existing one
- # by calling sys.settrace with a function it found in the current frame.
- #
- # Systems that use DecoratorTools (or similar trace manipulations) must use
- # PyTracer to get accurate results. The command-line --timid argument is
- # used to force the use of this tracer.
-
- def __init__(self):
- self.data = None
- self.should_trace = None
- self.should_trace_cache = None
- self.warn = None
- self.cur_file_data = None
- self.last_line = 0
- self.data_stack = []
- self.data_stacks = collections.defaultdict(list)
- self.last_exc_back = None
- self.last_exc_firstlineno = 0
- self.arcs = False
- self.thread = None
- self.stopped = False
- self.coroutine_id_func = None
- self.last_coroutine = None
-
- def _trace(self, frame, event, arg_unused):
- """The trace function passed to sys.settrace."""
-
- if self.stopped:
- return
-
- if 0:
- sys.stderr.write("trace event: %s %r @%d\n" % (
- event, frame.f_code.co_filename, frame.f_lineno
- ))
-
- if self.last_exc_back:
- if frame == self.last_exc_back:
- # Someone forgot a return event.
- if self.arcs and self.cur_file_data:
- pair = (self.last_line, -self.last_exc_firstlineno)
- self.cur_file_data[pair] = None
- if self.coroutine_id_func:
- self.data_stack = self.data_stacks[self.coroutine_id_func()]
- self.cur_file_data, self.last_line = self.data_stack.pop()
- self.last_exc_back = None
-
- if event == 'call':
- # Entering a new function context. Decide if we should trace
- # in this file.
- if self.coroutine_id_func:
- self.data_stack = self.data_stacks[self.coroutine_id_func()]
- self.last_coroutine = self.coroutine_id_func()
- self.data_stack.append((self.cur_file_data, self.last_line))
- filename = frame.f_code.co_filename
- if filename not in self.should_trace_cache:
- tracename = self.should_trace(filename, frame)
- self.should_trace_cache[filename] = tracename
- else:
- tracename = self.should_trace_cache[filename]
- #print("called, stack is %d deep, tracename is %r" % (
- # len(self.data_stack), tracename))
- if tracename:
- if tracename not in self.data:
- self.data[tracename] = {}
- self.cur_file_data = self.data[tracename]
- else:
- self.cur_file_data = None
- # Set the last_line to -1 because the next arc will be entering a
- # code block, indicated by (-1, n).
- self.last_line = -1
- elif event == 'line':
- # Record an executed line.
- #if self.coroutine_id_func:
- # assert self.last_coroutine == self.coroutine_id_func()
- if self.cur_file_data is not None:
- if self.arcs:
- #print("lin", self.last_line, frame.f_lineno)
- self.cur_file_data[(self.last_line, frame.f_lineno)] = None
- else:
- #print("lin", frame.f_lineno)
- self.cur_file_data[frame.f_lineno] = None
- self.last_line = frame.f_lineno
- elif event == 'return':
- if self.arcs and self.cur_file_data:
- first = frame.f_code.co_firstlineno
- self.cur_file_data[(self.last_line, -first)] = None
- # Leaving this function, pop the filename stack.
- if self.coroutine_id_func:
- self.data_stack = self.data_stacks[self.coroutine_id_func()]
- self.last_coroutine = self.coroutine_id_func()
- self.cur_file_data, self.last_line = self.data_stack.pop()
- #print("returned, stack is %d deep" % (len(self.data_stack)))
- elif event == 'exception':
- #print("exc", self.last_line, frame.f_lineno)
- self.last_exc_back = frame.f_back
- self.last_exc_firstlineno = frame.f_code.co_firstlineno
- return self._trace
-
- def start(self):
- """Start this Tracer.
-
- Return a Python function suitable for use with sys.settrace().
-
- """
- self.thread = threading.currentThread()
- sys.settrace(self._trace)
- return self._trace
-
- def stop(self):
- """Stop this Tracer."""
- self.stopped = True
- if self.thread != threading.currentThread():
- # Called on a different thread than started us: we can't unhook
- # ourseves, but we've set the flag that we should stop, so we won't
- # do any more tracing.
- return
-
- if hasattr(sys, "gettrace") and self.warn:
- if sys.gettrace() != self._trace:
- msg = "Trace function changed, measurement is likely wrong: %r"
- self.warn(msg % (sys.gettrace(),))
- #print("Stopping tracer on %s" % threading.current_thread().ident)
- sys.settrace(None)
-
- def get_stats(self):
- """Return a dictionary of statistics, or None."""
- return None
-
-
class Collector(object):
"""Collects trace data.
@@ -183,13 +45,17 @@ class Collector(object):
# the top, and resumed when they become the top again.
_collectors = []
- def __init__(self, should_trace, timid, branch, warn, coroutine):
+ def __init__(self,
+ should_trace, check_include, timid, branch, warn, coroutine,
+ ):
"""Create a collector.
`should_trace` is a function, taking a filename, and returning a
canonicalized filename, or None depending on whether the file should
be traced or not.
+ TODO: `check_include`
+
If `timid` is true, then a slower simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions make the faster more sophisticated trace function not
@@ -202,21 +68,44 @@ class Collector(object):
`warn` is a warning function, taking a single string message argument,
to be used if a warning needs to be issued.
+ TODO: `coroutine`
+
"""
self.should_trace = should_trace
+ self.check_include = check_include
self.warn = warn
self.branch = branch
- if coroutine == "greenlet":
- import greenlet
- self.coroutine_id_func = greenlet.getcurrent
- elif coroutine == "eventlet":
- import eventlet.greenthread
- self.coroutine_id_func = eventlet.greenthread.getcurrent
- elif coroutine == "gevent":
- import gevent
- self.coroutine_id_func = gevent.getcurrent
- else:
- self.coroutine_id_func = None
+ self.threading = None
+ self.coroutine = coroutine
+
+ self.coroutine_id_func = None
+
+ try:
+ if coroutine == "greenlet":
+ import greenlet
+ self.coroutine_id_func = greenlet.getcurrent
+ elif coroutine == "eventlet":
+ import eventlet.greenthread
+ self.coroutine_id_func = eventlet.greenthread.getcurrent
+ elif coroutine == "gevent":
+ import gevent
+ self.coroutine_id_func = gevent.getcurrent
+ elif coroutine == "thread" or not coroutine:
+ # It's important to import threading only if we need it. If
+ # it's imported early, and the program being measured uses
+ # gevent, then gevent's monkey-patching won't work properly.
+ import threading
+ self.threading = threading
+ else:
+ raise CoverageException(
+ "Don't understand coroutine=%s" % coroutine
+ )
+ except ImportError:
+ raise CoverageException(
+ "Couldn't trace with coroutine=%s, "
+ "the module isn't installed." % coroutine
+ )
+
self.reset()
if timid:
@@ -240,6 +129,8 @@ class Collector(object):
# or mapping filenames to dicts with linenumber pairs as keys.
self.data = {}
+ self.plugin_data = {}
+
# A cache of the results from should_trace, the decision about whether
# to trace execution in a file. A dict of filename to (filename or
# None).
@@ -256,10 +147,25 @@ class Collector(object):
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn
+
if hasattr(tracer, 'coroutine_id_func'):
tracer.coroutine_id_func = self.coroutine_id_func
+ elif self.coroutine_id_func:
+ raise CoverageException(
+ "Can't support coroutine=%s with %s, "
+ "only threads are supported" % (
+ self.coroutine, self.tracer_name(),
+ )
+ )
+
+ if hasattr(tracer, 'plugin_data'):
+ tracer.plugin_data = self.plugin_data
+ if hasattr(tracer, 'threading'):
+ tracer.threading = self.threading
+
fn = tracer.start()
self.tracers.append(tracer)
+
return fn
# The trace function has to be set individually on each thread before
@@ -286,20 +192,19 @@ class Collector(object):
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
- #print("Started: %r" % self._collectors, file=sys.stderr)
# Check to see whether we had a fullcoverage tracer installed.
traces0 = []
- if hasattr(sys, "gettrace"):
- fn0 = sys.gettrace()
- if fn0:
- tracer0 = getattr(fn0, '__self__', None)
- if tracer0:
- traces0 = getattr(tracer0, 'traces', [])
+ fn0 = sys.gettrace()
+ if fn0:
+ tracer0 = getattr(fn0, '__self__', None)
+ if tracer0:
+ traces0 = getattr(tracer0, 'traces', [])
# Install the tracer on this thread.
fn = self._start_tracer()
+ # Replay all the events from fullcoverage into the new trace function.
for args in traces0:
(frame, event, arg), lineno = args
try:
@@ -311,11 +216,11 @@ class Collector(object):
# Install our installation tracer in threading, to jump start other
# threads.
- threading.settrace(self._installation_trace)
+ if self.threading:
+ self.threading.settrace(self._installation_trace)
def stop(self):
"""Stop collecting trace information."""
- #print >>sys.stderr, "Stopping: %r" % self._collectors
assert self._collectors
assert self._collectors[-1] is self
@@ -337,13 +242,17 @@ class Collector(object):
print("\nCoverage.py tracer stats:")
for k in sorted(stats.keys()):
print("%16s: %s" % (k, stats[k]))
- threading.settrace(None)
+ if self.threading:
+ self.threading.settrace(None)
def resume(self):
"""Resume tracing after a `pause`."""
for tracer in self.tracers:
tracer.start()
- threading.settrace(self._installation_trace)
+ if self.threading:
+ self.threading.settrace(self._installation_trace)
+ else:
+ self._start_tracer()
def get_line_data(self):
"""Return the line data collected.
@@ -356,10 +265,7 @@ class Collector(object):
# to show line data.
line_data = {}
for f, arcs in self.data.items():
- line_data[f] = ldf = {}
- for l1, _ in list(arcs.keys()):
- if l1:
- ldf[l1] = None
+ line_data[f] = dict((l1, None) for l1, _ in arcs.keys() if l1)
return line_data
else:
return self.data
@@ -377,3 +283,6 @@ class Collector(object):
return self.data
else:
return {}
+
+ def get_plugin_data(self):
+ return self.plugin_data
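The constraint described in the comment block being moved to the new ``pytracer.py`` comes from how ``sys.settrace`` works: the value returned by the trace function becomes the local trace function for that frame, so a tracer that must coexist with tools like DecoratorTools always returns itself. A minimal standalone illustration (not coverage.py's tracer):

    import sys

    executed = []

    def trace(frame, event, arg):
        """Record executed lines; always return itself to keep tracing."""
        if event == "line":
            executed.append((frame.f_code.co_name, frame.f_lineno))
        return trace

    def demo():
        x = 1
        y = x + 1
        return y

    sys.settrace(trace)
    demo()
    sys.settrace(None)
    print(executed)   # line events recorded for demo()'s body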
diff --git a/coverage/config.py b/coverage/config.py
index 60ec3f41..c671ef75 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -13,6 +13,11 @@ except ImportError:
class HandyConfigParser(configparser.RawConfigParser):
"""Our specialization of ConfigParser."""
+ def __init__(self, section_prefix):
+ # pylint: disable=super-init-not-called
+ configparser.RawConfigParser.__init__(self)
+ self.section_prefix = section_prefix
+
def read(self, filename):
"""Read a filename as UTF-8 configuration data."""
kwargs = {}
@@ -20,8 +25,37 @@ class HandyConfigParser(configparser.RawConfigParser):
kwargs['encoding'] = "utf-8"
return configparser.RawConfigParser.read(self, filename, **kwargs)
- def get(self, *args, **kwargs):
- v = configparser.RawConfigParser.get(self, *args, **kwargs)
+ def has_option(self, section, option):
+ section = self.section_prefix + section
+ return configparser.RawConfigParser.has_option(self, section, option)
+
+ def has_section(self, section):
+ section = self.section_prefix + section
+ return configparser.RawConfigParser.has_section(self, section)
+
+ def options(self, section):
+ section = self.section_prefix + section
+ return configparser.RawConfigParser.options(self, section)
+
+ def get_section(self, section):
+ """Get the contents of a section, as a dictionary."""
+ d = {}
+ for opt in self.options(section):
+ d[opt] = self.get(section, opt)
+ return d
+
+ def get(self, section, *args, **kwargs):
+ """Get a value, replacing environment variables also.
+
+ The arguments are the same as `RawConfigParser.get`, but in the found
+ value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
+ environment variable ``WORD``.
+
+ Returns the finished value.
+
+ """
+ section = self.section_prefix + section
+ v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
def dollar_replace(m):
"""Called for each $replacement."""
# Only one of the groups will have matched, just get its text.
@@ -113,6 +147,7 @@ class CoverageConfig(object):
self.timid = False
self.source = None
self.debug = []
+ self.plugins = []
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
@@ -135,6 +170,9 @@ class CoverageConfig(object):
# Defaults for [paths]
self.paths = {}
+ # Options for plugins
+ self.plugin_options = {}
+
def from_environment(self, env_var):
"""Read configuration from the `env_var` environment variable."""
# Timidity: for nose users, read an environment variable. This is a
@@ -144,7 +182,7 @@ class CoverageConfig(object):
if env:
self.timid = ('--timid' in env)
- MUST_BE_LIST = ["omit", "include", "debug"]
+ MUST_BE_LIST = ["omit", "include", "debug", "plugins"]
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
@@ -154,34 +192,54 @@ class CoverageConfig(object):
v = [v]
setattr(self, k, v)
- def from_file(self, filename):
+ def from_file(self, filename, section_prefix=""):
"""Read configuration from a .rc file.
`filename` is a file name to read.
+ Returns True or False, whether the file could be read.
+
"""
self.attempted_config_files.append(filename)
- cp = HandyConfigParser()
+ cp = HandyConfigParser(section_prefix)
files_read = cp.read(filename)
- if files_read is not None: # return value changed in 2.4
- self.config_files.extend(files_read)
+ if not files_read:
+ return False
+
+ self.config_files.extend(files_read)
for option_spec in self.CONFIG_FILE_OPTIONS:
- self.set_attr_from_config_option(cp, *option_spec)
+ self._set_attr_from_config_option(cp, *option_spec)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option)
+ # plugins can have options
+ for plugin in self.plugins:
+ if cp.has_section(plugin):
+ self.plugin_options[plugin] = cp.get_section(plugin)
+
+ return True
+
CONFIG_FILE_OPTIONS = [
+ # These are *args for _set_attr_from_config_option:
+ # (attr, where, type_="")
+ #
+ # attr is the attribute to set on the CoverageConfig object.
+ # where is the section:name to read from the configuration file.
+ # type_ is the optional type to apply, by using .getTYPE to read the
+ # configuration value from the file.
+
# [run]
('branch', 'run:branch', 'boolean'),
('coroutine', 'run:coroutine'),
('cover_pylib', 'run:cover_pylib', 'boolean'),
('data_file', 'run:data_file'),
('debug', 'run:debug', 'list'),
+ ('plugins', 'run:plugins', 'list'),
('include', 'run:include', 'list'),
('omit', 'run:omit', 'list'),
('parallel', 'run:parallel', 'boolean'),
@@ -207,9 +265,13 @@ class CoverageConfig(object):
('xml_output', 'xml:output'),
]
- def set_attr_from_config_option(self, cp, attr, where, type_=''):
+ def _set_attr_from_config_option(self, cp, attr, where, type_=''):
"""Set an attribute on self if it exists in the ConfigParser."""
section, option = where.split(":")
if cp.has_option(section, option):
method = getattr(cp, 'get'+type_)
setattr(self, attr, method(section, option))
+
+ def get_plugin_options(self, plugin):
+ """Get a dictionary of options for the plugin named `plugin`."""
+ return self.plugin_options.get(plugin, {})
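Two behaviors from this file work together: a ``section_prefix`` of ``"coverage:"`` lets the usual ``[run]`` options live under ``[coverage:run]`` in setup.cfg, and ``get()`` expands ``$WORD`` / ``${WORD}`` from the environment. A rough sketch using the parser from this diff (the file name and option values are made up for illustration):

    import os

    from coverage.config import HandyConfigParser

    # Stand-in for a project's setup.cfg.
    with open("example_setup.cfg", "w") as f:
        f.write(
            "[coverage:run]\n"
            "branch = True\n"
            "data_file = $COVERAGE_HOME/.coverage\n"
        )

    os.environ["COVERAGE_HOME"] = "/tmp/cov"

    cp = HandyConfigParser(section_prefix="coverage:")
    cp.read("example_setup.cfg")

    # Callers ask for plain "run"; the prefix is applied internally.
    print(cp.getboolean("run", "branch"))   # True
    print(cp.get("run", "data_file"))       # /tmp/cov/.coverage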
diff --git a/coverage/control.py b/coverage/control.py
index d5e2c6f8..86a2ae23 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -1,14 +1,15 @@
"""Core control stuff for Coverage."""
-import atexit, os, random, socket, sys
+import atexit, os, platform, random, socket, sys
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class, iitems
-from coverage.codeunit import code_unit_factory, CodeUnit
+from coverage.codeunit import code_unit_factory, CodeUnit, PythonCodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
from coverage.debug import DebugControl
+from coverage.plugin import Plugins, plugin_implements
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
from coverage.files import PathAliases, find_python_files, prep_patterns
from coverage.html import HtmlReporter
@@ -18,6 +19,7 @@ from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
+
# Pypy has some unusual stuff in the "stdlib". Consider those locations
# when deciding where the stdlib is.
try:
@@ -26,14 +28,14 @@ except ImportError:
_structseq = None
-class coverage(object):
+class Coverage(object):
"""Programmatic access to coverage.py.
To use::
from coverage import coverage
- cov = coverage()
+ cov = Coverage()
cov.start()
#.. call your code ..
cov.stop()
@@ -43,7 +45,7 @@ class coverage(object):
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None, debug=None,
- debug_file=None, coroutine=None):
+ debug_file=None, coroutine=None, plugins=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
@@ -85,7 +87,9 @@ class coverage(object):
`coroutine` is a string indicating the coroutining library being used
in the measured code. Without this, coverage.py will get incorrect
results. Valid strings are "greenlet", "eventlet", or "gevent", which
- are all equivalent.
+ are all equivalent. TODO: really?
+
+ `plugins` TODO.
"""
from coverage import __version__
@@ -97,17 +101,22 @@ class coverage(object):
# 1: defaults:
self.config = CoverageConfig()
- # 2: from the coveragerc file:
+ # 2: from the .coveragerc or setup.cfg file:
if config_file:
+ did_read_rc = should_read_setupcfg = False
if config_file is True:
config_file = ".coveragerc"
+ should_read_setupcfg = True
try:
- self.config.from_file(config_file)
+ did_read_rc = self.config.from_file(config_file)
except ValueError as err:
raise CoverageException(
"Couldn't read config file %s: %s" % (config_file, err)
)
+ if not did_read_rc and should_read_setupcfg:
+ self.config.from_file("setup.cfg", section_prefix="coverage:")
+
# 3: from environment variables:
self.config.from_environment('COVERAGE_OPTIONS')
env_data_file = os.environ.get('COVERAGE_FILE')
@@ -119,12 +128,21 @@ class coverage(object):
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, omit=omit, include=include, debug=debug,
- coroutine=coroutine,
+ coroutine=coroutine, plugins=plugins,
)
# Create and configure the debugging controller.
self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
+ # Load plugins
+ self.plugins = Plugins.load_plugins(self.config.plugins, self.config)
+
+ self.trace_judges = []
+ for plugin in self.plugins:
+ if plugin_implements(plugin, "trace_judge"):
+ self.trace_judges.append(plugin)
+ self.trace_judges.append(None) # The Python case.
+
self.auto_data = auto_data
# _exclude_re is a dict mapping exclusion list names to compiled
@@ -147,8 +165,11 @@ class coverage(object):
self.include = prep_patterns(self.config.include)
self.collector = Collector(
- self._should_trace, timid=self.config.timid,
- branch=self.config.branch, warn=self._warn,
+ should_trace=self._should_trace,
+ check_include=self._tracing_check_include_omit_etc,
+ timid=self.config.timid,
+ branch=self.config.branch,
+ warn=self._warn,
coroutine=self.config.coroutine,
)
@@ -175,18 +196,16 @@ class coverage(object):
)
# The dirs for files considered "installed with the interpreter".
- self.pylib_dirs = []
+ self.pylib_dirs = set()
if not self.config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
- for m in (atexit, os, random, socket, _structseq):
+ for m in (atexit, os, platform, random, socket, _structseq):
if m is not None and hasattr(m, "__file__"):
- m_dir = self._canonical_dir(m)
- if m_dir not in self.pylib_dirs:
- self.pylib_dirs.append(m_dir)
+ self.pylib_dirs.add(self._canonical_dir(m))
# To avoid tracing the coverage code itself, we skip anything located
# where we are.
@@ -214,7 +233,8 @@ class coverage(object):
def _canonical_dir(self, morf):
"""Return the canonical directory of the module or file `morf`."""
- return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
+ morf_filename = PythonCodeUnit(morf, self.file_locator).filename
+ return os.path.split(morf_filename)[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
@@ -231,22 +251,14 @@ class coverage(object):
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
- Returns a pair of values: the first indicates whether the file should
- be traced: it's a canonicalized filename if it should be traced, None
- if it should not. The second value is a string, the resason for the
- decision.
+ Returns a FileDisposition object.
"""
- if not filename:
- # Empty string is pretty useless
- return None, "empty string isn't a filename"
-
- if filename.startswith('<'):
- # Lots of non-file execution is represented with artificial
- # filenames like "<string>", "<doctest readme.txt[0]>", or
- # "<exec_function>". Don't ever trace these executions, since we
- # can't do anything with the data later anyway.
- return None, "not a real filename"
+ disp = FileDisposition(filename)
+ def nope(disp, reason):
+ disp.trace = False
+ disp.reason = reason
+ return disp
self._check_for_packages()
@@ -260,53 +272,107 @@ class coverage(object):
if dunder_file:
filename = self._source_for_file(dunder_file)
+ if not filename:
+ # Empty string is pretty useless
+ return nope(disp, "empty string isn't a filename")
+
+ if filename.startswith('memory:'):
+ return nope(disp, "memory isn't traceable")
+
+ if filename.startswith('<'):
+ # Lots of non-file execution is represented with artificial
+ # filenames like "<string>", "<doctest readme.txt[0]>", or
+ # "<exec_function>". Don't ever trace these executions, since we
+ # can't do anything with the data later anyway.
+ return nope(disp, "not a real filename")
+
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = self.file_locator.canonical_filename(filename)
+ disp.canonical_filename = canonical
+ # Try the plugins, see if they have an opinion about the file.
+ for plugin in self.trace_judges:
+ if plugin:
+ plugin.trace_judge(disp)
+ else:
+ disp.trace = True
+ disp.source_filename = canonical
+ if disp.trace:
+ disp.plugin = plugin
+
+ if disp.check_filters:
+ reason = self._check_include_omit_etc(disp.source_filename)
+ if reason:
+ nope(disp, reason)
+
+ return disp
+
+ return nope(disp, "no plugin found") # TODO: a test that causes this.
+
+ def _check_include_omit_etc(self, filename):
+ """Check a filename against the include, omit, etc, rules.
+
+ Returns a string or None. String means, don't trace, and is the reason
+ why. None means no reason found to not trace.
+
+ """
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match:
- if not self.source_match.match(canonical):
- return None, "falls outside the --source trees"
+ if not self.source_match.match(filename):
+ return "falls outside the --source trees"
elif self.include_match:
- if not self.include_match.match(canonical):
- return None, "falls outside the --include trees"
+ if not self.include_match.match(filename):
+ return "falls outside the --include trees"
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
- if self.pylib_match and self.pylib_match.match(canonical):
- return None, "is in the stdlib"
+ if self.pylib_match and self.pylib_match.match(filename):
+ return "is in the stdlib"
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
- if self.cover_match and self.cover_match.match(canonical):
- return None, "is part of coverage.py"
+ if self.cover_match and self.cover_match.match(filename):
+ return "is part of coverage.py"
# Check the file against the omit pattern.
- if self.omit_match and self.omit_match.match(canonical):
- return None, "is inside an --omit pattern"
+ if self.omit_match and self.omit_match.match(filename):
+ return "is inside an --omit pattern"
- return canonical, "because we love you"
+ # No reason found to skip this file.
+ return None
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`.
- Calls `_should_trace_with_reason`, and returns just the decision.
+ Calls `_should_trace_with_reason`, and returns the FileDisposition.
"""
- canonical, reason = self._should_trace_with_reason(filename, frame)
+ disp = self._should_trace_with_reason(filename, frame)
if self.debug.should('trace'):
- if not canonical:
- msg = "Not tracing %r: %s" % (filename, reason)
- else:
+ self.debug.write(disp.debug_message())
+ return disp
+
+ def _tracing_check_include_omit_etc(self, filename):
+ """Check a filename against the include, omit, etc, rules, and say so.
+
+ Returns a boolean: True if the file should be traced, False if not.
+
+ """
+ reason = self._check_include_omit_etc(filename)
+ if self.debug.should('trace'):
+ if not reason:
msg = "Tracing %r" % (filename,)
+ else:
+ msg = "Not tracing %r: %s" % (filename, reason)
self.debug.write(msg)
- return canonical
+
+ return not reason
def _warn(self, msg):
"""Use `msg` as a warning."""
@@ -524,8 +590,10 @@ class coverage(object):
if not self._measured:
return
+ # TODO: seems like this parallel structure is getting kinda old...
self.data.add_line_data(self.collector.get_line_data())
self.data.add_arc_data(self.collector.get_arc_data())
+ self.data.add_plugin_data(self.collector.get_plugin_data())
self.collector.reset()
# If there are still entries in the source_pkgs list, then we never
@@ -591,9 +659,17 @@ class coverage(object):
Returns an `Analysis` object.
"""
+ def get_plugin(filename):
+ """For code_unit_factory to use to find the plugin for a file."""
+ plugin = None
+ plugin_name = self.data.plugin_data().get(filename)
+ if plugin_name:
+ plugin = self.plugins.get(plugin_name)
+ return plugin
+
self._harvest_data()
if not isinstance(it, CodeUnit):
- it = code_unit_factory(it, self.file_locator)[0]
+ it = code_unit_factory(it, self.file_locator, get_plugin)[0]
return Analysis(self, it)
@@ -692,6 +768,13 @@ class coverage(object):
if self.config.xml_output == '-':
outfile = sys.stdout
else:
+ # Ensure that the output directory is created; done here
+ # because this report pre-opens the output file.
+ # HTMLReport does this using the Report plumbing because
+ # its task is more complex, being multiple files.
+ output_dir = os.path.dirname(self.config.xml_output)
+ if output_dir and not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
@@ -710,7 +793,6 @@ class coverage(object):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
- import platform, re
try:
implementation = platform.python_implementation()
@@ -732,10 +814,10 @@ class coverage(object):
('executable', sys.executable),
('cwd', os.getcwd()),
('path', sys.path),
- ('environment', sorted([
+ ('environment', sorted(
("%s = %s" % (k, v)) for k, v in iitems(os.environ)
- if re.search(r"^COV|^PY", k)
- ])),
+ if k.startswith(("COV", "PY"))
+ )),
('command_line', " ".join(getattr(sys, 'argv', ['???']))),
]
if self.source_match:
@@ -752,6 +834,26 @@ class coverage(object):
return info
+class FileDisposition(object):
+ """A simple object for noting a number of details of files to trace."""
+ def __init__(self, original_filename):
+ self.original_filename = original_filename
+ self.canonical_filename = original_filename
+ self.source_filename = None
+ self.check_filters = True
+ self.trace = False
+ self.reason = ""
+ self.plugin = None
+
+ def debug_message(self):
+ """Produce a debugging message explaining the outcome."""
+ if self.trace:
+ msg = "Tracing %r" % (self.original_filename,)
+ else:
+ msg = "Not tracing %r: %s" % (self.original_filename, self.reason)
+ return msg
+
+
def process_startup():
"""Call this at Python startup to perhaps measure coverage.
@@ -774,7 +876,7 @@ def process_startup():
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
- cov = coverage(config_file=cps, auto_data=True)
+ cov = Coverage(config_file=cps, auto_data=True)
cov.start()
cov._warn_no_data = False
cov._warn_unimported_source = False
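A minimal sketch (not part of the patch) of how the new FileDisposition object is filled in and reported; it assumes the class is importable from coverage.control, where the hunks above add it, and the filename and reason are made up:

    from coverage.control import FileDisposition

    disp = FileDisposition("project/mod.py")
    disp.trace = True
    disp.source_filename = disp.canonical_filename
    print(disp.debug_message())     # Tracing 'project/mod.py'

    disp.trace = False
    disp.reason = "falls outside the --source trees"
    print(disp.debug_message())     # Not tracing 'project/mod.py': falls outside the --source trees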
diff --git a/coverage/data.py b/coverage/data.py
index 042b6405..e220a364 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -21,6 +21,11 @@ class CoverageData(object):
* arcs: a dict mapping filenames to sorted lists of line number pairs:
{ 'file1': [(17,23), (17,25), (25,26)], ... }
+ * plugins: a dict mapping filenames to plugin names:
+ { 'file1': "django.coverage", ... }
+ # TODO: how to handle the difference between a plugin module
+ # name, and the class in the module?
+
"""
def __init__(self, basename=None, collector=None, debug=None):
@@ -64,6 +69,14 @@ class CoverageData(object):
#
self.arcs = {}
+    # A map from canonical source file name to a plugin module name:
+ #
+ # {
+ # 'filename1.py': 'django.coverage',
+ # ...
+ # }
+ self.plugins = {}
+
def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
@@ -110,6 +123,9 @@ class CoverageData(object):
(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)
)
+    def plugin_data(self):
+        """Return the map from canonical filenames to plugin names."""
+        return self.plugins
+
def write_file(self, filename):
"""Write the coverage data to `filename`."""
@@ -213,6 +229,9 @@ class CoverageData(object):
for filename, arcs in iitems(arc_data):
self.arcs.setdefault(filename, {}).update(arcs)
+    def add_plugin_data(self, plugin_data):
+        """Merge `plugin_data`, a dict mapping filenames to plugin names."""
+        self.plugins.update(plugin_data)
+
def touch_file(self, filename):
"""Ensure that `filename` appears in the data, empty if needed."""
self.lines.setdefault(filename, {})
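A hedged sketch of the new plugin-data plumbing on CoverageData; the filenames and plugin name here are illustrative:

    from coverage.data import CoverageData

    data = CoverageData()
    data.add_plugin_data({"templates/home.html": "django.coverage"})
    data.add_plugin_data({"templates/base.html": "django.coverage"})

    # plugin_data() returns the accumulated filename -> plugin-name map.
    assert data.plugin_data() == {
        "templates/home.html": "django.coverage",
        "templates/base.html": "django.coverage",
    }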
diff --git a/coverage/debug.py b/coverage/debug.py
index 6908383d..6e7af242 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -45,7 +45,7 @@ def info_formatter(info):
for label, data in info:
if data == []:
data = "-none-"
- if isinstance(data, (list, tuple)):
+ if isinstance(data, (list, set, tuple)):
prefix = "%*s:" % (label_len, label)
for e in data:
yield "%*s %s" % (label_len+1, prefix, e)
diff --git a/coverage/django.py b/coverage/django.py
new file mode 100644
index 00000000..00f2ed54
--- /dev/null
+++ b/coverage/django.py
@@ -0,0 +1,61 @@
+import sys
+
+
+ALL_TEMPLATE_MAP = {}
+
+def get_line_map(filename):
+ if filename not in ALL_TEMPLATE_MAP:
+ with open(filename) as template_file:
+ template_source = template_file.read()
+ line_lengths = [len(l) for l in template_source.splitlines(True)]
+ ALL_TEMPLATE_MAP[filename] = list(running_sum(line_lengths))
+ return ALL_TEMPLATE_MAP[filename]
+
+def get_line_number(line_map, offset):
+ for lineno, line_offset in enumerate(line_map, start=1):
+ if line_offset >= offset:
+ return lineno
+ return -1
+
+class DjangoTracer(object):
+ def should_trace(self, canonical):
+ return "/django/template/" in canonical
+
+ def source(self, frame):
+ if frame.f_code.co_name != 'render':
+ return None
+ that = frame.f_locals['self']
+ return getattr(that, "source", None)
+
+ def file_name(self, frame):
+ source = self.source(frame)
+ if not source:
+ return None
+ return source[0].name.encode(sys.getfilesystemencoding())
+
+ def line_number_range(self, frame):
+ source = self.source(frame)
+ if not source:
+ return -1, -1
+ filename = source[0].name
+ line_map = get_line_map(filename)
+ start = get_line_number(line_map, source[1][0])
+ end = get_line_number(line_map, source[1][1])
+ if start < 0 or end < 0:
+ return -1, -1
+ return start, end
+
+def running_sum(seq):
+ total = 0
+ for num in seq:
+ total += num
+ yield total
+
+def ppp(obj):
+ ret = []
+ import inspect
+ for name, value in inspect.getmembers(obj):
+ if not callable(value):
+ ret.append("%s=%r" % (name, value))
+ attrs = ", ".join(ret)
+ return "%s: %s" % (obj.__class__, attrs)
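The offset-to-line mapping is the heart of this prototype Django template tracer. A standalone illustration, with the helpers repeated inline so the sketch runs without reading a real template file (the template text is made up):

    def running_sum(seq):
        total = 0
        for num in seq:
            total += num
            yield total

    def get_line_number(line_map, offset):
        for lineno, line_offset in enumerate(line_map, start=1):
            if line_offset >= offset:
                return lineno
        return -1

    template_source = "{% block a %}\nhello\n{% endblock %}\n"
    line_lengths = [len(l) for l in template_source.splitlines(True)]
    line_map = list(running_sum(line_lengths))   # cumulative offsets: [14, 20, 35]

    # Character offset 15 falls inside "hello", which is line 2.
    assert get_line_number(line_map, 15) == 2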
diff --git a/coverage/execfile.py b/coverage/execfile.py
index 7b90137a..b7877b6a 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -1,23 +1,83 @@
"""Execute files of Python code."""
-import imp, marshal, os, sys
+import marshal, os, sys, types
-from coverage.backward import open_source
+from coverage.backward import open_python_source, BUILTINS
+from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
-try:
- # In Py 2.x, the builtins were in __builtin__
- BUILTINS = sys.modules['__builtin__']
-except KeyError:
- # In Py 3.x, they're in builtins
- BUILTINS = sys.modules['builtins']
+if importlib_util_find_spec:
+ def find_module(modulename):
+ """Find the module named `modulename`.
+ Returns the file path of the module, and the name of the enclosing
+ package.
+ """
+ # pylint: disable=no-member
+ try:
+ spec = importlib_util_find_spec(modulename)
+ except ImportError as err:
+ raise NoSource(str(err))
+ if not spec:
+ raise NoSource("No module named %r" % (modulename,))
+ pathname = spec.origin
+ packagename = spec.name
+ if pathname.endswith("__init__.py"):
+ mod_main = modulename + ".__main__"
+ spec = importlib_util_find_spec(mod_main)
+ if not spec:
+ raise NoSource(
+ "No module named %s; "
+ "%r is a package and cannot be directly executed"
+ % (mod_main, modulename)
+ )
+ pathname = spec.origin
+ packagename = spec.name
+ packagename = packagename.rpartition(".")[0]
+ return pathname, packagename
+else:
+ def find_module(modulename):
+ """Find the module named `modulename`.
+
+ Returns the file path of the module, and the name of the enclosing
+ package.
+ """
+ openfile = None
+ glo, loc = globals(), locals()
+ try:
+ # Search for the module - inside its parent package, if any - using
+ # standard import mechanics.
+ if '.' in modulename:
+ packagename, name = modulename.rsplit('.', 1)
+ package = __import__(packagename, glo, loc, ['__path__'])
+ searchpath = package.__path__
+ else:
+ packagename, name = None, modulename
+ searchpath = None # "top-level search" in imp.find_module()
+ openfile, pathname, _ = imp.find_module(name, searchpath)
-def rsplit1(s, sep):
- """The same as s.rsplit(sep, 1), but works in 2.3"""
- parts = s.split(sep)
- return sep.join(parts[:-1]), parts[-1]
+ # Complain if this is a magic non-file module.
+ if openfile is None and pathname is None:
+ raise NoSource(
+ "module does not live in a file: %r" % modulename
+ )
+
+ # If `modulename` is actually a package, not a mere module, then we
+ # pretend to be Python 2.7 and try running its __main__.py script.
+ if openfile is None:
+ packagename = modulename
+ name = '__main__'
+ package = __import__(packagename, glo, loc, ['__path__'])
+ searchpath = package.__path__
+ openfile, pathname, _ = imp.find_module(name, searchpath)
+ except ImportError as err:
+ raise NoSource(str(err))
+ finally:
+ if openfile:
+ openfile.close()
+
+ return pathname, packagename
def run_python_module(modulename, args):
@@ -28,41 +88,8 @@ def run_python_module(modulename, args):
element naming the module being executed.
"""
- openfile = None
- glo, loc = globals(), locals()
- try:
- # Search for the module - inside its parent package, if any - using
- # standard import mechanics.
- if '.' in modulename:
- packagename, name = rsplit1(modulename, '.')
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- else:
- packagename, name = None, modulename
- searchpath = None # "top-level search" in imp.find_module()
- openfile, pathname, _ = imp.find_module(name, searchpath)
-
- # Complain if this is a magic non-file module.
- if openfile is None and pathname is None:
- raise NoSource(
- "module does not live in a file: %r" % modulename
- )
+ pathname, packagename = find_module(modulename)
- # If `modulename` is actually a package, not a mere module, then we
- # pretend to be Python 2.7 and try running its __main__.py script.
- if openfile is None:
- packagename = modulename
- name = '__main__'
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- openfile, pathname, _ = imp.find_module(name, searchpath)
- except ImportError as err:
- raise NoSource(str(err))
- finally:
- if openfile:
- openfile.close()
-
- # Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
@@ -79,7 +106,7 @@ def run_python_file(filename, args, package=None):
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
- main_mod = imp.new_module('__main__')
+ main_mod = types.ModuleType('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if package:
@@ -119,11 +146,12 @@ def run_python_file(filename, args, package=None):
# Restore the old argv and path
sys.argv = old_argv
+
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
- source_file = open_source(filename)
+ source_file = open_python_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
@@ -150,7 +178,7 @@ def make_code_from_pyc(filename):
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
- if magic != imp.get_magic():
+ if magic != PYC_MAGIC_NUMBER:
raise NoCode("Bad magic number in .pyc file")
# Skip the junk in the header that we don't need.
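Both branches of the new find_module() return the same shape of result. A quick sketch using a stdlib module, assuming this patched coverage is importable (the exact path is machine-dependent):

    from coverage.execfile import find_module

    pathname, packagename = find_module("json.tool")
    # e.g. pathname == '.../lib/python3.4/json/tool.py'
    assert pathname.endswith("tool.py")
    assert packagename == "json"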
diff --git a/coverage/files.py b/coverage/files.py
index 94388f96..1ed7276e 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -1,7 +1,7 @@
"""File wrangling."""
from coverage.backward import to_string
-from coverage.misc import CoverageException
+from coverage.misc import CoverageException, join_regex
import fnmatch, os, os.path, re, sys
import ntpath, posixpath
@@ -147,7 +147,7 @@ def prep_patterns(patterns):
class TreeMatcher(object):
"""A matcher for files in a tree."""
def __init__(self, directories):
- self.dirs = directories[:]
+ self.dirs = list(directories)
def __repr__(self):
return "<TreeMatcher %r>" % self.dirs
@@ -177,6 +177,17 @@ class FnmatchMatcher(object):
"""A matcher for files by filename pattern."""
def __init__(self, pats):
self.pats = pats[:]
+ # fnmatch is platform-specific. On Windows, it does the Windows thing
+ # of treating / and \ as equivalent. But on other platforms, we need to
+ # take care of that ourselves.
+ fnpats = (fnmatch.translate(p) for p in pats)
+ fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats)
+ if sys.platform == 'win32':
+ # Windows is also case-insensitive. BTW: the regex docs say that
+ # flags like (?i) have to be at the beginning, but fnmatch puts
+            # them at the end, and having them there seems to work fine.
+ fnpats = (p + "(?i)" for p in fnpats)
+ self.re = re.compile(join_regex(fnpats))
def __repr__(self):
return "<FnmatchMatcher %r>" % self.pats
@@ -187,10 +198,7 @@ class FnmatchMatcher(object):
def match(self, fpath):
"""Does `fpath` match one of our filename patterns?"""
- for pat in self.pats:
- if fnmatch.fnmatch(fpath, pat):
- return True
- return False
+ return self.re.match(fpath) is not None
def sep(s):
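A sketch of the regex-based matcher in use; the patterns and paths are illustrative:

    from coverage.files import FnmatchMatcher

    matcher = FnmatchMatcher(["*/tests/*"])
    assert matcher.match("project/tests/test_files.py")
    assert not matcher.match("project/coverage/files.py")
    # On non-Windows platforms the translated pattern also accepts
    # backslashes, so Windows-style paths are matched by the same rules.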
diff --git a/coverage/html.py b/coverage/html.py
index d168e351..863d1508 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -5,7 +5,6 @@ import os, re, shutil, sys
import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
-from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
@@ -149,9 +148,7 @@ class HtmlReporter(Reporter):
def html_file(self, cu, analysis):
"""Generate an HTML file for one source file."""
- source_file = cu.source_file()
- with source_file:
- source = source_file.read()
+ source = cu.source()
# Find out if the file on disk is already correct.
flat_rootname = cu.flat_rootname()
@@ -167,7 +164,7 @@ class HtmlReporter(Reporter):
# If need be, determine the encoding of the source file. We use it
# later to properly write the HTML.
if sys.version_info < (3, 0):
- encoding = source_encoding(source)
+ encoding = cu.source_encoding()
# Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
source = source[3:]
@@ -187,7 +184,7 @@ class HtmlReporter(Reporter):
lines = []
- for lineno, line in enumerate(source_token_lines(source), start=1):
+ for lineno, line in enumerate(cu.source_token_lines(), start=1):
# Figure out how to mark this line.
line_class = []
annotate_html = ""
@@ -241,7 +238,9 @@ class HtmlReporter(Reporter):
}))
if sys.version_info < (3, 0):
- html = html.decode(encoding)
+ # In theory, all the characters in the source can be decoded, but
+ # strange things happen, so use 'replace' to keep errors at bay.
+ html = html.decode(encoding, 'replace')
html_filename = flat_rootname + ".html"
html_path = os.path.join(self.directory, html_filename)
diff --git a/coverage/misc.py b/coverage/misc.py
index c88d4ecd..6962ae32 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -1,10 +1,10 @@
"""Miscellaneous stuff for Coverage."""
import errno
+import hashlib
import inspect
import os
-from coverage.backward import md5
from coverage.backward import string_class, to_bytes
@@ -86,12 +86,7 @@ def bool_or_none(b):
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
- if len(regexes) > 1:
- return "|".join("(%s)" % r for r in regexes)
- elif regexes:
- return regexes[0]
- else:
- return ""
+ return "|".join("(?:%s)" % r for r in regexes)
def file_be_gone(path):
@@ -106,7 +101,7 @@ def file_be_gone(path):
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
- self.md5 = md5()
+ self.md5 = hashlib.md5()
def update(self, v):
"""Add `v` to the hash, recursively if needed."""
diff --git a/coverage/parser.py b/coverage/parser.py
index de6590aa..c5e95baa 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -3,14 +3,31 @@
import collections, dis, re, token, tokenize
from coverage.backward import StringIO
-from coverage.backward import open_source, range # pylint: disable=W0622
-from coverage.backward import bytes_to_ints
+from coverage.backward import range # pylint: disable=W0622
+from coverage.backward import bytes_to_ints, open_python_source
from coverage.bytecode import ByteCodes, CodeObjects
from coverage.misc import nice_pair, expensive, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
class CodeParser(object):
+ """
+ Base class for any code parser.
+ """
+ def translate_lines(self, lines):
+ return set(lines)
+
+ def translate_arcs(self, arcs):
+ return arcs
+
+ def exit_counts(self):
+ return {}
+
+ def arcs(self):
+ return []
+
+
+class PythonParser(CodeParser):
"""Parse code to find executable lines, excluded lines, etc."""
def __init__(self, text=None, filename=None, exclude=None):
@@ -20,12 +37,12 @@ class CodeParser(object):
`exclude`, a regex.
"""
- assert text or filename, "CodeParser needs either text or filename"
+ assert text or filename, "PythonParser needs either text or filename"
self.filename = filename or "<code>"
self.text = text
if not self.text:
try:
- with open_source(self.filename) as sourcef:
+ with open_python_source(self.filename) as sourcef:
self.text = sourcef.read()
except IOError as err:
raise NoSource(
@@ -137,9 +154,8 @@ class CodeParser(object):
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
- rng = (first_line, elineno)
for l in range(first_line, elineno+1):
- self.multiline[l] = rng
+ self.multiline[l] = first_line
first_line = None
if ttext.strip() and toktype != tokenize.COMMENT:
@@ -163,33 +179,29 @@ class CodeParser(object):
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
- rng = self.multiline.get(line)
- if rng:
- first_line = rng[0]
+ first_line = self.multiline.get(line)
+ if first_line:
+ return first_line
else:
- first_line = line
- return first_line
+ return line
- def first_lines(self, lines, *ignores):
+ def first_lines(self, lines):
"""Map the line numbers in `lines` to the correct first line of the
statement.
- Skip any line mentioned in any of the sequences in `ignores`.
-
Returns a set of the first lines.
"""
- ignore = set()
- for ign in ignores:
- ignore.update(ign)
- lset = set()
- for l in lines:
- if l in ignore:
- continue
- new_l = self.first_line(l)
- if new_l not in ignore:
- lset.add(new_l)
- return lset
+ return set(self.first_line(l) for l in lines)
+
+ def translate_lines(self, lines):
+ return self.first_lines(lines)
+
+ def translate_arcs(self, arcs):
+ return [
+ (self.first_line(a), self.first_line(b))
+ for (a, b) in arcs
+ ]
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
@@ -211,11 +223,12 @@ class CodeParser(object):
)
excluded_lines = self.first_lines(self.excluded)
- lines = self.first_lines(
- self.statement_starts,
- excluded_lines,
- self.docstrings
- )
+ ignore = set()
+ ignore.update(excluded_lines)
+ ignore.update(self.docstrings)
+ starts = self.statement_starts - ignore
+ lines = self.first_lines(starts)
+ lines -= ignore
return lines, excluded_lines
@@ -328,7 +341,7 @@ class ByteParser(object):
else:
if not text:
assert filename, "If no code or text, need a filename"
- with open_source(filename) as sourcef:
+ with open_python_source(filename) as sourcef:
text = sourcef.read()
self.text = text
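A hedged sketch of the new translate_lines bookkeeping: executed line numbers inside a multi-line statement now collapse to the statement's first line. The source text is made up:

    from coverage.parser import PythonParser

    source = (
        "x = (1 +\n"
        "     2 +\n"
        "     3)\n"
    )
    parser = PythonParser(text=source)
    parser.parse_source()

    # The tracer may report lines 1, 2 and 3; analysis only wants line 1.
    assert parser.translate_lines([1, 2, 3]) == set([1])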
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index e79ce01f..867388f7 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -120,7 +120,7 @@ def source_encoding(source):
# This is mostly code adapted from Py3.2's tokenize module.
- cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+ cookie_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)")
# Do this so the detect_encode code we copied will work.
readline = iter(source.splitlines(True)).next
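The tightened cookie regex only recognizes a PEP 263 declaration that lives in a comment. A standalone check of the pattern itself:

    import re

    cookie_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)")

    assert cookie_re.match("# -*- coding: utf-8 -*-").group(1) == "utf-8"
    assert cookie_re.match("  #!coding=latin-1").group(1) == "latin-1"
    assert cookie_re.match("text = 'coding: utf-8'") is None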
diff --git a/coverage/plugin.py b/coverage/plugin.py
new file mode 100644
index 00000000..35be41a9
--- /dev/null
+++ b/coverage/plugin.py
@@ -0,0 +1,108 @@
+"""Plugin management for coverage.py"""
+
+import sys
+
+
+class CoveragePlugin(object):
+ """Base class for coverage.py plugins."""
+ def __init__(self, options):
+ self.options = options
+
+ def trace_judge(self, disposition):
+ """Decide whether to trace this file with this plugin.
+
+ Set disposition.trace to True if this plugin should trace this file.
+ May also set other attributes in `disposition`.
+
+ """
+ return None
+
+ def source_file_name(self, filename):
+ """Return the source name for a given Python filename.
+
+ Can return None if tracing shouldn't continue.
+
+ """
+ return filename
+
+ def dynamic_source_file_name(self):
+ """Returns a callable that can return a source name for a frame.
+
+ The callable should take a filename and a frame, and return either a
+ filename or None:
+
+ def dynamic_source_filename_func(filename, frame)
+
+ Can return None if dynamic filenames aren't needed.
+
+ """
+ return None
+
+ def code_unit_class(self, morf):
+ """Return the CodeUnit class to use for a module or filename."""
+ return None
+
+
+class Plugins(object):
+ """The currently loaded collection of coverage.py plugins."""
+
+ def __init__(self):
+ self.order = []
+ self.names = {}
+
+ @classmethod
+ def load_plugins(cls, modules, config):
+ """Load plugins from `modules`.
+
+ Returns a list of loaded and configured plugins.
+
+ """
+ plugins = cls()
+
+ for module in modules:
+ __import__(module)
+ mod = sys.modules[module]
+
+ plugin_class = getattr(mod, "Plugin", None)
+ if plugin_class:
+ options = config.get_plugin_options(module)
+ plugin = plugin_class(options)
+ plugin.__name__ = module
+ plugins.order.append(plugin)
+ plugins.names[module] = plugin
+
+ return plugins
+
+ def __iter__(self):
+ return iter(self.order)
+
+    def get(self, module):
+        """Return the plugin that was loaded from the module named `module`."""
+        return self.names[module]
+
+
+def overrides(obj, method_name, base_class):
+ """Does `obj` override the `method_name` it got from `base_class`?
+
+ Determine if `obj` implements the method called `method_name`, which it
+ inherited from `base_class`.
+
+ Returns a boolean.
+
+ """
+ klass = obj.__class__
+ klass_func = getattr(klass, method_name)
+ base_func = getattr(base_class, method_name)
+
+ # Python 2/3 compatibility: Python 2 returns an instancemethod object, the
+ # function is the .im_func attribute. Python 3 returns a plain function
+ # object already.
+ if sys.version_info < (3, 0):
+ klass_func = klass_func.im_func
+ base_func = base_func.im_func
+
+ return klass_func is not base_func
+
+
+def plugin_implements(obj, method_name):
+ """Does the plugin `obj` implement `method_name`?"""
+ return overrides(obj, method_name, CoveragePlugin)
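A hedged sketch of what a plugin module is expected to look like under this protocol. The module name my_plugin and the templates/ test are made up; coverage imports the module and instantiates its Plugin class with the configured options:

    # my_plugin.py  (hypothetical)
    from coverage.plugin import CoveragePlugin

    class Plugin(CoveragePlugin):
        def trace_judge(self, disposition):
            # Claim any file under a templates/ directory for this plugin.
            if "/templates/" in disposition.canonical_filename:
                disposition.trace = True
                disposition.source_filename = disposition.canonical_filename

    # The collector side would then load it with something like:
    #     plugins = Plugins.load_plugins(["my_plugin"], config)
    #     plugin = plugins.get("my_plugin")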
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
new file mode 100644
index 00000000..7563ae11
--- /dev/null
+++ b/coverage/pytracer.py
@@ -0,0 +1,163 @@
+"""Raw data collector for Coverage."""
+
+import sys
+
+
+class PyTracer(object):
+ """Python implementation of the raw data tracer."""
+
+ # Because of poor implementations of trace-function-manipulating tools,
+ # the Python trace function must be kept very simple. In particular, there
+ # must be only one function ever set as the trace function, both through
+ # sys.settrace, and as the return value from the trace function. Put
+ # another way, the trace function must always return itself. It cannot
+ # swap in other functions, or return None to avoid tracing a particular
+ # frame.
+ #
+ # The trace manipulator that introduced this restriction is DecoratorTools,
+ # which sets a trace function, and then later restores the pre-existing one
+ # by calling sys.settrace with a function it found in the current frame.
+ #
+ # Systems that use DecoratorTools (or similar trace manipulations) must use
+ # PyTracer to get accurate results. The command-line --timid argument is
+ # used to force the use of this tracer.
+
+ def __init__(self):
+ # Attributes set from the collector:
+ self.data = None
+ self.arcs = False
+ self.should_trace = None
+ self.should_trace_cache = None
+ self.warn = None
+ self.plugin_data = None
+ # The threading module to use, if any.
+ self.threading = None
+
+ self.plugin = []
+ self.cur_file_dict = []
+ self.last_line = [0]
+
+ self.data_stack = []
+ self.last_exc_back = None
+ self.last_exc_firstlineno = 0
+ self.thread = None
+ self.stopped = False
+
+ def __repr__(self):
+ return "<PyTracer at 0x{0:0x}: {1} lines in {2} files>".format(
+ id(self),
+ sum(len(v) for v in self.data.values()),
+ len(self.data),
+ )
+
+ def _trace(self, frame, event, arg_unused):
+ """The trace function passed to sys.settrace."""
+
+ if self.stopped:
+ return
+
+ if self.last_exc_back: # TODO: bring this up to speed
+ if frame == self.last_exc_back:
+ # Someone forgot a return event.
+ if self.arcs and self.cur_file_dict:
+ pair = (self.last_line, -self.last_exc_firstlineno)
+ self.cur_file_dict[pair] = None
+ self.plugin, self.cur_file_dict, self.last_line = (
+ self.data_stack.pop()
+ )
+ self.last_exc_back = None
+
+ if event == 'call':
+ # Entering a new function context. Decide if we should trace
+ # in this file.
+ self.data_stack.append(
+ (self.plugin, self.cur_file_dict, self.last_line)
+ )
+ filename = frame.f_code.co_filename
+ disp = self.should_trace_cache.get(filename)
+ if disp is None:
+ disp = self.should_trace(filename, frame)
+ self.should_trace_cache[filename] = disp
+
+ self.plugin = None
+ self.cur_file_dict = None
+ if disp.trace:
+ tracename = disp.source_filename
+ if disp.plugin:
+ dyn_func = disp.plugin.dynamic_source_file_name()
+ if dyn_func:
+ tracename = dyn_func(tracename, frame)
+ if tracename:
+ if not self.check_include(tracename):
+ tracename = None
+ else:
+ tracename = None
+ if tracename:
+ if tracename not in self.data:
+ self.data[tracename] = {}
+ if disp.plugin:
+ self.plugin_data[tracename] = disp.plugin.__name__
+ self.cur_file_dict = self.data[tracename]
+ self.plugin = disp.plugin
+ # Set the last_line to -1 because the next arc will be entering a
+ # code block, indicated by (-1, n).
+ self.last_line = -1
+ elif event == 'line':
+ # Record an executed line.
+ if self.plugin:
+ lineno_from, lineno_to = self.plugin.line_number_range(frame)
+ else:
+ lineno_from, lineno_to = frame.f_lineno, frame.f_lineno
+ if lineno_from != -1:
+ if self.cur_file_dict is not None:
+ if self.arcs:
+ self.cur_file_dict[
+ (self.last_line, lineno_from)
+ ] = None
+ else:
+ for lineno in range(lineno_from, lineno_to+1):
+ self.cur_file_dict[lineno] = None
+ self.last_line = lineno_to
+ elif event == 'return':
+ if self.arcs and self.cur_file_dict:
+ first = frame.f_code.co_firstlineno
+ self.cur_file_dict[(self.last_line, -first)] = None
+ # Leaving this function, pop the filename stack.
+ self.plugin, self.cur_file_dict, self.last_line = (
+ self.data_stack.pop()
+ )
+ elif event == 'exception':
+ self.last_exc_back = frame.f_back
+ self.last_exc_firstlineno = frame.f_code.co_firstlineno
+ return self._trace
+
+ def start(self):
+ """Start this Tracer.
+
+ Return a Python function suitable for use with sys.settrace().
+
+ """
+ if self.threading:
+ self.thread = self.threading.currentThread()
+ sys.settrace(self._trace)
+ return self._trace
+
+ def stop(self):
+ """Stop this Tracer."""
+ self.stopped = True
+ if self.threading and self.thread != self.threading.currentThread():
+ # Called on a different thread than started us: we can't unhook
+            # ourselves, but we've set the flag that we should stop, so we won't
+ # do any more tracing.
+ return
+
+ if self.warn:
+ if sys.gettrace() != self._trace:
+ msg = "Trace function changed, measurement is likely wrong: %r"
+ self.warn(msg % (sys.gettrace(),))
+
+ sys.settrace(None)
+
+ def get_stats(self):
+ """Return a dictionary of statistics, or None."""
+ return None
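A hedged sketch of wiring a PyTracer by hand; normally the Collector sets these attributes. The Disp class is a stand-in for FileDisposition holding just the fields the tracer reads, and the traced filename is made up:

    import sys
    from coverage.pytracer import PyTracer

    class Disp(object):
        """Stand-in for FileDisposition: just the fields PyTracer reads."""
        def __init__(self, filename, trace):
            self.trace = trace
            self.source_filename = filename
            self.plugin = None

    def should_trace(filename, frame):
        return Disp(filename, trace=(filename == "myscript.py"))

    tracer = PyTracer()
    tracer.data = {}
    tracer.should_trace = should_trace
    tracer.should_trace_cache = {}
    tracer.warn = lambda msg: sys.stderr.write(msg + "\n")
    tracer.plugin_data = {}

    tracer.start()
    try:
        exec(compile("a = 1\nb = 2\n", "myscript.py", "exec"), {})
    finally:
        tracer.stop()

    print(sorted(tracer.data.get("myscript.py", {})))   # [1, 2]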
diff --git a/coverage/report.py b/coverage/report.py
index 34f44422..b93749c8 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -1,8 +1,8 @@
"""Reporter foundation for Coverage."""
-import fnmatch, os
+import os
from coverage.codeunit import code_unit_factory
-from coverage.files import prep_patterns
+from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import CoverageException, NoSource, NotPython
class Reporter(object):
@@ -33,26 +33,24 @@ class Reporter(object):
"""
morfs = morfs or self.coverage.data.measured_files()
file_locator = self.coverage.file_locator
- self.code_units = code_unit_factory(morfs, file_locator)
+ get_plugin = self.coverage.data.plugin_data().get
+ self.code_units = code_unit_factory(morfs, file_locator, get_plugin)
if self.config.include:
patterns = prep_patterns(self.config.include)
+ matcher = FnmatchMatcher(patterns)
filtered = []
for cu in self.code_units:
- for pattern in patterns:
- if fnmatch.fnmatch(cu.filename, pattern):
- filtered.append(cu)
- break
+ if matcher.match(cu.filename):
+ filtered.append(cu)
self.code_units = filtered
if self.config.omit:
patterns = prep_patterns(self.config.omit)
+ matcher = FnmatchMatcher(patterns)
filtered = []
for cu in self.code_units:
- for pattern in patterns:
- if fnmatch.fnmatch(cu.filename, pattern):
- break
- else:
+ if not matcher.match(cu.filename):
filtered.append(cu)
self.code_units = filtered
diff --git a/coverage/results.py b/coverage/results.py
index 0576ae1f..6cbcbfc8 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -1,11 +1,9 @@
"""Results of coverage measurement."""
import collections
-import os
from coverage.backward import iitems
-from coverage.misc import format_lines, join_regex, NoSource
-from coverage.parser import CodeParser
+from coverage.misc import format_lines, join_regex
class Analysis(object):
@@ -16,18 +14,15 @@ class Analysis(object):
self.code_unit = code_unit
self.filename = self.code_unit.filename
- actual_filename, source = self.find_source(self.filename)
-
- self.parser = CodeParser(
- text=source, filename=actual_filename,
+ self.parser = code_unit.get_parser(
exclude=self.coverage._exclude_regex('exclude')
)
self.statements, self.excluded = self.parser.parse_source()
# Identify missing statements.
executed = self.coverage.data.executed_lines(self.filename)
- exec1 = self.parser.first_lines(executed)
- self.missing = self.statements - exec1
+ executed = self.parser.translate_lines(executed)
+ self.missing = self.statements - executed
if self.coverage.data.has_arcs():
self.no_branch = self.parser.lines_matching(
@@ -54,44 +49,6 @@ class Analysis(object):
n_missing_branches=n_missing_branches,
)
- def find_source(self, filename):
- """Find the source for `filename`.
-
- Returns two values: the actual filename, and the source.
-
- The source returned depends on which of these cases holds:
-
- * The filename seems to be a non-source file: returns None
-
- * The filename is a source file, and actually exists: returns None.
-
- * The filename is a source file, and is in a zip file or egg:
- returns the source.
-
- * The filename is a source file, but couldn't be found: raises
- `NoSource`.
-
- """
- source = None
-
- base, ext = os.path.splitext(filename)
- TRY_EXTS = {
- '.py': ['.py', '.pyw'],
- '.pyw': ['.pyw'],
- }
- try_exts = TRY_EXTS.get(ext)
- if not try_exts:
- return filename, None
-
- for try_ext in try_exts:
- try_filename = base + try_ext
- if os.path.exists(try_filename):
- return try_filename, None
- source = self.coverage.file_locator.get_zip_data(try_filename)
- if source:
- return try_filename, source
- raise NoSource("No source for code: '%s'" % filename)
-
def missing_formatted(self):
"""The missing line numbers, formatted nicely.
@@ -112,8 +69,7 @@ class Analysis(object):
def arcs_executed(self):
"""Returns a sorted list of the arcs actually executed in the code."""
executed = self.coverage.data.executed_arcs(self.filename)
- m2fl = self.parser.first_line
- executed = ((m2fl(l1), m2fl(l2)) for (l1,l2) in executed)
+ executed = self.parser.translate_arcs(executed)
return sorted(executed)
def arcs_missing(self):
@@ -127,6 +83,23 @@ class Analysis(object):
)
return sorted(missing)
+ def arcs_missing_formatted(self):
+        """The missing branch arcs, formatted nicely.
+
+ Returns a string like "1->2, 1->3, 16->20". Omits any mention of
+ missing lines, so if line 17 is missing, then 16->17 won't be included.
+
+ """
+ arcs = self.missing_branch_arcs()
+ missing = self.missing
+ line_exits = sorted(iitems(arcs))
+ pairs = []
+ for line, exits in line_exits:
+ for ex in sorted(exits):
+ if line not in missing and ex not in missing:
+ pairs.append('%d->%d' % (line, ex))
+ return ', '.join(pairs)
+
def arcs_unpredicted(self):
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
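A standalone illustration of the formatting rule in arcs_missing_formatted: arcs that touch a line which is itself missing are left out (the numbers are made up):

    arcs = {1: [2, 3], 16: [20]}      # line -> missed exits
    missing = set([3])                # line 3 was never executed at all

    pairs = []
    for line, exits in sorted(arcs.items()):
        for ex in sorted(exits):
            if line not in missing and ex not in missing:
                pairs.append('%d->%d' % (line, ex))

    assert ', '.join(pairs) == '1->2, 16->20'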
diff --git a/coverage/summary.py b/coverage/summary.py
index c99c5303..9d31c226 100644
--- a/coverage/summary.py
+++ b/coverage/summary.py
@@ -59,12 +59,19 @@ class SummaryReporter(Reporter):
args += (nums.n_branches, nums.n_missing_branches)
args += (nums.pc_covered_str,)
if self.config.show_missing:
- args += (analysis.missing_formatted(),)
+ missing_fmtd = analysis.missing_formatted()
+ if self.branches:
+ branches_fmtd = analysis.arcs_missing_formatted()
+ if branches_fmtd:
+ if missing_fmtd:
+ missing_fmtd += ", "
+ missing_fmtd += branches_fmtd
+ args += (missing_fmtd,)
outfile.write(fmt_coverage % args)
total += nums
except KeyboardInterrupt: # pragma: not covered
raise
- except:
+ except Exception:
report_it = not self.config.ignore_errors
if report_it:
typ, msg = sys.exc_info()[:2]
diff --git a/coverage/templite.py b/coverage/templite.py
index 1829aa82..53824e08 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -5,47 +5,55 @@
import re
+class TempliteSyntaxError(ValueError):
+ """Raised when a template has a syntax error."""
+ pass
+
+
class CodeBuilder(object):
"""Build source code conveniently."""
def __init__(self, indent=0):
self.code = []
- self.indent_amount = indent
+ self.indent_level = indent
+
+ def __str__(self):
+ return "".join(str(c) for c in self.code)
def add_line(self, line):
"""Add a line of source to the code.
- Don't include indentations or newlines.
+        Indentation and a newline will be added for you; don't provide them.
"""
- self.code.append(" " * self.indent_amount)
- self.code.append(line)
- self.code.append("\n")
+ self.code.extend([" " * self.indent_level, line, "\n"])
def add_section(self):
"""Add a section, a sub-CodeBuilder."""
- sect = CodeBuilder(self.indent_amount)
- self.code.append(sect)
- return sect
+ section = CodeBuilder(self.indent_level)
+ self.code.append(section)
+ return section
+
+ INDENT_STEP = 4 # PEP8 says so!
def indent(self):
"""Increase the current indent for following lines."""
- self.indent_amount += 4
+ self.indent_level += self.INDENT_STEP
def dedent(self):
"""Decrease the current indent for following lines."""
- self.indent_amount -= 4
+ self.indent_level -= self.INDENT_STEP
- def __str__(self):
- return "".join(str(c) for c in self.code)
-
- def get_function(self, fn_name):
- """Compile the code, and return the function `fn_name`."""
- assert self.indent_amount == 0
- g = {}
- code_text = str(self)
- exec(code_text, g)
- return g[fn_name]
+ def get_globals(self):
+ """Execute the code, and return a dict of globals it defines."""
+ # A check that the caller really finished all the blocks they started.
+ assert self.indent_level == 0
+ # Get the Python source as a single string.
+ python_source = str(self)
+ # Execute the source, defining globals, and return them.
+ global_namespace = {}
+ exec(python_source, global_namespace)
+ return global_namespace
class Templite(object):
@@ -68,7 +76,20 @@ class Templite(object):
{# This will be ignored #}
Construct a Templite with the template text, then use `render` against a
- dictionary context to create a finished string.
+ dictionary context to create a finished string::
+
+ templite = Templite('''
+ <h1>Hello {{name|upper}}!</h1>
+ {% for topic in topics %}
+ <p>You are interested in {{topic}}.</p>
+            {% endfor %}
+ ''',
+ {'upper': str.upper},
+ )
+ text = templite.render({
+ 'name': "Ned",
+ 'topics': ['Python', 'Geometry', 'Juggling'],
+ })
"""
def __init__(self, text, *contexts):
@@ -78,110 +99,135 @@ class Templite(object):
These are good for filters and global values.
"""
- self.text = text
self.context = {}
for context in contexts:
self.context.update(context)
+ self.all_vars = set()
+ self.loop_vars = set()
+
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
code = CodeBuilder()
- code.add_line("def render(ctx, dot):")
+ code.add_line("def render_function(context, do_dots):")
code.indent()
vars_code = code.add_section()
- self.all_vars = set()
- self.loop_vars = set()
code.add_line("result = []")
- code.add_line("a = result.append")
- code.add_line("e = result.extend")
- code.add_line("s = str")
+ code.add_line("append_result = result.append")
+ code.add_line("extend_result = result.extend")
+ code.add_line("to_str = str")
buffered = []
def flush_output():
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
- code.add_line("a(%s)" % buffered[0])
+ code.add_line("append_result(%s)" % buffered[0])
elif len(buffered) > 1:
- code.add_line("e([%s])" % ",".join(buffered))
+ code.add_line("extend_result([%s])" % ", ".join(buffered))
del buffered[:]
+ ops_stack = []
+
# Split the text to form a list of tokens.
- toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
+ tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
- ops_stack = []
- for tok in toks:
- if tok.startswith('{{'):
- # An expression to evaluate.
- buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
- elif tok.startswith('{#'):
+ for token in tokens:
+ if token.startswith('{#'):
# Comment: ignore it and move on.
continue
- elif tok.startswith('{%'):
+ elif token.startswith('{{'):
+ # An expression to evaluate.
+ expr = self._expr_code(token[2:-2].strip())
+ buffered.append("to_str(%s)" % expr)
+ elif token.startswith('{%'):
# Action tag: split into words and parse further.
flush_output()
- words = tok[2:-2].strip().split()
+ words = token[2:-2].strip().split()
if words[0] == 'if':
# An if statement: evaluate the expression to determine if.
- assert len(words) == 2
+ if len(words) != 2:
+ self._syntax_error("Don't understand if", token)
ops_stack.append('if')
- code.add_line("if %s:" % self.expr_code(words[1]))
+ code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
elif words[0] == 'for':
# A loop: iterate over expression result.
- assert len(words) == 4 and words[2] == 'in'
+ if len(words) != 4 or words[2] != 'in':
+ self._syntax_error("Don't understand for", token)
ops_stack.append('for')
- self.loop_vars.add(words[1])
+ self._variable(words[1], self.loop_vars)
code.add_line(
"for c_%s in %s:" % (
words[1],
- self.expr_code(words[3])
+ self._expr_code(words[3])
)
)
code.indent()
elif words[0].startswith('end'):
- # Endsomething. Pop the ops stack
+ # Endsomething. Pop the ops stack.
+ if len(words) != 1:
+ self._syntax_error("Don't understand end", token)
end_what = words[0][3:]
- if ops_stack[-1] != end_what:
- raise SyntaxError("Mismatched end tag: %r" % end_what)
- ops_stack.pop()
+ if not ops_stack:
+ self._syntax_error("Too many ends", token)
+ start_what = ops_stack.pop()
+ if start_what != end_what:
+ self._syntax_error("Mismatched end tag", end_what)
code.dedent()
else:
- raise SyntaxError("Don't understand tag: %r" % words[0])
+ self._syntax_error("Don't understand tag", words[0])
else:
# Literal content. If it isn't empty, output it.
- if tok:
- buffered.append("%r" % tok)
+ if token:
+ buffered.append(repr(token))
+
+ if ops_stack:
+ self._syntax_error("Unmatched action tag", ops_stack[-1])
+
flush_output()
for var_name in self.all_vars - self.loop_vars:
- vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))
-
- if ops_stack:
- raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])
+ vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
code.add_line("return ''.join(result)")
code.dedent()
- self.render_function = code.get_function('render')
+ self._render_function = code.get_globals()['render_function']
- def expr_code(self, expr):
+ def _expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
- code = self.expr_code(pipes[0])
+ code = self._expr_code(pipes[0])
for func in pipes[1:]:
- self.all_vars.add(func)
+ self._variable(func, self.all_vars)
code = "c_%s(%s)" % (func, code)
elif "." in expr:
dots = expr.split(".")
- code = self.expr_code(dots[0])
+ code = self._expr_code(dots[0])
args = ", ".join(repr(d) for d in dots[1:])
- code = "dot(%s, %s)" % (code, args)
+ code = "do_dots(%s, %s)" % (code, args)
else:
- self.all_vars.add(expr)
+ self._variable(expr, self.all_vars)
code = "c_%s" % expr
return code
+ def _syntax_error(self, msg, thing):
+ """Raise a syntax error using `msg`, and showing `thing`."""
+ raise TempliteSyntaxError("%s: %r" % (msg, thing))
+
+ def _variable(self, name, vars_set):
+ """Track that `name` is used as a variable.
+
+ Adds the name to `vars_set`, a set of variable names.
+
+        Raises a syntax error if `name` is not a valid name.
+
+ """
+ if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
+ self._syntax_error("Not a valid name", name)
+ vars_set.add(name)
+
def render(self, context=None):
"""Render this template by applying it to `context`.
@@ -189,18 +235,18 @@ class Templite(object):
"""
# Make the complete context we'll use.
- ctx = dict(self.context)
+ render_context = dict(self.context)
if context:
- ctx.update(context)
- return self.render_function(ctx, self.do_dots)
+ render_context.update(context)
+ return self._render_function(render_context, self._do_dots)
- def do_dots(self, value, *dots):
+ def _do_dots(self, value, *dots):
"""Evaluate dotted expressions at runtime."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
value = value[dot]
- if hasattr(value, '__call__'):
+ if callable(value):
value = value()
return value
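A usage sketch of the refactored Templite, matching the (corrected) docstring example above; the context values are made up:

    from coverage.templite import Templite

    templite = Templite(
        "<h1>Hello {{name|upper}}!</h1>\n"
        "{% for topic in topics %}"
        "<p>You are interested in {{topic}}.</p>\n"
        "{% endfor %}",
        {'upper': str.upper},
    )
    text = templite.render({
        'name': "Ned",
        'topics': ['Python', 'Geometry', 'Juggling'],
    })
    assert "Hello NED!" in text
    assert "interested in Juggling" in text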
diff --git a/coverage/test_helpers.py b/coverage/test_helpers.py
new file mode 100644
index 00000000..efe68dcd
--- /dev/null
+++ b/coverage/test_helpers.py
@@ -0,0 +1,258 @@
+"""Mixin classes to help make good tests."""
+
+import atexit
+import collections
+import os
+import random
+import shutil
+import sys
+import tempfile
+import textwrap
+
+from coverage.backunittest import TestCase
+from coverage.backward import StringIO, to_bytes
+
+
+class Tee(object):
+ """A file-like that writes to all the file-likes it has."""
+
+ def __init__(self, *files):
+        """Make a Tee that writes to all the files in `files`."""
+ self._files = files
+ if hasattr(files[0], "encoding"):
+ self.encoding = files[0].encoding
+
+ def write(self, data):
+ """Write `data` to all the files."""
+ for f in self._files:
+ f.write(data)
+
+ if 0:
+ # Use this if you need to use a debugger, though it makes some tests
+ # fail, I'm not sure why...
+ def __getattr__(self, name):
+ return getattr(self._files[0], name)
+
+
+class ModuleAwareMixin(TestCase):
+ """A test case mixin that isolates changes to sys.modules."""
+
+ def setUp(self):
+ super(ModuleAwareMixin, self).setUp()
+
+ # Record sys.modules here so we can restore it in tearDown.
+ self.old_modules = dict(sys.modules)
+ self.addCleanup(self.cleanup_modules)
+
+ def cleanup_modules(self):
+ """Remove any new modules imported during the test run.
+
+ This lets us import the same source files for more than one test.
+
+ """
+ for m in [m for m in sys.modules if m not in self.old_modules]:
+ del sys.modules[m]
+
+
+class SysPathAwareMixin(TestCase):
+ """A test case mixin that isolates changes to sys.path."""
+
+ def setUp(self):
+ super(SysPathAwareMixin, self).setUp()
+
+ self.old_syspath = sys.path[:]
+ self.addCleanup(self.cleanup_syspath)
+
+ def cleanup_syspath(self):
+ """Restore the original sys.path."""
+ sys.path = self.old_syspath
+
+
+class EnvironmentAwareMixin(TestCase):
+ """A test case mixin that isolates changes to the environment."""
+
+ def setUp(self):
+ super(EnvironmentAwareMixin, self).setUp()
+
+ # Record environment variables that we changed with set_environ.
+ self.environ_undos = {}
+
+ self.addCleanup(self.cleanup_environ)
+
+ def set_environ(self, name, value):
+ """Set an environment variable `name` to be `value`.
+
+        The environment variable is set, and a record is kept of its original
+        value, so that the cleanup can restore it when the test is done.
+
+ """
+ if name not in self.environ_undos:
+ self.environ_undos[name] = os.environ.get(name)
+ os.environ[name] = value
+
+ def cleanup_environ(self):
+ """Undo all the changes made by `set_environ`."""
+ for name, value in self.environ_undos.items():
+ if value is None:
+ del os.environ[name]
+ else:
+ os.environ[name] = value
+
+
+class StdStreamCapturingMixin(TestCase):
+ """A test case mixin that captures stdout and stderr."""
+
+ def setUp(self):
+ super(StdStreamCapturingMixin, self).setUp()
+
+ # Capture stdout and stderr so we can examine them in tests.
+ # nose keeps stdout from littering the screen, so we can safely Tee it,
+ # but it doesn't capture stderr, so we don't want to Tee stderr to the
+ # real stderr, since it will interfere with our nice field of dots.
+ self.old_stdout = sys.stdout
+ self.captured_stdout = StringIO()
+ sys.stdout = Tee(sys.stdout, self.captured_stdout)
+ self.old_stderr = sys.stderr
+ self.captured_stderr = StringIO()
+ sys.stderr = self.captured_stderr
+
+ self.addCleanup(self.cleanup_std_streams)
+
+ def cleanup_std_streams(self):
+ """Restore stdout and stderr."""
+ sys.stdout = self.old_stdout
+ sys.stderr = self.old_stderr
+
+ def stdout(self):
+ """Return the data written to stdout during the test."""
+ return self.captured_stdout.getvalue()
+
+ def stderr(self):
+ """Return the data written to stderr during the test."""
+ return self.captured_stderr.getvalue()
+
+
+class TempDirMixin(TestCase):
+ """A test case mixin that creates a temp directory and files in it."""
+
+ # Our own setting: most of these tests run in their own temp directory.
+ run_in_temp_dir = True
+
+ def setUp(self):
+ super(TempDirMixin, self).setUp()
+
+ if self.run_in_temp_dir:
+ # Create a temporary directory.
+ noise = str(random.random())[2:]
+ self.temp_root = os.path.join(tempfile.gettempdir(), 'test_cover')
+ self.temp_dir = os.path.join(self.temp_root, noise)
+ os.makedirs(self.temp_dir)
+ self.old_dir = os.getcwd()
+ os.chdir(self.temp_dir)
+
+ # Modules should be importable from this temp directory. We don't
+ # use '' because we make lots of different temp directories and
+ # nose's caching importer can get confused. The full path prevents
+ # problems.
+ sys.path.insert(0, os.getcwd())
+
+ class_behavior = self.class_behavior()
+ class_behavior.tests += 1
+ class_behavior.test_method_made_any_files = False
+ class_behavior.temp_dir = self.run_in_temp_dir
+
+ self.addCleanup(self.cleanup_temp_dir)
+
+ def cleanup_temp_dir(self):
+ """Clean up the temp directories we made."""
+
+ if self.run_in_temp_dir:
+ # Get rid of the temporary directory.
+ os.chdir(self.old_dir)
+ shutil.rmtree(self.temp_root)
+
+ class_behavior = self.class_behavior()
+ if class_behavior.test_method_made_any_files:
+ class_behavior.tests_making_files += 1
+
+ def make_file(self, filename, text="", newline=None):
+ """Create a file for testing.
+
+ `filename` is the relative path to the file, including directories if
+ desired, which will be created if need be. `text` is the content to
+ create in the file. If `newline` is provided, it is a string that will
+        be used as the line endings in the created file; otherwise the line
+        endings are as provided in `text`.
+
+ Returns `filename`.
+
+ """
+ # Tests that call `make_file` should be run in a temp environment.
+ assert self.run_in_temp_dir
+ self.class_behavior().test_method_made_any_files = True
+
+ text = textwrap.dedent(text)
+ if newline:
+ text = text.replace("\n", newline)
+
+ # Make sure the directories are available.
+ dirs, _ = os.path.split(filename)
+ if dirs and not os.path.exists(dirs):
+ os.makedirs(dirs)
+
+ # Create the file.
+ with open(filename, 'wb') as f:
+ f.write(to_bytes(text))
+
+ return filename
+
+ # We run some tests in temporary directories, because they may need to make
+ # files for the tests. But this is expensive, so we can change per-class
+ # whether a temp dir is used or not. It's easy to forget to set that
+ # option properly, so we track information about what the tests did, and
+ # then report at the end of the process on test classes that were set
+ # wrong.
+
+ class ClassBehavior(object):
+        """A value object to store per-class test behavior."""
+ def __init__(self):
+ self.tests = 0
+ self.temp_dir = True
+ self.tests_making_files = 0
+ self.test_method_made_any_files = False
+
+ # Map from class to info about how it ran.
+ class_behaviors = collections.defaultdict(ClassBehavior)
+
+ @classmethod
+ def report_on_class_behavior(cls):
+ """Called at process exit to report on class behavior."""
+ for test_class, behavior in cls.class_behaviors.items():
+ if behavior.temp_dir and behavior.tests_making_files == 0:
+ bad = "Inefficient"
+ elif not behavior.temp_dir and behavior.tests_making_files > 0:
+ bad = "Unsafe"
+ else:
+ bad = ""
+
+ if bad:
+ if behavior.temp_dir:
+ where = "in a temp directory"
+ else:
+ where = "without a temp directory"
+ print(
+ "%s: %s ran %d tests, %d made files %s" % (
+ bad,
+ test_class.__name__,
+ behavior.tests,
+ behavior.tests_making_files,
+ where,
+ )
+ )
+
+ def class_behavior(self):
+ """Get the ClassBehavior instance for this test."""
+ return self.class_behaviors[self.__class__]
+
+# When the process ends, find out about bad classes.
+atexit.register(TempDirMixin.report_on_class_behavior)
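A hedged sketch of composing the mixins into a test class; both mixins already derive from the project's TestCase, so nothing else is needed. The file contents and environment variable are made up:

    from coverage.test_helpers import EnvironmentAwareMixin, TempDirMixin

    class FileMakingTest(TempDirMixin, EnvironmentAwareMixin):
        def test_make_file(self):
            # Runs in a fresh temp directory; both changes below are
            # undone automatically by the mixins' cleanups.
            self.set_environ("COVERAGE_EXAMPLE", "1")
            filename = self.make_file("pkg/mod.py", "x = 1\n")
            with open(filename) as f:
                self.assertEqual(f.read(), "x = 1\n")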
diff --git a/coverage/tracer.c b/coverage/tracer.c
index 97dd113b..5bf5c462 100644
--- a/coverage/tracer.c
+++ b/coverage/tracer.c
@@ -30,6 +30,7 @@
#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
#define MyText_AS_STRING(o) PyBytes_AS_STRING(o)
#define MyInt_FromLong(l) PyLong_FromLong(l)
+#define MyInt_AsLong(o) PyLong_AsLong(o)
#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
@@ -40,6 +41,7 @@
#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
#define MyText_AS_STRING(o) PyString_AS_STRING(o)
#define MyInt_FromLong(l) PyInt_FromLong(l)
+#define MyInt_AsLong(o) PyInt_AsLong(o)
#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,
@@ -54,10 +56,23 @@
frame.
*/
typedef struct {
- PyObject * file_data; /* PyMem_Malloc'ed, a borrowed ref. */
+ /* The current file_data dictionary. Borrowed. */
+ PyObject * file_data;
+
+ /* The line number of the last line recorded, for tracing arcs.
+ -1 means there was no previous line, as when entering a code object.
+ */
int last_line;
} DataStackEntry;
+/* A data stack is a dynamically allocated vector of DataStackEntry's. */
+typedef struct {
+ int depth; /* The index of the last-used entry in stack. */
+ int alloc; /* number of entries allocated at stack. */
+ /* The file data at each level, or NULL if not recording. */
+ DataStackEntry * stack;
+} DataStack;
+
/* The CTracer type. */
typedef struct {
@@ -66,7 +81,9 @@ typedef struct {
/* Python objects manipulated directly by the Collector class. */
PyObject * should_trace;
PyObject * warn;
+ PyObject * coroutine_id_func;
PyObject * data;
+ PyObject * plugin_data;
PyObject * should_trace_cache;
PyObject * arcs;
@@ -86,19 +103,17 @@ typedef struct {
the keys are line numbers. In both cases, the value is irrelevant
(None).
*/
- /* The index of the last-used entry in data_stack. */
- int depth;
- /* The file data at each level, or NULL if not recording. */
- DataStackEntry * data_stack;
- int data_stack_alloc; /* number of entries allocated at data_stack. */
- /* The current file_data dictionary. Borrowed. */
- PyObject * cur_file_data;
+ DataStack data_stack; /* Used if we aren't doing coroutines. */
+ PyObject * data_stack_index; /* Used if we are doing coroutines. */
+ DataStack * data_stacks;
+ int data_stacks_alloc;
+ int data_stacks_used;
- /* The line number of the last line recorded, for tracing arcs.
- -1 means there was no previous line, as when entering a code object.
- */
- int last_line;
+ DataStack * pdata_stack;
+
+ /* The current file's data stack entry, copied from the stack. */
+ DataStackEntry cur_entry;
/* The parent frame for the last exception event, to fix missing returns. */
PyFrameObject * last_exc_back;
@@ -119,9 +134,47 @@ typedef struct {
#endif /* COLLECT_STATS */
} CTracer;
+
#define STACK_DELTA 100
static int
+DataStack_init(CTracer *self, DataStack *pdata_stack)
+{
+ pdata_stack->depth = -1;
+ pdata_stack->stack = NULL;
+ pdata_stack->alloc = 0;
+ return RET_OK;
+}
+
+static void
+DataStack_dealloc(CTracer *self, DataStack *pdata_stack)
+{
+ PyMem_Free(pdata_stack->stack);
+}
+
+static int
+DataStack_grow(CTracer *self, DataStack *pdata_stack)
+{
+ pdata_stack->depth++;
+ if (pdata_stack->depth >= pdata_stack->alloc) {
+ STATS( self->stats.stack_reallocs++; )
+ /* We've outgrown our data_stack array: make it bigger. */
+ int bigger = pdata_stack->alloc + STACK_DELTA;
+ DataStackEntry * bigger_data_stack = PyMem_Realloc(pdata_stack->stack, bigger * sizeof(DataStackEntry));
+ if (bigger_data_stack == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ pdata_stack->depth--;
+ return RET_ERROR;
+ }
+ pdata_stack->stack = bigger_data_stack;
+ pdata_stack->alloc = bigger;
+ }
+ return RET_OK;
+}
+
+
+static int
CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
{
#if COLLECT_STATS
@@ -138,24 +191,32 @@ CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
self->should_trace = NULL;
self->warn = NULL;
+ self->coroutine_id_func = NULL;
self->data = NULL;
+ self->plugin_data = NULL;
self->should_trace_cache = NULL;
self->arcs = NULL;
self->started = 0;
self->tracing_arcs = 0;
- self->depth = -1;
- self->data_stack = PyMem_Malloc(STACK_DELTA*sizeof(DataStackEntry));
- if (self->data_stack == NULL) {
+ if (DataStack_init(self, &self->data_stack)) {
+ return RET_ERROR;
+ }
+ self->data_stack_index = PyDict_New();
+ if (self->data_stack_index == NULL) {
STATS( self->stats.errors++; )
- PyErr_NoMemory();
return RET_ERROR;
}
- self->data_stack_alloc = STACK_DELTA;
- self->cur_file_data = NULL;
- self->last_line = -1;
+ self->data_stacks = NULL;
+ self->data_stacks_alloc = 0;
+ self->data_stacks_used = 0;
+
+ self->pdata_stack = &self->data_stack;
+
+ self->cur_entry.file_data = NULL;
+ self->cur_entry.last_line = -1;
self->last_exc_back = NULL;
@@ -165,16 +226,28 @@ CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
static void
CTracer_dealloc(CTracer *self)
{
+ int i;
+
if (self->started) {
PyEval_SetTrace(NULL, NULL);
}
Py_XDECREF(self->should_trace);
Py_XDECREF(self->warn);
+ Py_XDECREF(self->coroutine_id_func);
Py_XDECREF(self->data);
+ Py_XDECREF(self->plugin_data);
Py_XDECREF(self->should_trace_cache);
- PyMem_Free(self->data_stack);
+ DataStack_dealloc(self, &self->data_stack);
+ if (self->data_stacks) {
+ for (i = 0; i < self->data_stacks_used; i++) {
+ DataStack_dealloc(self, self->data_stacks + i);
+ }
+ PyMem_Free(self->data_stacks);
+ }
+
+ Py_XDECREF(self->data_stack_index);
Py_TYPE(self)->tp_free((PyObject*)self);
}
@@ -229,7 +302,7 @@ showlog(int depth, int lineno, PyObject * filename, const char * msg)
static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
#endif
-/* Record a pair of integers in self->cur_file_data. */
+/* Record a pair of integers in self->cur_entry.file_data. */
static int
CTracer_record_pair(CTracer *self, int l1, int l2)
{
@@ -237,7 +310,7 @@ CTracer_record_pair(CTracer *self, int l1, int l2)
PyObject * t = Py_BuildValue("(ii)", l1, l2);
if (t != NULL) {
- if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) {
+ if (PyDict_SetItem(self->cur_entry.file_data, t, Py_None) < 0) {
STATS( self->stats.errors++; )
ret = RET_ERROR;
}
@@ -250,6 +323,63 @@ CTracer_record_pair(CTracer *self, int l1, int l2)
return ret;
}
+/* Set self->pdata_stack to the proper data_stack to use. */
+static int
+CTracer_set_pdata_stack(CTracer *self)
+{
+ if (self->coroutine_id_func != Py_None) {
+ PyObject * co_obj = NULL;
+ PyObject * stack_index = NULL;
+ long the_index = 0;
+
+ co_obj = PyObject_CallObject(self->coroutine_id_func, NULL);
+ if (co_obj == NULL) {
+ return RET_ERROR;
+ }
+ stack_index = PyDict_GetItem(self->data_stack_index, co_obj);
+ if (stack_index == NULL) {
+ /* A new coroutine object. Make a new data stack. */
+ the_index = self->data_stacks_used;
+ stack_index = MyInt_FromLong(the_index);
+ if (PyDict_SetItem(self->data_stack_index, co_obj, stack_index) < 0) {
+ STATS( self->stats.errors++; )
+ Py_XDECREF(co_obj);
+ Py_XDECREF(stack_index);
+ return RET_ERROR;
+ }
+ self->data_stacks_used++;
+ if (self->data_stacks_used >= self->data_stacks_alloc) {
+ int bigger = self->data_stacks_alloc + 10;
+ DataStack * bigger_stacks = PyMem_Realloc(self->data_stacks, bigger * sizeof(DataStack));
+ if (bigger_stacks == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ Py_XDECREF(co_obj);
+ Py_XDECREF(stack_index);
+ return RET_ERROR;
+ }
+ self->data_stacks = bigger_stacks;
+ self->data_stacks_alloc = bigger;
+ }
+ DataStack_init(self, &self->data_stacks[the_index]);
+ }
+ else {
+ Py_INCREF(stack_index);
+ the_index = MyInt_AsLong(stack_index);
+ }
+
+ self->pdata_stack = &self->data_stacks[the_index];
+
+ Py_XDECREF(co_obj);
+ Py_XDECREF(stack_index);
+ }
+ else {
+ self->pdata_stack = &self->data_stack;
+ }
+
+ return RET_OK;
+}
+
/*
* The Trace Function
*/
@@ -259,6 +389,8 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
int ret = RET_OK;
PyObject * filename = NULL;
PyObject * tracename = NULL;
+ PyObject * disposition = NULL;
+ PyObject * disp_trace = NULL;
#if WHAT_LOG || TRACE_LOG
PyObject * ascii = NULL;
#endif
@@ -293,16 +425,18 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
we'll need to keep more of the missed frame's state.
*/
STATS( self->stats.missed_returns++; )
- if (self->depth >= 0) {
- if (self->tracing_arcs && self->cur_file_data) {
- if (CTracer_record_pair(self, self->last_line, -self->last_exc_firstlineno) < 0) {
+ if (CTracer_set_pdata_stack(self)) {
+ return RET_ERROR;
+ }
+ if (self->pdata_stack->depth >= 0) {
+ if (self->tracing_arcs && self->cur_entry.file_data) {
+ if (CTracer_record_pair(self, self->cur_entry.last_line, -self->last_exc_firstlineno) < 0) {
return RET_ERROR;
}
}
- SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
- self->cur_file_data = self->data_stack[self->depth].file_data;
- self->last_line = self->data_stack[self->depth].last_line;
- self->depth--;
+ SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
+ self->cur_entry = self->pdata_stack->stack[self->pdata_stack->depth];
+ self->pdata_stack->depth--;
}
}
self->last_exc_back = NULL;
@@ -313,109 +447,158 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
case PyTrace_CALL: /* 0 */
STATS( self->stats.calls++; )
/* Grow the stack. */
- self->depth++;
- if (self->depth >= self->data_stack_alloc) {
- STATS( self->stats.stack_reallocs++; )
- /* We've outgrown our data_stack array: make it bigger. */
- int bigger = self->data_stack_alloc + STACK_DELTA;
- DataStackEntry * bigger_data_stack = PyMem_Realloc(self->data_stack, bigger * sizeof(DataStackEntry));
- if (bigger_data_stack == NULL) {
- STATS( self->stats.errors++; )
- PyErr_NoMemory();
- self->depth--;
- return RET_ERROR;
- }
- self->data_stack = bigger_data_stack;
- self->data_stack_alloc = bigger;
+ if (CTracer_set_pdata_stack(self)) {
+ return RET_ERROR;
+ }
+ if (DataStack_grow(self, self->pdata_stack)) {
+ return RET_ERROR;
}
/* Push the current state on the stack. */
- self->data_stack[self->depth].file_data = self->cur_file_data;
- self->data_stack[self->depth].last_line = self->last_line;
+ self->pdata_stack->stack[self->pdata_stack->depth] = self->cur_entry;
/* Check if we should trace this line. */
filename = frame->f_code->co_filename;
- tracename = PyDict_GetItem(self->should_trace_cache, filename);
- if (tracename == NULL) {
+ disposition = PyDict_GetItem(self->should_trace_cache, filename);
+ if (disposition == NULL) {
STATS( self->stats.new_files++; )
/* We've never considered this file before. */
/* Ask should_trace about it. */
PyObject * args = Py_BuildValue("(OO)", filename, frame);
- tracename = PyObject_Call(self->should_trace, args, NULL);
+ disposition = PyObject_Call(self->should_trace, args, NULL);
Py_DECREF(args);
- if (tracename == NULL) {
+ if (disposition == NULL) {
/* An error occurred inside should_trace. */
STATS( self->stats.errors++; )
return RET_ERROR;
}
- if (PyDict_SetItem(self->should_trace_cache, filename, tracename) < 0) {
+ if (PyDict_SetItem(self->should_trace_cache, filename, disposition) < 0) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
}
else {
- Py_INCREF(tracename);
+ Py_INCREF(disposition);
+ }
+
+ disp_trace = PyObject_GetAttrString(disposition, "trace");
+ if (disp_trace == NULL) {
+ STATS( self->stats.errors++; )
+ Py_DECREF(disposition);
+ return RET_ERROR;
+ }
+
+ tracename = Py_None;
+ Py_INCREF(tracename);
+
+ if (disp_trace == Py_True) {
+ /* The disposition says to trace: use its source_filename as the key. */
+ tracename = PyObject_GetAttrString(disposition, "source_filename");
+ if (tracename == NULL) {
+ STATS( self->stats.errors++; )
+ Py_DECREF(disposition);
+ Py_DECREF(disp_trace);
+ return RET_ERROR;
+ }
}
+ Py_DECREF(disp_trace);
- /* If tracename is a string, then we're supposed to trace. */
if (MyText_Check(tracename)) {
PyObject * file_data = PyDict_GetItem(self->data, tracename);
+ PyObject * disp_plugin = NULL;
+ PyObject * disp_plugin_name = NULL;
+
if (file_data == NULL) {
file_data = PyDict_New();
if (file_data == NULL) {
STATS( self->stats.errors++; )
+ Py_DECREF(tracename);
+ Py_DECREF(disposition);
return RET_ERROR;
}
ret = PyDict_SetItem(self->data, tracename, file_data);
Py_DECREF(file_data);
if (ret < 0) {
STATS( self->stats.errors++; )
+ Py_DECREF(tracename);
+ Py_DECREF(disposition);
return RET_ERROR;
}
+
+ if (self->plugin_data != NULL) {
+ /* If the disposition mentions a plugin, record that. */
+ disp_plugin = PyObject_GetAttrString(disposition, "plugin");
+ if (disp_plugin == NULL) {
+ STATS( self->stats.errors++; )
+ Py_DECREF(tracename);
+ Py_DECREF(disposition);
+ return RET_ERROR;
+ }
+ if (disp_plugin != Py_None) {
+ disp_plugin_name = PyObject_GetAttrString(disp_plugin, "__name__");
+ Py_DECREF(disp_plugin);
+ if (disp_plugin_name == NULL) {
+ STATS( self->stats.errors++; )
+ Py_DECREF(tracename);
+ Py_DECREF(disposition);
+ return RET_ERROR;
+ }
+ ret = PyDict_SetItem(self->plugin_data, tracename, disp_plugin_name);
+ Py_DECREF(disp_plugin_name);
+ if (ret < 0) {
+ Py_DECREF(tracename);
+ Py_DECREF(disposition);
+ return RET_ERROR;
+ }
+ }
+ }
}
- self->cur_file_data = file_data;
+ self->cur_entry.file_data = file_data;
/* Make the frame right in case settrace(gettrace()) happens. */
Py_INCREF(self);
frame->f_trace = (PyObject*)self;
- SHOWLOG(self->depth, frame->f_lineno, filename, "traced");
+ SHOWLOG(self->pdata_stack->depth, frame->f_lineno, filename, "traced");
}
else {
- self->cur_file_data = NULL;
- SHOWLOG(self->depth, frame->f_lineno, filename, "skipped");
+ self->cur_entry.file_data = NULL;
+ SHOWLOG(self->pdata_stack->depth, frame->f_lineno, filename, "skipped");
}
Py_DECREF(tracename);
+ Py_DECREF(disposition);
- self->last_line = -1;
+ self->cur_entry.last_line = -1;
break;
case PyTrace_RETURN: /* 3 */
STATS( self->stats.returns++; )
/* A near-copy of this code is above in the missing-return handler. */
- if (self->depth >= 0) {
- if (self->tracing_arcs && self->cur_file_data) {
+ if (CTracer_set_pdata_stack(self)) {
+ return RET_ERROR;
+ }
+ if (self->pdata_stack->depth >= 0) {
+ if (self->tracing_arcs && self->cur_entry.file_data) {
int first = frame->f_code->co_firstlineno;
- if (CTracer_record_pair(self, self->last_line, -first) < 0) {
+ if (CTracer_record_pair(self, self->cur_entry.last_line, -first) < 0) {
return RET_ERROR;
}
}
- SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "return");
- self->cur_file_data = self->data_stack[self->depth].file_data;
- self->last_line = self->data_stack[self->depth].last_line;
- self->depth--;
+ SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "return");
+ self->cur_entry = self->pdata_stack->stack[self->pdata_stack->depth];
+ self->pdata_stack->depth--;
}
break;
case PyTrace_LINE: /* 2 */
STATS( self->stats.lines++; )
- if (self->depth >= 0) {
- SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "line");
- if (self->cur_file_data) {
+ if (self->pdata_stack->depth >= 0) {
+ SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "line");
+ if (self->cur_entry.file_data) {
/* We're tracing in this frame: record something. */
if (self->tracing_arcs) {
/* Tracing arcs: key is (last_line,this_line). */
- if (CTracer_record_pair(self, self->last_line, frame->f_lineno) < 0) {
+ if (CTracer_record_pair(self, self->cur_entry.last_line, frame->f_lineno) < 0) {
return RET_ERROR;
}
}
@@ -426,7 +609,7 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
STATS( self->stats.errors++; )
return RET_ERROR;
}
- ret = PyDict_SetItem(self->cur_file_data, this_line, Py_None);
+ ret = PyDict_SetItem(self->cur_entry.file_data, this_line, Py_None);
Py_DECREF(this_line);
if (ret < 0) {
STATS( self->stats.errors++; )
@@ -434,7 +617,7 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
}
}
}
- self->last_line = frame->f_lineno;
+ self->cur_entry.last_line = frame->f_lineno;
}
break;
@@ -550,7 +733,7 @@ CTracer_start(CTracer *self, PyObject *args_unused)
PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
self->started = 1;
self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs);
- self->last_line = -1;
+ self->cur_entry.last_line = -1;
/* start() returns a trace function usable with sys.settrace() */
Py_INCREF(self);
@@ -582,7 +765,7 @@ CTracer_get_stats(CTracer *self)
"new_files", self->stats.new_files,
"missed_returns", self->stats.missed_returns,
"stack_reallocs", self->stats.stack_reallocs,
- "stack_alloc", self->data_stack_alloc,
+ "stack_alloc", self->pdata_stack->alloc,
"errors", self->stats.errors
);
#else
@@ -598,9 +781,15 @@ CTracer_members[] = {
{ "warn", T_OBJECT, offsetof(CTracer, warn), 0,
PyDoc_STR("Function for issuing warnings.") },
+ { "coroutine_id_func", T_OBJECT, offsetof(CTracer, coroutine_id_func), 0,
+ PyDoc_STR("Function for determining the coroutine context.") },
+
{ "data", T_OBJECT, offsetof(CTracer, data), 0,
PyDoc_STR("The raw dictionary of trace data.") },
+ { "plugin_data", T_OBJECT, offsetof(CTracer, plugin_data), 0,
+ PyDoc_STR("Mapping from filename to plugin name.") },
+
{ "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
PyDoc_STR("Dictionary caching should_trace results.") },
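
The new CTracer_set_pdata_stack function keeps one data stack per coroutine,
keyed by whatever object coroutine_id_func() returns, and falls back to the
single shared stack when no id function is configured. The Python sketch below
shows the same bookkeeping in miniature; the class and method names are
illustrative, not the tracer's own.

    class DataStack(object):
        """One stack of saved (file_data, last_line) entries for one context."""
        def __init__(self):
            self.frames = []

    class StackSelector(object):
        """Pick the data stack for the currently running coroutine."""

        def __init__(self, coroutine_id_func=None):
            self.coroutine_id_func = coroutine_id_func
            self.default_stack = DataStack()
            self.stacks = {}            # coroutine object -> DataStack

        def current_stack(self):
            if self.coroutine_id_func is None:
                # No coroutine support configured: one shared stack.
                return self.default_stack
            key = self.coroutine_id_func()
            if key not in self.stacks:
                # First time this coroutine is seen: give it its own stack.
                self.stacks[key] = DataStack()
            return self.stacks[key]

    # Usage idea: with greenlets, greenlet.getcurrent would serve as the id
    # function, so each greenlet records into its own stack.
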
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index f5a4c1ba..92a8975a 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -17,7 +17,8 @@ class XmlReporter(Reporter):
def __init__(self, coverage, config):
super(XmlReporter, self).__init__(coverage, config)
- self.packages = None
+ self.source_paths = set()
+ self.packages = {}
self.xml_out = None
self.arcs = coverage.data.has_arcs()
@@ -47,16 +48,26 @@ class XmlReporter(Reporter):
xcoverage.appendChild(self.xml_out.createComment(
" Generated by coverage.py: %s " % __url__
))
- xpackages = self.xml_out.createElement("packages")
- xcoverage.appendChild(xpackages)
# Call xml_file for each file in the data.
- self.packages = {}
self.report_files(self.xml_file, morfs)
+ xsources = self.xml_out.createElement("sources")
+ xcoverage.appendChild(xsources)
+
+ # Populate the XML DOM with the source info.
+ for path in sorted(self.source_paths):
+ xsource = self.xml_out.createElement("source")
+ xsources.appendChild(xsource)
+ txt = self.xml_out.createTextNode(path)
+ xsource.appendChild(txt)
+
lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0
+ xpackages = self.xml_out.createElement("packages")
+ xcoverage.appendChild(xpackages)
+
# Populate the XML DOM with the package info.
for pkg_name in sorted(self.packages.keys()):
pkg_data = self.packages[pkg_name]
@@ -99,6 +110,7 @@ class XmlReporter(Reporter):
package_name = cu.name.rpartition(".")[0]
className = cu.name
+ self.source_paths.add(cu.file_locator.relative_dir.rstrip('/'))
package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
xclass = self.xml_out.createElement("class")
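
The reporter now collects each measured file's relative directory in
self.source_paths and writes the set out as a <sources> element before the
<packages> element. A standalone sketch of that DOM construction, with made-up
paths:

    from xml.dom import minidom

    source_paths = set(["src", "tests"])        # hypothetical directories

    impl = minidom.getDOMImplementation()
    doc = impl.createDocument(None, "coverage", None)
    xcoverage = doc.documentElement

    xsources = doc.createElement("sources")
    xcoverage.appendChild(xsources)
    for path in sorted(source_paths):
        xsource = doc.createElement("source")
        xsources.appendChild(xsource)
        xsource.appendChild(doc.createTextNode(path))

    xpackages = doc.createElement("packages")   # package data would follow
    xcoverage.appendChild(xpackages)

    print(doc.toprettyxml(indent="  "))
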
diff --git a/doc/api.rst b/doc/api.rst
index a43de17f..0a00947e 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -9,16 +9,17 @@ Coverage API
:history: 20100221T151500, docs for 3.3 (on the plane back from PyCon)
:history: 20100725T211700, updated for 3.4.
:history: 20121111T235800, added a bit of clarification.
+:history: 20140819T132600, change class name to Coverage
The API to coverage.py is very simple, contained in a single module called
-`coverage`. Most of the interface is in a single class, also called
-`coverage`. Methods on the coverage object correspond roughly to operations
+`coverage`. Most of the interface is in a single class, called
+`Coverage`. Methods on the Coverage object correspond roughly to operations
available in the command line interface. For example, a simple use would be::
import coverage
- cov = coverage.coverage()
+ cov = coverage.Coverage()
cov.start()
# .. call your code ..
@@ -34,7 +35,7 @@ The coverage module
.. module:: coverage
-.. autoclass:: coverage
+.. autoclass:: Coverage
:members:
diff --git a/doc/config.rst b/doc/config.rst
index 7ff82021..882fc777 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -41,7 +41,7 @@ Boolean values can be specified as ``on``, ``off``, ``true``, ``false``, ``1``,
or ``0`` and are case-insensitive.
Environment variables can be substituted in by using dollar signs: ``$WORD``
-``${WORD}`` will be replaced with the value of ``WORD`` in the environment.
+or ``${WORD}`` will be replaced with the value of ``WORD`` in the environment.
A dollar sign can be inserted with ``$$``. Missing environment variables
will result in empty strings with no error.
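
A rough sketch of the substitution rules described above: $WORD and ${WORD}
expand to the variable's value, $$ produces a literal dollar sign, and missing
variables become empty strings. This illustrates the rules only; it is not
coverage.py's own config parser.

    import os
    import re

    def substitute_env(text, environ=os.environ):
        """Expand $WORD, ${WORD} and $$ in `text` using `environ`."""
        def replace(match):
            if match.group(0) == "$$":
                return "$"
            name = match.group("name1") or match.group("name2")
            return environ.get(name, "")
        pattern = r"\$\$|\$(?P<name1>\w+)|\$\{(?P<name2>\w+)\}"
        return re.sub(pattern, replace, text)

    print(substitute_env("data_file = $HOME/.coverage, price: $$5"))
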
diff --git a/doc/faq.rst b/doc/faq.rst
index 78db591f..d7ae3641 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -50,6 +50,19 @@ If you are using the :ref:`API <api>`, you need to call coverage.start() before
importing the modules that define your functions.
+**Q: Coverage is much slower than I remember; what's going on?**
+
+Make sure you are using the C trace function. Coverage.py provides two
+implementations of the trace function. The C implementation runs much faster.
+To see what you are running, use ``coverage debug sys``. The output contains
+details of the environment, including a line that says either ``tracer: CTracer``
+or ``tracer: PyTracer``. If it says ``PyTracer``, you are using the slow
+Python implementation.
+
+If you are getting the PyTracer unexpectedly, try re-installing coverage.py
+and check that the C extension compiled and installed, so that you get the
+CTracer as you should.
+
+
**Q: Does coverage.py work on Python 3.x?**
Yes, Python 3 is fully supported.
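
One quick way to check whether the compiled tracer is even available in an
environment (a sketch, not an official API; the C extension normally lives in
the coverage.tracer module):

    try:
        from coverage.tracer import CTracer
    except ImportError:
        print("No C extension: coverage will fall back to the slow PyTracer")
    else:
        print("CTracer is available: %r" % CTracer)
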
diff --git a/howto.txt b/howto.txt
index b5432535..3367a063 100644
--- a/howto.txt
+++ b/howto.txt
@@ -4,7 +4,7 @@
- Windows
- Ubuntu
- Mac
- - Pythons 2.3, 2.4, 2.5, 2.6, 2.7, 3.1, 3.2, 3.3
+ - Pythons 2.6, 2.7, 3.2, 3.3, 3.4
- Version number in coverage/version.py
- 3.1a1, 3.1b1, 3.1c1, 3.1
- Update CHANGES.txt, including release date.
diff --git a/igor.py b/igor.py
index f7879d7c..6c1c5d1a 100644
--- a/igor.py
+++ b/igor.py
@@ -12,9 +12,13 @@ import os
import platform
import socket
import sys
+import warnings
import zipfile
+warnings.simplefilter("default")
+
+
# Functions named do_* are executable from the command line: do_blah is run
# by "python igor.py blah".
@@ -41,6 +45,9 @@ def run_tests(tracer, *nose_args):
import nose.core
if tracer == "py":
label = "with Python tracer"
+ if os.environ.get("COVERAGE_NO_PYTRACER"):
+ print("Skipping tests, don't want PyTracer")
+ return
else:
label = "with C tracer"
if os.environ.get("COVERAGE_NO_EXTENSION"):
@@ -59,9 +66,11 @@ def run_tests_with_coverage(tracer, *nose_args):
os.environ['COVERAGE_HOME'] = os.getcwd()
# Create the .pth file that will let us measure coverage in sub-processes.
+ # The .pth file apparently has to sort alphabetically after easy-install.pth,
+ # or the sys.path entries aren't set up correctly; it isn't clear why.
import nose
pth_dir = os.path.dirname(os.path.dirname(nose.__file__))
- pth_path = os.path.join(pth_dir, "covcov.pth")
+ pth_path = os.path.join(pth_dir, "zzz_metacov.pth")
with open(pth_path, "w") as pth_file:
pth_file.write("import coverage; coverage.process_startup()\n")
@@ -73,6 +82,7 @@ def run_tests_with_coverage(tracer, *nose_args):
# if we clobber the cover_prefix in the coverage object, we can defeat the
# self-detection.
cov.cover_prefix = "Please measure coverage.py!"
+ cov._warn_unimported_source = False
cov.erase()
cov.start()
@@ -215,7 +225,8 @@ def print_banner(label):
pypy_version = sys.pypy_version_info # pylint: disable=E1101
version += " (pypy %s)" % ".".join(str(v) for v in pypy_version)
- print('=== %s %s %s (%s) ===' % (impl, version, label, sys.executable))
+ which_python = os.path.relpath(sys.executable)
+ print('=== %s %s %s (%s) ===' % (impl, version, label, which_python))
def do_help():
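
For context on the .pth trick above: a .pth file in site-packages is executed
at every interpreter startup, and coverage.process_startup() begins measuring
when the COVERAGE_PROCESS_START environment variable points at a config file.
A hedged sketch of the two pieces; the child script name is hypothetical.

    # zzz_metacov.pth, dropped into site-packages, contains one line:
    #     import coverage; coverage.process_startup()

    import os
    import subprocess

    env = dict(os.environ)
    env["COVERAGE_PROCESS_START"] = "metacov.ini"   # config file to use
    subprocess.call(["python", "some_child_script.py"], env=env)
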
diff --git a/lab/parser.py b/lab/parser.py
index a8e03eec..932480df 100644
--- a/lab/parser.py
+++ b/lab/parser.py
@@ -1,13 +1,17 @@
"""Parser.py: a main for invoking code in coverage/parser.py"""
+from __future__ import division
+
import glob, os, sys
+import collections
from optparse import OptionParser
import disgen
from coverage.misc import CoverageException
-from coverage.parser import ByteParser, CodeParser
+from coverage.parser import ByteParser, PythonParser
+opcode_counts = collections.Counter()
class ParserMain(object):
"""A main for code parsing experiments."""
@@ -25,6 +29,10 @@ class ParserMain(object):
help="Disassemble"
)
parser.add_option(
+ "-H", action="store_true", dest="histogram",
+ help="Count occurrences of opcodes"
+ )
+ parser.add_option(
"-R", action="store_true", dest="recursive",
help="Recurse to find source files"
)
@@ -51,18 +59,25 @@ class ParserMain(object):
else:
self.one_file(options, args[0])
+ if options.histogram:
+ total = sum(opcode_counts.values())
+ print("{} total opcodes".format(total))
+ for opcode, number in opcode_counts.most_common():
+ print("{:20s} {:6d} {:.1%}".format(opcode, number, number/total))
+
+
def one_file(self, options, filename):
"""Process just one file."""
try:
bp = ByteParser(filename=filename)
- except CoverageException as err:
+ except Exception as err:
print("%s" % (err,))
return
if options.dis:
print("Main code:")
- self.disassemble(bp)
+ self.disassemble(bp, histogram=options.histogram)
arcs = bp._all_arcs()
if options.chunks and not options.dis:
@@ -74,7 +89,7 @@ class ParserMain(object):
print("Arcs: %r" % sorted(arcs))
if options.source or options.tokens:
- cp = CodeParser(filename=filename, exclude=r"no\s*cover")
+ cp = PythonParser(filename=filename, exclude=r"no\s*cover")
cp.show_tokens = options.tokens
cp._raw_parse()
@@ -104,7 +119,7 @@ class ParserMain(object):
(lineno, m0, m1, m2, m3, a, ltext)
)
- def disassemble(self, byte_parser):
+ def disassemble(self, byte_parser, histogram=False):
"""Disassemble code, for ad-hoc experimenting."""
for bp in byte_parser.child_parsers():
@@ -117,6 +132,9 @@ class ParserMain(object):
print("\n%s: " % bp.code)
upto = None
for disline in disgen.disgen(bp.code):
+ if histogram:
+ opcode_counts[disline.opcode] += 1
+ continue
if disline.first:
if srclines:
upto = upto or disline.lineno-1
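
The new -H option tallies opcodes in a collections.Counter while disassembling
with the local disgen helper. As a rough standalone illustration of the same
idea, using only the standard dis module (Python 3.4+), independent of
lab/parser.py:

    import collections
    import dis

    def opcode_histogram(source, filename="<example>"):
        """Count opcode occurrences in the compiled form of `source`."""
        # Note: this sketch does not recurse into nested code objects.
        counts = collections.Counter()
        code = compile(source, filename, "exec")
        for instruction in dis.get_instructions(code):
            counts[instruction.opname] += 1
        return counts

    counts = opcode_histogram("total = 0\nfor i in range(10):\n    total += i\n")
    total = sum(counts.values())
    for opname, number in counts.most_common():
        print("{:20s} {:6d} {:.1%}".format(opname, number, number / total))
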
diff --git a/metacov.ini b/metacov.ini
index 75bd9d2d..f94b834c 100644
--- a/metacov.ini
+++ b/metacov.ini
@@ -1,24 +1,22 @@
# Settings to use when using coverage.py to measure itself.
[run]
branch = true
-data_file = $COVERAGE_HOME/.coverage.meta
+data_file = $COVERAGE_HOME/.metacov
parallel = true
source =
$COVERAGE_HOME/coverage
$COVERAGE_HOME/tests
- $COVERAGE_HOME/.tox/py25/lib/python2.5/site-packages/coverage
$COVERAGE_HOME/.tox/py26/lib/python2.6/site-packages/coverage
$COVERAGE_HOME/.tox/py27/lib/python2.7/site-packages/coverage
- $COVERAGE_HOME/.tox/py31/lib/python3.1/site-packages/coverage
$COVERAGE_HOME/.tox/py32/lib/python3.2/site-packages/coverage
$COVERAGE_HOME/.tox/py33/lib/python3.3/site-packages/coverage
+ $COVERAGE_HOME/.tox/py34/lib/python3.4/site-packages/coverage
$COVERAGE_HOME/.tox/pypy/site-packages/coverage
- $COVERAGE_HOME\.tox\py25\Lib\site-packages\coverage
$COVERAGE_HOME\.tox\py26\Lib\site-packages\coverage
$COVERAGE_HOME\.tox\py27\Lib\site-packages\coverage
- $COVERAGE_HOME\.tox\py31\Lib\site-packages\coverage
$COVERAGE_HOME\.tox\py32\Lib\site-packages\coverage
$COVERAGE_HOME\.tox\py33\Lib\site-packages\coverage
+ $COVERAGE_HOME\.tox\py34\Lib\site-packages\coverage
[report]
# We set a different pragma so our code won't be confused with test code.
@@ -38,18 +36,7 @@ precision = 1
[paths]
source =
- /home/ned/coverage/trunk
- /home/ned/coverage/trunk/.tox/py25/lib/python2.5/site-packages
- /home/ned/coverage/trunk/.tox/py26/lib/python2.6/site-packages
- /home/ned/coverage/trunk/.tox/py27/lib/python2.7/site-packages
- /home/ned/coverage/trunk/.tox/py31/lib/python3.1/site-packages
- /home/ned/coverage/trunk/.tox/py32/lib/python3.2/site-packages
- /home/ned/coverage/trunk/.tox/py33/lib/python3.3/site-packages
- /home/ned/coverage/trunk/.tox/pypy/site-packages
- C:\ned\coverage\trunk
- C:\ned\coverage\trunk\.tox\py25\Lib\site-packages
- C:\ned\coverage\trunk\.tox\py26\Lib\site-packages
- C:\ned\coverage\trunk\.tox\py27\Lib\site-packages
- C:\ned\coverage\trunk\.tox\py31\Lib\site-packages
- C:\ned\coverage\trunk\.tox\py32\Lib\site-packages
- C:\ned\coverage\trunk\.tox\py33\Lib\site-packages
+ .
+ */.tox/*/lib/*/site-packages
+ */.tox/pypy/site-packages
+ */coverage/trunk
diff --git a/pylintrc b/pylintrc
index f11174b4..c247fb78 100644
--- a/pylintrc
+++ b/pylintrc
@@ -68,6 +68,8 @@ disable=
# R0201: 42:Tracer.stop: Method could be a function
# E1103: 26:RunTests.test_run_python_file: Instance of 'file' has no 'getvalue' member (but some types could not be inferred)
R0201,E1103,
+# formatting stuff
+ superfluous-parens,bad-continuation,
# Messages that are noisy for now, eventually maybe we'll turn them on:
# C0103:256:coverage.morf_filename: Invalid name "f" (should match [a-z_][a-z0-9_]{2,30}$)
# W0212: 86:Reporter.report_files: Access to a protected member _analyze of a client class
@@ -79,9 +81,6 @@ disable=
# (visual studio) and html
output-format=text
-# Include message's id in output
-include-ids=yes
-
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
@@ -239,13 +238,13 @@ max-locals=50
max-returns=20
# Maximum number of branch for function / method body
-max-branchs=50
+max-branches=50
# Maximum number of statements in function / method body
max-statements=150
# Maximum number of parents for a class (see R0901).
-max-parents=7
+max-parents=12
# Maximum number of attributes for a class (see R0902).
max-attributes=40
diff --git a/requirements.txt b/requirements.txt
index 4c667742..d13259a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
nose
mock
-tox
pylint
sphinx
+tox
diff --git a/setup.py b/setup.py
index e8780f3a..52eb086f 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ Coverage.py measures code coverage, typically during test execution. It uses
the code analysis tools and tracing hooks provided in the Python standard
library to determine which lines are executable, and which have been executed.
-Coverage.py runs on Pythons 2.6, 2.7, 3.2, 3.3, and PyPy 1.9.
+Coverage.py runs on CPython 2.6, 2.7, 3.2, 3.3, or 3.4, and PyPy 2.2.
Documentation is at `nedbatchelder.com <%s>`_. Code repository and issue
tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_, with a
diff --git a/tests/backtest.py b/tests/backtest.py
index 89a25536..439493d1 100644
--- a/tests/backtest.py
+++ b/tests/backtest.py
@@ -4,41 +4,31 @@
# (Redefining built-in blah)
# The whole point of this file is to redefine built-ins, so shut up about it.
-import os
+import subprocess
-# Py2 and Py3 don't agree on how to run commands in a subprocess.
-try:
- import subprocess
-except ImportError:
- def run_command(cmd, status=0):
- """Run a command in a subprocess.
-
- Returns the exit status code and the combined stdout and stderr.
- """
- _, stdouterr = os.popen4(cmd)
- return status, stdouterr.read()
+# This isn't really a backward-compatibility thing; it should be moved into a
+# helpers file or something.
+def run_command(cmd):
+ """Run a command in a subprocess.
-else:
- def run_command(cmd, status=0):
- """Run a command in a subprocess.
+ Returns the exit status code and the combined stdout and stderr.
- Returns the exit status code and the combined stdout and stderr.
+ """
+ proc = subprocess.Popen(cmd, shell=True,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT
+ )
+ output, _ = proc.communicate()
+ status = proc.returncode # pylint: disable=E1101
- """
- proc = subprocess.Popen(cmd, shell=True,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT
- )
- output, _ = proc.communicate()
- status = proc.returncode # pylint: disable=E1101
+ # Get the output, and canonicalize it to strings with newlines.
+ if not isinstance(output, str):
+ output = output.decode('utf-8')
+ output = output.replace('\r', '')
- # Get the output, and canonicalize it to strings with newlines.
- if not isinstance(output, str):
- output = output.decode('utf-8')
- output = output.replace('\r', '')
+ return status, output
- return status, output
# No more execfile in Py3
try:
@@ -46,4 +36,6 @@ try:
except NameError:
def execfile(filename, globs):
"""A Python 3 implementation of execfile."""
- exec(compile(open(filename).read(), filename, 'exec'), globs)
+ with open(filename) as fobj:
+ code = fobj.read()
+ exec(compile(code, filename, 'exec'), globs)
diff --git a/tests/backunittest.py b/tests/backunittest.py
deleted file mode 100644
index ca741d37..00000000
--- a/tests/backunittest.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Implementations of unittest features from the future."""
-
-# Use unittest2 if it's available, otherwise unittest. This gives us
-# backported features for 2.6.
-try:
- import unittest2 as unittest # pylint: disable=F0401
-except ImportError:
- import unittest
-
-
-def _need(method):
- """Do we need to define our own `method` method?"""
- return not hasattr(unittest.TestCase, method)
-
-
-class TestCase(unittest.TestCase):
- """Just like unittest.TestCase, but with assert methods added.
-
- Designed to be compatible with 3.1 unittest. Methods are only defined if
- `unittest` doesn't have them.
-
- """
- if _need('assertSameElements'):
- def assertSameElements(self, s1, s2):
- """Assert that the two arguments are equal as sets."""
- self.assertEqual(set(s1), set(s2))
diff --git a/tests/coveragetest.py b/tests/coveragetest.py
index 0467d808..4053059f 100644
--- a/tests/coveragetest.py
+++ b/tests/coveragetest.py
@@ -1,47 +1,39 @@
"""Base test case class for coverage testing."""
-import glob, imp, os, random, shlex, shutil, sys, tempfile, textwrap
-import atexit, collections
+import glob, os, random, re, shlex, shutil, sys
import coverage
-from coverage.backward import StringIO, to_bytes
+from coverage.backunittest import TestCase
+from coverage.backward import StringIO, import_local_file
+from coverage.backward import importlib # pylint: disable=unused-import
from coverage.control import _TEST_NAME_FILE
-from tests.backtest import run_command
-from tests.backunittest import TestCase
-
-class Tee(object):
- """A file-like that writes to all the file-likes it has."""
-
- def __init__(self, *files):
- """Make a Tee that writes to all the files in `files.`"""
- self._files = files
- if hasattr(files[0], "encoding"):
- self.encoding = files[0].encoding
-
- def write(self, data):
- """Write `data` to all the files."""
- for f in self._files:
- f.write(data)
+from coverage.test_helpers import (
+ ModuleAwareMixin, SysPathAwareMixin, EnvironmentAwareMixin,
+ StdStreamCapturingMixin, TempDirMixin,
+)
- if 0:
- # Use this if you need to use a debugger, though it makes some tests
- # fail, I'm not sure why...
- def __getattr__(self, name):
- return getattr(self._files[0], name)
+from tests.backtest import run_command
# Status returns for the command line.
OK, ERR = 0, 1
-class CoverageTest(TestCase):
+class CoverageTest(
+ ModuleAwareMixin,
+ SysPathAwareMixin,
+ EnvironmentAwareMixin,
+ StdStreamCapturingMixin,
+ TempDirMixin,
+ TestCase
+):
"""A base class for Coverage test cases."""
- # Our own setting: most CoverageTests run in their own temp directory.
- run_in_temp_dir = True
-
# Standard unittest setting: show me diffs even if they are very long.
maxDiff = None
+ # Tell newer unittest implementations to print long helpful messages.
+ longMessage = True
+
def setUp(self):
super(CoverageTest, self).setUp()
@@ -50,151 +42,6 @@ class CoverageTest(TestCase):
f.write("%s_%s" % (self.__class__.__name__, self._testMethodName))
f.close()
- # Tell newer unittest implementations to print long helpful messages.
- self.longMessage = True
-
- # tearDown will restore the original sys.path
- self.old_syspath = sys.path[:]
-
- if self.run_in_temp_dir:
- # Create a temporary directory.
- self.noise = str(random.random())[2:]
- self.temp_root = os.path.join(tempfile.gettempdir(), 'test_cover')
- self.temp_dir = os.path.join(self.temp_root, self.noise)
- os.makedirs(self.temp_dir)
- self.old_dir = os.getcwd()
- os.chdir(self.temp_dir)
-
- # Modules should be importable from this temp directory. We don't
- # use '' because we make lots of different temp directories and
- # nose's caching importer can get confused. The full path prevents
- # problems.
- sys.path.insert(0, os.getcwd())
-
- # Keep a counter to make every call to check_coverage unique.
- self.n = 0
-
- # Record environment variables that we changed with set_environ.
- self.environ_undos = {}
-
- # Capture stdout and stderr so we can examine them in tests.
- # nose keeps stdout from littering the screen, so we can safely Tee it,
- # but it doesn't capture stderr, so we don't want to Tee stderr to the
- # real stderr, since it will interfere with our nice field of dots.
- self.old_stdout = sys.stdout
- self.captured_stdout = StringIO()
- sys.stdout = Tee(sys.stdout, self.captured_stdout)
- self.old_stderr = sys.stderr
- self.captured_stderr = StringIO()
- sys.stderr = self.captured_stderr
-
- # Record sys.modules here so we can restore it in tearDown.
- self.old_modules = dict(sys.modules)
-
- class_behavior = self.class_behavior()
- class_behavior.tests += 1
- class_behavior.test_method_made_any_files = False
- class_behavior.temp_dir = self.run_in_temp_dir
-
- def tearDown(self):
- super(CoverageTest, self).tearDown()
-
- # Restore the original sys.path.
- sys.path = self.old_syspath
-
- if self.run_in_temp_dir:
- # Get rid of the temporary directory.
- os.chdir(self.old_dir)
- shutil.rmtree(self.temp_root)
-
- # Restore the environment.
- self.undo_environ()
-
- # Restore stdout and stderr
- sys.stdout = self.old_stdout
- sys.stderr = self.old_stderr
-
- self.clean_modules()
-
- class_behavior = self.class_behavior()
- if class_behavior.test_method_made_any_files:
- class_behavior.tests_making_files += 1
-
- def clean_modules(self):
- """Remove any new modules imported during the test run.
-
- This lets us import the same source files for more than one test.
-
- """
- for m in [m for m in sys.modules if m not in self.old_modules]:
- del sys.modules[m]
-
- def set_environ(self, name, value):
- """Set an environment variable `name` to be `value`.
-
- The environment variable is set, and record is kept that it was set,
- so that `tearDown` can restore its original value.
-
- """
- if name not in self.environ_undos:
- self.environ_undos[name] = os.environ.get(name)
- os.environ[name] = value
-
- def original_environ(self, name, if_missing=None):
- """The environment variable `name` from when the test started."""
- if name in self.environ_undos:
- ret = self.environ_undos[name]
- else:
- ret = os.environ.get(name)
- if ret is None:
- ret = if_missing
- return ret
-
- def undo_environ(self):
- """Undo all the changes made by `set_environ`."""
- for name, value in self.environ_undos.items():
- if value is None:
- del os.environ[name]
- else:
- os.environ[name] = value
-
- def stdout(self):
- """Return the data written to stdout during the test."""
- return self.captured_stdout.getvalue()
-
- def stderr(self):
- """Return the data written to stderr during the test."""
- return self.captured_stderr.getvalue()
-
- def make_file(self, filename, text="", newline=None):
- """Create a temp file.
-
- `filename` is the path to the file, including directories if desired,
- and `text` is the content. If `newline` is provided, it is a string
- that will be used as the line endings in the created file.
-
- Returns the path to the file.
-
- """
- # Tests that call `make_file` should be run in a temp environment.
- assert self.run_in_temp_dir
- self.class_behavior().test_method_made_any_files = True
-
- text = textwrap.dedent(text)
- if newline:
- text = text.replace("\n", newline)
-
- # Make sure the directories are available.
- dirs, _ = os.path.split(filename)
- if dirs and not os.path.exists(dirs):
- os.makedirs(dirs)
-
- # Create the file.
- with open(filename, 'wb') as f:
- f.write(to_bytes(text))
-
- return filename
-
def clean_local_file_imports(self):
"""Clean up the results of calls to `import_local_file`.
@@ -203,7 +50,7 @@ class CoverageTest(TestCase):
"""
# So that we can re-import files, clean them out first.
- self.clean_modules()
+ self.cleanup_modules()
# Also have to clean out the .pyc file, since the timestamp
# resolution is only one second, a changed file might not be
# picked up.
@@ -219,18 +66,7 @@ class CoverageTest(TestCase):
as `modname`, and returns the module object.
"""
- modfile = modname + '.py'
-
- for suff in imp.get_suffixes():
- if suff[0] == '.py':
- break
-
- with open(modfile, 'r') as f:
- # pylint: disable=W0631
- # (Using possibly undefined loop variable 'suff')
- mod = imp.load_module(modname, f, modfile, suff)
-
- return mod
+ return import_local_file(modname)
def start_import_stop(self, cov, modname):
"""Start coverage, import a file, then stop coverage.
@@ -252,13 +88,7 @@ class CoverageTest(TestCase):
def get_module_name(self):
"""Return the module name to use for this test run."""
- # We append self.n because otherwise two calls in one test will use the
- # same filename and whether the test works or not depends on the
- # timestamps in the .pyc file, so it becomes random whether the second
- # call will use the compiled version of the first call's code or not!
- modname = 'coverage_test_' + self.noise + str(self.n)
- self.n += 1
- return modname
+ return 'coverage_test_' + str(random.random())[2:]
# Map chars to numbers for arcz_to_arcs
_arcz_map = {'.': -1}
@@ -363,19 +193,21 @@ class CoverageTest(TestCase):
if statements == line_list:
break
else:
- self.fail("None of the lines choices matched %r" %
- statements
+ self.fail(
+ "None of the lines choices matched %r" % statements
)
+ missing_formatted = analysis.missing_formatted()
if type(missing) == type(""):
- self.assertEqual(analysis.missing_formatted(), missing)
+ self.assertEqual(missing_formatted, missing)
else:
for missing_list in missing:
- if analysis.missing_formatted() == missing_list:
+ if missing_formatted == missing_list:
break
else:
- self.fail("None of the missing choices matched %r" %
- analysis.missing_formatted()
+ self.fail(
+ "None of the missing choices matched %r" %
+ missing_formatted
)
if arcs is not None:
@@ -410,17 +242,17 @@ class CoverageTest(TestCase):
"""Assert that `flist1` and `flist2` are the same set of file names."""
flist1_nice = [self.nice_file(f) for f in flist1]
flist2_nice = [self.nice_file(f) for f in flist2]
- self.assertSameElements(flist1_nice, flist2_nice)
+ self.assertCountEqual(flist1_nice, flist2_nice)
def assert_exists(self, fname):
"""Assert that `fname` is a file that exists."""
msg = "File %r should exist" % fname
- self.assert_(os.path.exists(fname), msg)
+ self.assertTrue(os.path.exists(fname), msg)
def assert_doesnt_exist(self, fname):
"""Assert that `fname` is a file that doesn't exist."""
msg = "File %r shouldn't exist" % fname
- self.assert_(not os.path.exists(fname), msg)
+ self.assertTrue(not os.path.exists(fname), msg)
def assert_starts_with(self, s, prefix, msg=None):
"""Assert that `s` starts with `prefix`."""
@@ -464,7 +296,7 @@ class CoverageTest(TestCase):
_, output = self.run_command_status(cmd)
return output
- def run_command_status(self, cmd, status=0):
+ def run_command_status(self, cmd):
"""Run the command-line `cmd` in a subprocess, and print its output.
Use this when you need to test the process behavior of coverage.
@@ -473,9 +305,6 @@ class CoverageTest(TestCase):
Returns a pair: the process' exit status and stdout text.
- The `status` argument is returned as the status on older Pythons where
- we can't get the actual exit status of the process.
-
"""
# Add our test modules directory to PYTHONPATH. I'm sure there's too
# much path munging here, but...
@@ -488,58 +317,31 @@ class CoverageTest(TestCase):
pypath += testmods + os.pathsep + zipfile
self.set_environ('PYTHONPATH', pypath)
- status, output = run_command(cmd, status=status)
+ status, output = run_command(cmd)
print(output)
return status, output
- # We run some tests in temporary directories, because they may need to make
- # files for the tests. But this is expensive, so we can change per-class
- # whether a temp dir is used or not. It's easy to forget to set that
- # option properly, so we track information about what the tests did, and
- # then report at the end of the process on test classes that were set
- # wrong.
-
- class ClassBehavior(object):
- """A value object to store per-class in CoverageTest."""
- def __init__(self):
- self.tests = 0
- self.temp_dir = True
- self.tests_making_files = 0
- self.test_method_made_any_files = False
-
- # Map from class to info about how it ran.
- class_behaviors = collections.defaultdict(ClassBehavior)
-
- @classmethod
- def report_on_class_behavior(cls):
- """Called at process exit to report on class behavior."""
- for test_class, behavior in cls.class_behaviors.items():
- if behavior.temp_dir and behavior.tests_making_files == 0:
- bad = "Inefficient"
- elif not behavior.temp_dir and behavior.tests_making_files > 0:
- bad = "Unsafe"
- else:
- bad = ""
-
- if bad:
- if behavior.temp_dir:
- where = "in a temp directory"
- else:
- where = "without a temp directory"
- print(
- "%s: %s ran %d tests, %d made files %s" % (
- bad,
- test_class.__name__,
- behavior.tests,
- behavior.tests_making_files,
- where,
- )
- )
-
- def class_behavior(self):
- """Get the ClassBehavior instance for this test."""
- return self.class_behaviors[self.__class__]
-
-
-# When the process ends, find out about bad classes.
-atexit.register(CoverageTest.report_on_class_behavior)
+ def report_from_command(self, cmd):
+ """Return the report from the `cmd`, with some convenience added."""
+ report = self.run_command(cmd).replace('\\', '/')
+ self.assertNotIn("error", report.lower())
+ return report
+
+ def report_lines(self, report):
+ """Return the lines of the report, as a list."""
+ lines = report.split('\n')
+ self.assertEqual(lines[-1], "")
+ return lines[:-1]
+
+ def line_count(self, report):
+ """How many lines are in `report`?"""
+ return len(self.report_lines(report))
+
+ def squeezed_lines(self, report):
+ """Return a list of the lines in report, with the spaces squeezed."""
+ lines = self.report_lines(report)
+ return [re.sub(r"\s+", " ", l.strip()) for l in lines]
+
+ def last_line_squeezed(self, report):
+ """Return the last line of `report` with the spaces squeezed down."""
+ return self.squeezed_lines(report)[-1]
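
A sketch of how a test might use the new report helpers; the test class, file
name, and expected contents here are hypothetical.

    from tests.coveragetest import CoverageTest

    class ReportHelpersExample(CoverageTest):
        """Illustrative only: shows the helpers added above in use."""

        def test_last_line_of_summary(self):
            self.make_file("mycode.py", "a = 1\nb = 2\n")
            self.run_command("coverage run mycode.py")
            report = self.report_from_command("coverage report")
            # report_from_command already checked that "error" doesn't appear.
            # The squeezed helpers normalize column spacing, so assertions
            # don't depend on the exact report widths.
            self.assertGreaterEqual(self.line_count(report), 3)
            self.assertIn("mycode", self.last_line_squeezed(report))
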
diff --git a/tests/farm/annotate/annotate_dir.py b/tests/farm/annotate/annotate_dir.py
index 3e37f9ed..86c18cab 100644
--- a/tests/farm/annotate/annotate_dir.py
+++ b/tests/farm/annotate/annotate_dir.py
@@ -1,7 +1,7 @@
copy("src", "run")
run("""
- coverage -e -x multi.py
- coverage -a -d out_anno_dir
+ coverage run multi.py
+ coverage annotate -d out_anno_dir
""", rundir="run")
compare("run/out_anno_dir", "gold_anno_dir", "*,cover", left_extra=True)
clean("run")
diff --git a/tests/farm/annotate/run.py b/tests/farm/annotate/run.py
index c645f21c..236f401f 100644
--- a/tests/farm/annotate/run.py
+++ b/tests/farm/annotate/run.py
@@ -1,7 +1,7 @@
copy("src", "out")
run("""
- coverage -e -x white.py
- coverage -a white.py
+ coverage run white.py
+ coverage annotate white.py
""", rundir="out")
compare("out", "gold", "*,cover")
clean("out")
diff --git a/tests/farm/annotate/run_multi.py b/tests/farm/annotate/run_multi.py
index 4e8252ed..ef1e8238 100644
--- a/tests/farm/annotate/run_multi.py
+++ b/tests/farm/annotate/run_multi.py
@@ -1,7 +1,7 @@
copy("src", "out_multi")
run("""
- coverage -e -x multi.py
- coverage -a
+ coverage run multi.py
+ coverage annotate
""", rundir="out_multi")
compare("out_multi", "gold_multi", "*,cover")
clean("out_multi")
diff --git a/tests/farm/html/gold_x_xml/coverage.xml b/tests/farm/html/gold_x_xml/coverage.xml
index 912112f2..d5a8c442 100644
--- a/tests/farm/html/gold_x_xml/coverage.xml
+++ b/tests/farm/html/gold_x_xml/coverage.xml
@@ -3,6 +3,9 @@
SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-03.dtd'>
<coverage branch-rate="0" line-rate="0.6667" timestamp="1253972570431" version="3.1b1">
<!-- Generated by coverage.py: http://nedbatchelder.com/code/coverage/VER -->
+ <sources>
+ <source></source>
+ </sources>
<packages>
<package branch-rate="0" complexity="0" line-rate="0.6667" name="">
<classes>
diff --git a/tests/farm/html/gold_y_xml_branch/coverage.xml b/tests/farm/html/gold_y_xml_branch/coverage.xml
index ecbe0073..86e9e73c 100644
--- a/tests/farm/html/gold_y_xml_branch/coverage.xml
+++ b/tests/farm/html/gold_y_xml_branch/coverage.xml
@@ -3,6 +3,9 @@
SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-03.dtd'>
<coverage branch-rate="0.5" line-rate="0.8" timestamp="1259288252325" version="3.2b4">
<!-- Generated by coverage.py: http://nedbatchelder.com/code/coverage/VER -->
+ <sources>
+ <source></source>
+ </sources>
<packages>
<package branch-rate="0.5" complexity="0" line-rate="0.8" name="">
<classes>
diff --git a/tests/farm/html/run_a_xml_1.py b/tests/farm/html/run_a_xml_1.py
index 3d187023..83f8c86d 100644
--- a/tests/farm/html/run_a_xml_1.py
+++ b/tests/farm/html/run_a_xml_1.py
@@ -1,3 +1,5 @@
+source_path = None
+
def html_it():
"""Run coverage and make an XML report for a."""
import coverage
@@ -6,6 +8,8 @@ def html_it():
import a # pragma: nested
cov.stop() # pragma: nested
cov.xml_report(a, outfile="../xml_1/coverage.xml")
+ global source_path
+ source_path = cov.file_locator.relative_dir.rstrip('/')
import os
if not os.path.exists("xml_1"):
@@ -16,6 +20,7 @@ runfunc(html_it, rundir="src")
compare("gold_x_xml", "xml_1", scrubs=[
(r' timestamp="\d+"', ' timestamp="TIMESTAMP"'),
(r' version="[-.\w]+"', ' version="VERSION"'),
+ (r'<source>\s*.*?\s*</source>', '<source>%s</source>' % source_path),
(r'/code/coverage/?[-.\w]*', '/code/coverage/VER'),
])
clean("xml_1")
diff --git a/tests/farm/html/run_a_xml_2.py b/tests/farm/html/run_a_xml_2.py
index 53691ead..6dd44225 100644
--- a/tests/farm/html/run_a_xml_2.py
+++ b/tests/farm/html/run_a_xml_2.py
@@ -1,3 +1,5 @@
+source_path = None
+
def html_it():
"""Run coverage and make an XML report for a."""
import coverage
@@ -6,6 +8,8 @@ def html_it():
import a # pragma: nested
cov.stop() # pragma: nested
cov.xml_report(a)
+ global source_path
+ source_path = cov.file_locator.relative_dir.rstrip('/')
import os
if not os.path.exists("xml_2"):
@@ -16,6 +20,7 @@ runfunc(html_it, rundir="src")
compare("gold_x_xml", "xml_2", scrubs=[
(r' timestamp="\d+"', ' timestamp="TIMESTAMP"'),
(r' version="[-.\w]+"', ' version="VERSION"'),
+ (r'<source>\s*.*?\s*</source>', '<source>%s</source>' % source_path),
(r'/code/coverage/?[-.\w]*', '/code/coverage/VER'),
])
clean("xml_2")
diff --git a/tests/farm/html/run_unicode.py b/tests/farm/html/run_unicode.py
index cef26ee5..c8cb6c50 100644
--- a/tests/farm/html/run_unicode.py
+++ b/tests/farm/html/run_unicode.py
@@ -1,5 +1,3 @@
-import sys
-
def html_it():
"""Run coverage and make an HTML report for unicode.py."""
import coverage
@@ -18,13 +16,9 @@ contains("html_unicode/unicode.html",
"<span class='str'>&quot;&#654;d&#729;&#477;b&#592;&#633;&#477;&#652;o&#596;&quot;</span>",
)
-if sys.maxunicode == 65535:
- contains("html_unicode/unicode.html",
- "<span class='str'>&quot;db40,dd00: x&#56128;&#56576;&quot;</span>",
- )
-else:
- contains("html_unicode/unicode.html",
- "<span class='str'>&quot;db40,dd00: x&#917760;&quot;</span>",
- )
+contains_any("html_unicode/unicode.html",
+ "<span class='str'>&quot;db40,dd00: x&#56128;&#56576;&quot;</span>",
+ "<span class='str'>&quot;db40,dd00: x&#917760;&quot;</span>",
+ )
clean("html_unicode")
diff --git a/tests/farm/html/run_y_xml_branch.py b/tests/farm/html/run_y_xml_branch.py
index 88a2e44e..9ae9a9f0 100644
--- a/tests/farm/html/run_y_xml_branch.py
+++ b/tests/farm/html/run_y_xml_branch.py
@@ -1,3 +1,5 @@
+source_path = None
+
def xml_it():
"""Run coverage and make an XML report for y."""
import coverage
@@ -6,6 +8,8 @@ def xml_it():
import y # pragma: nested
cov.stop() # pragma: nested
cov.xml_report(y, outfile="../xml_branch/coverage.xml")
+ global source_path
+ source_path = cov.file_locator.relative_dir.rstrip('/')
import os
if not os.path.exists("xml_branch"):
@@ -16,6 +20,7 @@ runfunc(xml_it, rundir="src")
compare("gold_y_xml_branch", "xml_branch", scrubs=[
(r' timestamp="\d+"', ' timestamp="TIMESTAMP"'),
(r' version="[-.\w]+"', ' version="VERSION"'),
+ (r'<source>\s*.*?\s*</source>', '<source>%s</source>' % source_path),
(r'/code/coverage/?[-.\w]*', '/code/coverage/VER'),
])
clean("xml_branch")
diff --git a/tests/farm/html/src/coverage.xml b/tests/farm/html/src/coverage.xml
index 128cf750..e20cdaec 100644
--- a/tests/farm/html/src/coverage.xml
+++ b/tests/farm/html/src/coverage.xml
@@ -3,6 +3,9 @@
SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-03.dtd'>
<coverage branch-rate="0.0" line-rate="0.666666666667" timestamp="1263087779313" version="3.3a1">
<!-- Generated by coverage.py: http://nedbatchelder.com/code/coverage -->
+ <sources>
+ <source></source>
+ </sources>
<packages>
<package branch-rate="0.0" complexity="0.0" line-rate="0.666666666667" name="">
<classes>
diff --git a/tests/farm/run/run_chdir.py b/tests/farm/run/run_chdir.py
index f459f500..367cd0ad 100644
--- a/tests/farm/run/run_chdir.py
+++ b/tests/farm/run/run_chdir.py
@@ -1,7 +1,7 @@
copy("src", "out")
run("""
coverage run chdir.py
- coverage -r
+ coverage report
""", rundir="out", outfile="stdout.txt")
contains("out/stdout.txt",
"Line One",
diff --git a/tests/farm/run/run_timid.py b/tests/farm/run/run_timid.py
index ce78fff1..d4e69a46 100644
--- a/tests/farm/run/run_timid.py
+++ b/tests/farm/run/run_timid.py
@@ -17,8 +17,8 @@ if os.environ.get('COVERAGE_COVERAGE', ''):
copy("src", "out")
run("""
python showtrace.py none
- coverage -e -x showtrace.py regular
- coverage -e -x --timid showtrace.py timid
+ coverage run showtrace.py regular
+ coverage run --timid showtrace.py timid
""", rundir="out", outfile="showtraceout.txt")
# When running without coverage, no trace function
@@ -42,8 +42,8 @@ old_opts = os.environ.get('COVERAGE_OPTIONS')
os.environ['COVERAGE_OPTIONS'] = '--timid'
run("""
- coverage -e -x showtrace.py regular
- coverage -e -x --timid showtrace.py timid
+ coverage run showtrace.py regular
+ coverage run --timid showtrace.py timid
""", rundir="out", outfile="showtraceout.txt")
contains("out/showtraceout.txt",
diff --git a/tests/farm/run/run_xxx.py b/tests/farm/run/run_xxx.py
index 19e94a42..6fedc934 100644
--- a/tests/farm/run/run_xxx.py
+++ b/tests/farm/run/run_xxx.py
@@ -1,7 +1,7 @@
copy("src", "out")
run("""
- coverage -e -x xxx
- coverage -r
+ coverage run xxx
+ coverage report
""", rundir="out", outfile="stdout.txt")
contains("out/stdout.txt",
"xxx: 3 4 0 7",
diff --git a/tests/modules/pkg1/p1a.py b/tests/modules/pkg1/p1a.py
index be5fcdd3..337add49 100644
--- a/tests/modules/pkg1/p1a.py
+++ b/tests/modules/pkg1/p1a.py
@@ -1,5 +1,5 @@
import os, sys
# Invoke functions in os and sys so we can see if we measure code there.
-x = sys.getcheckinterval()
+x = sys.getfilesystemencoding()
y = os.getcwd()
diff --git a/tests/modules/plugins/__init__.py b/tests/modules/plugins/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/modules/plugins/__init__.py
diff --git a/tests/modules/plugins/a_plugin.py b/tests/modules/plugins/a_plugin.py
new file mode 100644
index 00000000..2ff84dac
--- /dev/null
+++ b/tests/modules/plugins/a_plugin.py
@@ -0,0 +1,6 @@
+"""A plugin for tests to reference."""
+
+from coverage import CoveragePlugin
+
+class Plugin(CoveragePlugin):
+ pass
diff --git a/tests/modules/plugins/another.py b/tests/modules/plugins/another.py
new file mode 100644
index 00000000..2ff84dac
--- /dev/null
+++ b/tests/modules/plugins/another.py
@@ -0,0 +1,6 @@
+"""A plugin for tests to reference."""
+
+from coverage import CoveragePlugin
+
+class Plugin(CoveragePlugin):
+ pass
diff --git a/tests/test_api.py b/tests/test_api.py
index 097947d2..31bfc57f 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -100,7 +100,7 @@ class ApiTest(CoverageTest):
"""Assert that the files here are `files`, ignoring the usual junk."""
here = os.listdir(".")
here = self.clean_files(here, ["*.pyc", "__pycache__"])
- self.assertSameElements(here, files)
+ self.assertCountEqual(here, files)
def test_unexecuted_file(self):
cov = coverage.coverage()
@@ -221,7 +221,7 @@ class ApiTest(CoverageTest):
self.assertEqual(cov.get_exclude_list(), ["foo"])
cov.exclude("bar")
self.assertEqual(cov.get_exclude_list(), ["foo", "bar"])
- self.assertEqual(cov._exclude_regex('exclude'), "(foo)|(bar)")
+ self.assertEqual(cov._exclude_regex('exclude'), "(?:foo)|(?:bar)")
cov.clear_exclude()
self.assertEqual(cov.get_exclude_list(), [])
@@ -233,7 +233,9 @@ class ApiTest(CoverageTest):
self.assertEqual(cov.get_exclude_list(which='partial'), ["foo"])
cov.exclude("bar", which='partial')
self.assertEqual(cov.get_exclude_list(which='partial'), ["foo", "bar"])
- self.assertEqual(cov._exclude_regex(which='partial'), "(foo)|(bar)")
+ self.assertEqual(
+ cov._exclude_regex(which='partial'), "(?:foo)|(?:bar)"
+ )
cov.clear_exclude(which='partial')
self.assertEqual(cov.get_exclude_list(which='partial'), [])
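
The changed assertions above reflect how the exclude patterns are now joined:
each pattern is wrapped in a non-capturing group before being OR-ed together.
A small sketch of that joining, with an illustrative pattern list:

    import re

    patterns = ["foo", r"pragma:?\s+no cover"]
    combined = "|".join("(?:%s)" % pat for pat in patterns)
    assert combined == r"(?:foo)|(?:pragma:?\s+no cover)"

    # Non-capturing groups keep each alternative grouped without adding a
    # numbered capture group per exclude pattern.
    print(bool(re.search(combined, "x = 1   # pragma: no cover")))   # True
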
diff --git a/tests/test_backward.py b/tests/test_backward.py
index e98017ae..09803ba7 100644
--- a/tests/test_backward.py
+++ b/tests/test_backward.py
@@ -1,18 +1,16 @@
"""Tests that our version shims in backward.py are working."""
+from coverage.backunittest import TestCase
from coverage.backward import iitems, binary_bytes, byte_to_int, bytes_to_ints
-from tests.backunittest import TestCase
class BackwardTest(TestCase):
"""Tests of things from backward.py."""
- run_in_temp_dir = False
-
def test_iitems(self):
d = {'a': 1, 'b': 2, 'c': 3}
items = [('a', 1), ('b', 2), ('c', 3)]
- self.assertSameElements(list(iitems(d)), items)
+ self.assertCountEqual(list(iitems(d)), items)
def test_binary_bytes(self):
byte_values = [0, 255, 17, 23, 42, 57]
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 99bae516..08f7937a 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -72,7 +72,7 @@ class CmdLineTest(CoverageTest):
code = re.sub(r"(?m)^\.", "m2.", code)
m2 = self.model_object()
code_obj = compile(code, "<code>", "exec")
- eval(code_obj, globals(), { 'm2': m2 })
+ eval(code_obj, globals(), { 'm2': m2 }) # pylint: disable=eval-used
# Many of our functions take a lot of arguments, and cmdline.py
# calls them with many. But most of them are just the defaults, which
@@ -754,7 +754,7 @@ class CmdMainTest(CoverageTest):
self.assertEqual(err[-2], 'Exception: oh noes!')
def test_internalraise(self):
- with self.assertRaisesRegexp(ValueError, "coverage is broken"):
+ with self.assertRaisesRegex(ValueError, "coverage is broken"):
coverage.cmdline.main(['internalraise'])
def test_exit(self):
diff --git a/tests/test_codeunit.py b/tests/test_codeunit.py
index e4912e11..fe82ea1c 100644
--- a/tests/test_codeunit.py
+++ b/tests/test_codeunit.py
@@ -31,9 +31,9 @@ class CodeUnitTest(CoverageTest):
self.assertEqual(acu[0].flat_rootname(), "aa_afile")
self.assertEqual(bcu[0].flat_rootname(), "aa_bb_bfile")
self.assertEqual(ccu[0].flat_rootname(), "aa_bb_cc_cfile")
- self.assertEqual(acu[0].source_file().read(), "# afile.py\n")
- self.assertEqual(bcu[0].source_file().read(), "# bfile.py\n")
- self.assertEqual(ccu[0].source_file().read(), "# cfile.py\n")
+ self.assertEqual(acu[0].source(), "# afile.py\n")
+ self.assertEqual(bcu[0].source(), "# bfile.py\n")
+ self.assertEqual(ccu[0].source(), "# cfile.py\n")
def test_odd_filenames(self):
acu = code_unit_factory("aa/afile.odd.py", FileLocator())
@@ -45,9 +45,9 @@ class CodeUnitTest(CoverageTest):
self.assertEqual(acu[0].flat_rootname(), "aa_afile_odd")
self.assertEqual(bcu[0].flat_rootname(), "aa_bb_bfile_odd")
self.assertEqual(b2cu[0].flat_rootname(), "aa_bb_odd_bfile")
- self.assertEqual(acu[0].source_file().read(), "# afile.odd.py\n")
- self.assertEqual(bcu[0].source_file().read(), "# bfile.odd.py\n")
- self.assertEqual(b2cu[0].source_file().read(), "# bfile.py\n")
+ self.assertEqual(acu[0].source(), "# afile.odd.py\n")
+ self.assertEqual(bcu[0].source(), "# bfile.odd.py\n")
+ self.assertEqual(b2cu[0].source(), "# bfile.py\n")
def test_modules(self):
import aa, aa.bb, aa.bb.cc
@@ -58,9 +58,9 @@ class CodeUnitTest(CoverageTest):
self.assertEqual(cu[0].flat_rootname(), "aa")
self.assertEqual(cu[1].flat_rootname(), "aa_bb")
self.assertEqual(cu[2].flat_rootname(), "aa_bb_cc")
- self.assertEqual(cu[0].source_file().read(), "# aa\n")
- self.assertEqual(cu[1].source_file().read(), "# bb\n")
- self.assertEqual(cu[2].source_file().read(), "") # yes, empty
+ self.assertEqual(cu[0].source(), "# aa\n")
+ self.assertEqual(cu[1].source(), "# bb\n")
+ self.assertEqual(cu[2].source(), "") # yes, empty
def test_module_files(self):
import aa.afile, aa.bb.bfile, aa.bb.cc.cfile
@@ -72,9 +72,9 @@ class CodeUnitTest(CoverageTest):
self.assertEqual(cu[0].flat_rootname(), "aa_afile")
self.assertEqual(cu[1].flat_rootname(), "aa_bb_bfile")
self.assertEqual(cu[2].flat_rootname(), "aa_bb_cc_cfile")
- self.assertEqual(cu[0].source_file().read(), "# afile.py\n")
- self.assertEqual(cu[1].source_file().read(), "# bfile.py\n")
- self.assertEqual(cu[2].source_file().read(), "# cfile.py\n")
+ self.assertEqual(cu[0].source(), "# afile.py\n")
+ self.assertEqual(cu[1].source(), "# bfile.py\n")
+ self.assertEqual(cu[2].source(), "# cfile.py\n")
def test_comparison(self):
acu = code_unit_factory("aa/afile.py", FileLocator())[0]
@@ -97,7 +97,7 @@ class CodeUnitTest(CoverageTest):
self.assert_doesnt_exist(egg1.__file__)
cu = code_unit_factory([egg1, egg1.egg1], FileLocator())
- self.assertEqual(cu[0].source_file().read(), "")
- self.assertEqual(cu[1].source_file().read().split("\n")[0],
+ self.assertEqual(cu[0].source(), "")
+ self.assertEqual(cu[1].source().split("\n")[0],
"# My egg file!"
)
diff --git a/tests/test_config.py b/tests/test_config.py
index 7fa31208..bf84423d 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
"""Test the config file handling for coverage.py"""
+import sys, os
+
import coverage
from coverage.misc import CoverageException
@@ -125,58 +127,84 @@ class ConfigTest(CoverageTest):
class ConfigFileTest(CoverageTest):
"""Tests of the config file settings in particular."""
- def test_config_file_settings(self):
- # This sample file tries to use lots of variation of syntax...
- self.make_file(".coveragerc", """\
- # This is a settings file for coverage.py
- [run]
- timid = yes
- data_file = something_or_other.dat
- branch = 1
- cover_pylib = TRUE
- parallel = on
- include = a/ , b/
-
- [report]
- ; these settings affect reporting.
- exclude_lines =
- if 0:
-
- pragma:?\\s+no cover
- another_tab
-
- ignore_errors = TRUE
- omit =
- one, another, some_more,
- yet_more
- precision = 3
-
- partial_branches =
- pragma:?\\s+no branch
- partial_branches_always =
- if 0:
- while True:
-
- show_missing= TruE
-
- [html]
-
- directory = c:\\tricky\\dir.somewhere
- extra_css=something/extra.css
- title = Title & nums # nums!
- [xml]
- output=mycov.xml
-
- [paths]
- source =
- .
- /home/ned/src/
-
- other = other, /home/ned/other, c:\\Ned\\etc
-
- """)
- cov = coverage.coverage()
-
+ def setUp(self):
+ super(ConfigFileTest, self).setUp()
+ # Parent class saves and restores sys.path, we can just modify it.
+ # Add modules to the path so we can import plugins.
+ sys.path.append(self.nice_file(os.path.dirname(__file__), 'modules'))
+
+ # This sample file tries to use lots of variation of syntax...
+ # The {section} placeholder lets us nest these settings in another file.
+ LOTSA_SETTINGS = """\
+ # This is a settings file for coverage.py
+ [{section}run]
+ timid = yes
+ data_file = something_or_other.dat
+ branch = 1
+ cover_pylib = TRUE
+ parallel = on
+ include = a/ , b/
+ plugins =
+ plugins.a_plugin
+ plugins.another
+
+ [{section}report]
+ ; these settings affect reporting.
+ exclude_lines =
+ if 0:
+
+ pragma:?\\s+no cover
+ another_tab
+
+ ignore_errors = TRUE
+ omit =
+ one, another, some_more,
+ yet_more
+ precision = 3
+
+ partial_branches =
+ pragma:?\\s+no branch
+ partial_branches_always =
+ if 0:
+ while True:
+
+ show_missing= TruE
+
+ [{section}html]
+
+ directory = c:\\tricky\\dir.somewhere
+ extra_css=something/extra.css
+ title = Title & nums # nums!
+ [{section}xml]
+ output=mycov.xml
+
+ [{section}paths]
+ source =
+ .
+ /home/ned/src/
+
+ other = other, /home/ned/other, c:\\Ned\\etc
+
+ [{section}plugins.a_plugin]
+ hello = world
+ ; comments still work.
+ names = Jane/John/Jenny
+ """
+
+ # Just some sample setup.cfg text from the docs.
+ SETUP_CFG = """\
+ [bdist_rpm]
+ release = 1
+ packager = Jane Packager <janep@pysoft.com>
+ doc_files = CHANGES.txt
+ README.txt
+ USAGE.txt
+ doc/
+ examples/
+ """
+
+ def assert_config_settings_are_correct(self, cov):
+ """Check that `cov` has all the settings from LOTSA_SETTINGS."""
self.assertTrue(cov.config.timid)
self.assertEqual(cov.config.data_file, "something_or_other.dat")
self.assertTrue(cov.config.branch)
@@ -199,6 +227,9 @@ class ConfigFileTest(CoverageTest):
self.assertEqual(cov.config.partial_always_list,
["if 0:", "while True:"]
)
+ self.assertEqual(cov.config.plugins,
+ ["plugins.a_plugin", "plugins.another"]
+ )
self.assertTrue(cov.config.show_missing)
self.assertEqual(cov.config.html_dir, r"c:\tricky\dir.somewhere")
self.assertEqual(cov.config.extra_css, "something/extra.css")
@@ -211,8 +242,39 @@ class ConfigFileTest(CoverageTest):
'other': ['other', '/home/ned/other', 'c:\\Ned\\etc']
})
+ self.assertEqual(cov.config.get_plugin_options("plugins.a_plugin"), {
+ 'hello': 'world',
+ 'names': 'Jane/John/Jenny',
+ })
+ self.assertEqual(cov.config.get_plugin_options("plugins.another"), {})
+
+ def test_config_file_settings(self):
+ self.make_file(".coveragerc", self.LOTSA_SETTINGS.format(section=""))
+ cov = coverage.coverage()
+ self.assert_config_settings_are_correct(cov)
+
+ def test_config_file_settings_in_setupcfg(self):
+ nested = self.LOTSA_SETTINGS.format(section="coverage:")
+ self.make_file("setup.cfg", nested + "\n" + self.SETUP_CFG)
+ cov = coverage.coverage()
+ self.assert_config_settings_are_correct(cov)
+
+ def test_setupcfg_only_if_not_coveragerc(self):
+ self.make_file(".coveragerc", """\
+ [run]
+ include = foo
+ """)
+ self.make_file("setup.cfg", """\
+ [run]
+ omit = bar
+ branch = true
+ """)
+ cov = coverage.coverage()
+ self.assertEqual(cov.config.include, ["foo"])
+ self.assertEqual(cov.config.omit, None)
+ self.assertEqual(cov.config.branch, False)
+
def test_one(self):
- # This sample file tries to use lots of variation of syntax...
self.make_file(".coveragerc", """\
[html]
title = tabblo & «ταБЬℓσ» # numbers
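The nesting convention exercised above: the same settings text parses the same way whether it lives in .coveragerc under bare section names or in setup.cfg under sections prefixed with "coverage:". A minimal sketch of what the {section} placeholder produces (settings trimmed for brevity):

    lotsa = "[{section}run]\ntimid = yes\nbranch = 1\n"
    coveragerc_text = lotsa.format(section="")           # "[run]" form, for .coveragerc
    setup_cfg_text = lotsa.format(section="coverage:")   # "[coverage:run]" form, for setup.cfg
    print(coveragerc_text)
    print(setup_cfg_text)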
diff --git a/tests/test_coroutine.py b/tests/test_coroutine.py
new file mode 100644
index 00000000..4abdd6f6
--- /dev/null
+++ b/tests/test_coroutine.py
@@ -0,0 +1,208 @@
+"""Tests for coroutining."""
+
+import os, os.path, sys, threading
+
+import coverage
+
+from tests.coveragetest import CoverageTest
+
+
+# These libraries aren't always available; we'll skip tests if they aren't.
+
+try:
+ import eventlet # pylint: disable=import-error
+except ImportError:
+ eventlet = None
+
+try:
+ import gevent # pylint: disable=import-error
+except ImportError:
+ gevent = None
+
+try:
+ import greenlet # pylint: disable=import-error
+except ImportError:
+ greenlet = None
+
+# Are we running with the C tracer or not?
+C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
+
+
+def line_count(s):
+ """How many non-blank non-comment lines are in `s`?"""
+ def code_line(l):
+ """Is this a code line? Not blank, and not a full-line comment."""
+ return l.strip() and not l.strip().startswith('#')
+ return sum(1 for l in s.splitlines() if code_line(l))
+
+
+class CoroutineTest(CoverageTest):
+ """Tests of the coroutine support in coverage.py."""
+
+ LIMIT = 1000
+
+ # The code common to all the concurrency models.
+ COMMON = """
+ class Producer(threading.Thread):
+ def __init__(self, q):
+ threading.Thread.__init__(self)
+ self.q = q
+
+ def run(self):
+ for i in range({LIMIT}):
+ self.q.put(i)
+ self.q.put(None)
+
+ class Consumer(threading.Thread):
+ def __init__(self, q):
+ threading.Thread.__init__(self)
+ self.q = q
+
+ def run(self):
+ sum = 0
+ while True:
+ i = self.q.get()
+ if i is None:
+ print(sum)
+ break
+ sum += i
+
+ q = queue.Queue()
+ c = Consumer(q)
+ p = Producer(q)
+ c.start()
+ p.start()
+
+ p.join()
+ c.join()
+ """.format(LIMIT=LIMIT)
+
+ # Import the things to use threads.
+ if sys.version_info < (3, 0):
+ THREAD = """\
+ import threading
+ import Queue as queue
+ """ + COMMON
+ else:
+ THREAD = """\
+ import threading
+ import queue
+ """ + COMMON
+
+ # Import the things to use eventlet.
+ EVENTLET = """\
+ import eventlet.green.threading as threading
+ import eventlet.queue as queue
+ """ + COMMON
+
+ # Import the things to use gevent.
+ GEVENT = """\
+ from gevent import monkey
+ monkey.patch_thread()
+ import threading
+ import gevent.queue as queue
+ """ + COMMON
+
+ # Uncomplicated code that doesn't use any of the coroutining stuff, to test
+ # the simple case under each of the regimes.
+ SIMPLE = """\
+ total = 0
+ for i in range({LIMIT}):
+ total += i
+ print(total)
+ """.format(LIMIT=LIMIT)
+
+ def try_some_code(self, code, coroutine, the_module, expected_out=None):
+ """Run some coroutine testing code and see that it was all covered.
+
+ `code` is the Python code to execute. `coroutine` is the name of the
+ coroutine regime to test it under. `the_module` is the imported module
+ that must be available for this to work at all. `expected_out` is the
+ text we expect the code to produce.
+
+ """
+
+ self.make_file("try_it.py", code)
+
+ cmd = "coverage run --coroutine=%s try_it.py" % coroutine
+ out = self.run_command(cmd)
+
+ if not the_module:
+ # We don't even have the underlying module installed, so we expect
+ # coverage to alert us to this fact.
+ expected_out = (
+ "Couldn't trace with coroutine=%s, "
+ "the module isn't installed.\n" % coroutine
+ )
+ self.assertEqual(out, expected_out)
+ elif C_TRACER or coroutine == "thread":
+ # We can fully measure the code if we are using the C tracer, which
+ # can support all the coroutining, or if we are using threads.
+ if expected_out is None:
+ expected_out = "%d\n" % (sum(range(self.LIMIT)))
+ self.assertEqual(out, expected_out)
+
+ # Read the coverage file and see that try_it.py has all its lines
+ # executed.
+ data = coverage.CoverageData()
+ data.read_file(".coverage")
+
+ # If the test fails, it's helpful to see this info:
+ fname = os.path.abspath("try_it.py")
+ linenos = data.executed_lines(fname).keys()
+ print("{0}: {1}".format(len(linenos), linenos))
+ print_simple_annotation(code, linenos)
+
+ lines = line_count(code)
+ self.assertEqual(data.summary()['try_it.py'], lines)
+ else:
+ expected_out = (
+ "Can't support coroutine=%s with PyTracer, "
+ "only threads are supported\n" % coroutine
+ )
+ self.assertEqual(out, expected_out)
+
+ def test_threads(self):
+ self.try_some_code(self.THREAD, "thread", threading)
+
+ def test_threads_simple_code(self):
+ self.try_some_code(self.SIMPLE, "thread", threading)
+
+ def test_eventlet(self):
+ self.try_some_code(self.EVENTLET, "eventlet", eventlet)
+
+ def test_eventlet_simple_code(self):
+ self.try_some_code(self.SIMPLE, "eventlet", eventlet)
+
+ def test_gevent(self):
+ self.try_some_code(self.GEVENT, "gevent", gevent)
+
+ def test_gevent_simple_code(self):
+ self.try_some_code(self.SIMPLE, "gevent", gevent)
+
+ def test_greenlet(self):
+ GREENLET = """\
+ from greenlet import greenlet
+
+ def test1(x, y):
+ z = gr2.switch(x+y)
+ print(z)
+
+ def test2(u):
+ print(u)
+ gr1.switch(42)
+
+ gr1 = greenlet(test1)
+ gr2 = greenlet(test2)
+ gr1.switch("hello", " world")
+ """
+ self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")
+
+ def test_greenlet_simple_code(self):
+ self.try_some_code(self.SIMPLE, "greenlet", greenlet)
+
+
+def print_simple_annotation(code, linenos):
+ """Print the lines in `code` with X for each line number in `linenos`."""
+ for lineno, line in enumerate(code.splitlines(), start=1):
+ print(" {0} {1}".format("X" if lineno in linenos else " ", line))
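For reference, the command-line workflow these tests drive, limited to what try_some_code itself shows: run the program under one of the coroutine regimes, then read the recorded data back.

    # Shell step, using the option exercised above:
    #   coverage run --coroutine=gevent try_it.py
    import coverage

    data = coverage.CoverageData()
    data.read_file(".coverage")               # the data file written by that run
    print(data.summary().get("try_it.py"))    # count of distinct lines executed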
diff --git a/tests/test_coverage.py b/tests/test_coverage.py
index 33f644fa..565fa4e1 100644
--- a/tests/test_coverage.py
+++ b/tests/test_coverage.py
@@ -46,7 +46,7 @@ class TestCoverageTest(CoverageTest):
def test_failed_coverage(self):
# If the lines are wrong, the message shows right and wrong.
- with self.assertRaisesRegexp(AssertionError, r"\[1, 2] != \[1]"):
+ with self.assertRaisesRegex(AssertionError, r"\[1, 2] != \[1]"):
self.check_coverage("""\
a = 1
b = 2
@@ -55,7 +55,7 @@ class TestCoverageTest(CoverageTest):
)
# If the list of lines possibilities is wrong, the msg shows right.
msg = r"None of the lines choices matched \[1, 2]"
- with self.assertRaisesRegexp(AssertionError, msg):
+ with self.assertRaisesRegex(AssertionError, msg):
self.check_coverage("""\
a = 1
b = 2
@@ -63,7 +63,7 @@ class TestCoverageTest(CoverageTest):
([1], [2])
)
# If the missing lines are wrong, the message shows right and wrong.
- with self.assertRaisesRegexp(AssertionError, r"'3' != '37'"):
+ with self.assertRaisesRegex(AssertionError, r"'3' != '37'"):
self.check_coverage("""\
a = 1
if a == 2:
@@ -74,7 +74,7 @@ class TestCoverageTest(CoverageTest):
)
# If the missing lines possibilities are wrong, the msg shows right.
msg = r"None of the missing choices matched '3'"
- with self.assertRaisesRegexp(AssertionError, msg):
+ with self.assertRaisesRegex(AssertionError, msg):
self.check_coverage("""\
a = 1
if a == 2:
@@ -1671,7 +1671,7 @@ class ReportingTest(CoverageTest):
def test_no_data_to_report_on_annotate(self):
# Reporting with no data produces a nice message and no output dir.
- with self.assertRaisesRegexp(CoverageException, "No data to report."):
+ with self.assertRaisesRegex(CoverageException, "No data to report."):
self.command_line("annotate -d ann")
self.assert_doesnt_exist("ann")
@@ -1681,12 +1681,12 @@ class ReportingTest(CoverageTest):
def test_no_data_to_report_on_html(self):
# Reporting with no data produces a nice message and no output dir.
- with self.assertRaisesRegexp(CoverageException, "No data to report."):
+ with self.assertRaisesRegex(CoverageException, "No data to report."):
self.command_line("html -d htmlcov")
self.assert_doesnt_exist("htmlcov")
def test_no_data_to_report_on_xml(self):
# Reporting with no data produces a nice message.
- with self.assertRaisesRegexp(CoverageException, "No data to report."):
+ with self.assertRaisesRegex(CoverageException, "No data to report."):
self.command_line("xml")
self.assert_doesnt_exist("coverage.xml")
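The assertRaisesRegexp to assertRaisesRegex renames in this file (and the assertRegex and assertCountEqual renames below) depend on a TestCase that backfills the Python 3 spellings on older unittest versions. A minimal sketch of that kind of shim, which may differ from what coverage.backunittest actually does:

    import unittest

    class TestCase(unittest.TestCase):
        """unittest.TestCase with the Python 3 assert names available everywhere."""
        if not hasattr(unittest.TestCase, "assertRaisesRegex"):
            assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
        if not hasattr(unittest.TestCase, "assertRegex"):
            assertRegex = unittest.TestCase.assertRegexpMatches
        if not hasattr(unittest.TestCase, "assertCountEqual"):
            assertCountEqual = unittest.TestCase.assertItemsEqual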
diff --git a/tests/test_data.py b/tests/test_data.py
index 31578f26..b048fd18 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -33,7 +33,7 @@ class DataTest(CoverageTest):
def assert_measured_files(self, covdata, measured):
"""Check that `covdata`'s measured files are `measured`."""
- self.assertSameElements(covdata.measured_files(), measured)
+ self.assertCountEqual(covdata.measured_files(), measured)
def test_reading_empty(self):
covdata = CoverageData()
@@ -96,9 +96,9 @@ class DataTest(CoverageTest):
data = pickle.load(fdata)
lines = data['lines']
- self.assertSameElements(lines.keys(), MEASURED_FILES_1)
- self.assertSameElements(lines['a.py'], A_PY_LINES_1)
- self.assertSameElements(lines['b.py'], B_PY_LINES_1)
+ self.assertCountEqual(lines.keys(), MEASURED_FILES_1)
+ self.assertCountEqual(lines['a.py'], A_PY_LINES_1)
+ self.assertCountEqual(lines['b.py'], B_PY_LINES_1)
# If not measuring branches, there's no arcs entry.
self.assertEqual(data.get('arcs', 'not there'), 'not there')
@@ -111,10 +111,10 @@ class DataTest(CoverageTest):
with open(".coverage", 'rb') as fdata:
data = pickle.load(fdata)
- self.assertSameElements(data['lines'].keys(), [])
+ self.assertCountEqual(data['lines'].keys(), [])
arcs = data['arcs']
- self.assertSameElements(arcs['x.py'], X_PY_ARCS_3)
- self.assertSameElements(arcs['y.py'], Y_PY_ARCS_3)
+ self.assertCountEqual(arcs['x.py'], X_PY_ARCS_3)
+ self.assertCountEqual(arcs['y.py'], Y_PY_ARCS_3)
def test_combining_with_aliases(self):
covdata1 = CoverageData()
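These assertions unpickle the data file directly. The shape they rely on, as far as this hunk shows, is a dict with a 'lines' mapping and, only for branch runs, an 'arcs' mapping; the values below are illustrative, not taken from the test fixtures:

    sample_data = {
        'lines': {'a.py': [1, 2], 'b.py': [3]},    # filename -> executed line numbers
        'arcs': {'x.py': [(1, 2), (2, 3)]},        # filename -> arc pairs, branch runs only
    }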
diff --git a/tests/test_execfile.py b/tests/test_execfile.py
index 7cd8ac4e..69616e84 100644
--- a/tests/test_execfile.py
+++ b/tests/test_execfile.py
@@ -16,7 +16,7 @@ class RunFileTest(CoverageTest):
def test_run_python_file(self):
tryfile = os.path.join(here, "try_execfile.py")
run_python_file(tryfile, [tryfile, "arg1", "arg2"])
- mod_globs = eval(self.stdout())
+ mod_globs = eval(self.stdout()) # pylint: disable=eval-used
# The file should think it is __main__
self.assertEqual(mod_globs['__name__'], "__main__")
@@ -118,11 +118,11 @@ class RunPycFileTest(CoverageTest):
fpyc.write(binary_bytes([0x2a, 0xeb, 0x0d, 0x0a]))
fpyc.close()
- with self.assertRaisesRegexp(NoCode, "Bad magic number in .pyc file"):
+ with self.assertRaisesRegex(NoCode, "Bad magic number in .pyc file"):
run_python_file(pycfile, [pycfile])
def test_no_such_pyc_file(self):
- with self.assertRaisesRegexp(NoCode, "No file to run: 'xyzzy.pyc'"):
+ with self.assertRaisesRegex(NoCode, "No file to run: 'xyzzy.pyc'"):
run_python_file("xyzzy.pyc", [])
@@ -138,22 +138,27 @@ class RunModuleTest(CoverageTest):
def test_runmod1(self):
run_python_module("runmod1", ["runmod1", "hello"])
+ self.assertEqual(self.stderr(), "")
self.assertEqual(self.stdout(), "runmod1: passed hello\n")
def test_runmod2(self):
run_python_module("pkg1.runmod2", ["runmod2", "hello"])
+ self.assertEqual(self.stderr(), "")
self.assertEqual(self.stdout(), "runmod2: passed hello\n")
def test_runmod3(self):
run_python_module("pkg1.sub.runmod3", ["runmod3", "hello"])
+ self.assertEqual(self.stderr(), "")
self.assertEqual(self.stdout(), "runmod3: passed hello\n")
def test_pkg1_main(self):
run_python_module("pkg1", ["pkg1", "hello"])
+ self.assertEqual(self.stderr(), "")
self.assertEqual(self.stdout(), "pkg1.__main__: passed hello\n")
def test_pkg1_sub_main(self):
run_python_module("pkg1.sub", ["pkg1.sub", "hello"])
+ self.assertEqual(self.stderr(), "")
self.assertEqual(self.stdout(), "pkg1.sub.__main__: passed hello\n")
def test_no_such_module(self):
diff --git a/tests/test_farm.py b/tests/test_farm.py
index c86983e5..47f9b7b7 100644
--- a/tests/test_farm.py
+++ b/tests/test_farm.py
@@ -15,6 +15,10 @@ def test_farm(clean_only=False):
yield (case,)
+# "rU" was deprecated in 3.4
+READ_MODE = "rU" if sys.version_info < (3, 4) else "r"
+
+
class FarmTestCase(object):
"""A test case from the farm tree.
@@ -22,8 +26,8 @@ class FarmTestCase(object):
copy("src", "out")
run('''
- coverage -x white.py
- coverage -a white.py
+ coverage run white.py
+ coverage annotate white.py
''', rundir="out")
compare("out", "gold", "*,cover")
clean("out")
@@ -75,7 +79,8 @@ class FarmTestCase(object):
# Prepare a dictionary of globals for the run.py files to use.
fns = """
- copy run runfunc compare contains doesnt_contain clean skip
+ copy run runfunc clean skip
+ compare contains contains_any doesnt_contain
""".split()
if self.clean_only:
glo = dict((fn, self.noop) for fn in fns)
@@ -238,8 +243,10 @@ class FarmTestCase(object):
# guide for size comparison.
wrong_size = []
for f in diff_files:
- left = open(os.path.join(dir1, f), "rb").read()
- right = open(os.path.join(dir2, f), "rb").read()
+ with open(os.path.join(dir1, f), "rb") as fobj:
+ left = fobj.read()
+ with open(os.path.join(dir2, f), "rb") as fobj:
+ right = fobj.read()
size_l, size_r = len(left), len(right)
big, little = max(size_l, size_r), min(size_l, size_r)
if (big - little) / float(little) > size_within/100.0:
@@ -256,14 +263,18 @@ class FarmTestCase(object):
# ourselves.
text_diff = []
for f in diff_files:
- left = open(os.path.join(dir1, f), "rU").readlines()
- right = open(os.path.join(dir2, f), "rU").readlines()
+ with open(os.path.join(dir1, f), READ_MODE) as fobj:
+ left = fobj.read()
+ with open(os.path.join(dir2, f), READ_MODE) as fobj:
+ right = fobj.read()
if scrubs:
left = self._scrub(left, scrubs)
right = self._scrub(right, scrubs)
if left != right:
text_diff.append(f)
- print("".join(list(difflib.Differ().compare(left, right))))
+ left = left.splitlines()
+ right = right.splitlines()
+ print("\n".join(difflib.Differ().compare(left, right)))
assert not text_diff, "Files differ: %s" % text_diff
if not left_extra:
@@ -271,19 +282,16 @@ class FarmTestCase(object):
if not right_extra:
assert not right_only, "Files in %s only: %s" % (dir2, right_only)
- def _scrub(self, strlist, scrubs):
- """Scrub uninteresting data from the strings in `strlist`.
+ def _scrub(self, strdata, scrubs):
+ """Scrub uninteresting data from the payload in `strdata`.
- `scrubs is a list of (find, replace) pairs of regexes that are used on
- each string in `strlist`. A list of scrubbed strings is returned.
+ `scrubs` is a list of (find, replace) pairs of regexes that are used on
+ `strdata`. A string is returned.
"""
- scrubbed = []
- for s in strlist:
- for rgx_find, rgx_replace in scrubs:
- s = re.sub(rgx_find, rgx_replace, s)
- scrubbed.append(s)
- return scrubbed
+ for rgx_find, rgx_replace in scrubs:
+ strdata = re.sub(rgx_find, rgx_replace, strdata)
+ return strdata
def contains(self, filename, *strlist):
"""Check that the file contains all of a list of strings.
@@ -292,10 +300,27 @@ class FarmTestCase(object):
missing in `filename`.
"""
- text = open(filename, "r").read()
+ with open(filename, "r") as fobj:
+ text = fobj.read()
for s in strlist:
assert s in text, "Missing content in %s: %r" % (filename, s)
+ def contains_any(self, filename, *strlist):
+ """Check that the file contains at least one of a list of strings.
+
+ An assert will be raised if none of the arguments in `strlist` is in
+ `filename`.
+
+ """
+ with open(filename, "r") as fobj:
+ text = fobj.read()
+ for s in strlist:
+ if s in text:
+ return
+ assert False, "Missing content in %s: %r [1 of %d]" % (
+ filename, strlist[0], len(strlist),
+ )
+
def doesnt_contain(self, filename, *strlist):
"""Check that the file contains none of a list of strings.
@@ -303,7 +328,8 @@ class FarmTestCase(object):
`filename`.
"""
- text = open(filename, "r").read()
+ with open(filename, "r") as fobj:
+ text = fobj.read()
for s in strlist:
assert s not in text, "Forbidden content in %s: %r" % (filename, s)
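The rewritten _scrub works on one string instead of a list of lines. A standalone sketch of the same idea, with illustrative patterns:

    import re

    def scrub(strdata, scrubs):
        """Apply (find, replace) regex pairs to a single string."""
        for rgx_find, rgx_replace in scrubs:
            strdata = re.sub(rgx_find, rgx_replace, strdata)
        return strdata

    print(scrub("took 0.123s on 2014-04-05",
                [(r"\d+\.\d+s", "Xs"), (r"\d{4}-\d{2}-\d{2}", "DATE")]))
    # took Xs on DATE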
diff --git a/tests/test_files.py b/tests/test_files.py
index 85c0ac7b..648c76a9 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -50,41 +50,62 @@ class FileLocatorTest(CoverageTest):
class MatcherTest(CoverageTest):
"""Tests of file matchers."""
+ def setUp(self):
+ super(MatcherTest, self).setUp()
+ self.fl = FileLocator()
+
+ def assertMatches(self, matcher, filepath, matches):
+ """The `matcher` should agree with `matches` about `filepath`."""
+ canonical = self.fl.canonical_filename(filepath)
+ self.assertEqual(
+ matcher.match(canonical), matches,
+ "File %s should have matched as %s" % (filepath, matches)
+ )
+
def test_tree_matcher(self):
- file1 = self.make_file("sub/file1.py")
- file2 = self.make_file("sub/file2.c")
- file3 = self.make_file("sub2/file3.h")
- file4 = self.make_file("sub3/file4.py")
- file5 = self.make_file("sub3/file5.c")
+ matches_to_try = [
+ (self.make_file("sub/file1.py"), True),
+ (self.make_file("sub/file2.c"), True),
+ (self.make_file("sub2/file3.h"), False),
+ (self.make_file("sub3/file4.py"), True),
+ (self.make_file("sub3/file5.c"), False),
+ ]
fl = FileLocator()
trees = [
fl.canonical_filename("sub"),
- fl.canonical_filename(file4),
+ fl.canonical_filename("sub3/file4.py"),
]
tm = TreeMatcher(trees)
- self.assertTrue(tm.match(fl.canonical_filename(file1)))
- self.assertTrue(tm.match(fl.canonical_filename(file2)))
- self.assertFalse(tm.match(fl.canonical_filename(file3)))
- self.assertTrue(tm.match(fl.canonical_filename(file4)))
- self.assertFalse(tm.match(fl.canonical_filename(file5)))
-
self.assertEqual(tm.info(), trees)
+ for filepath, matches in matches_to_try:
+ self.assertMatches(tm, filepath, matches)
def test_fnmatch_matcher(self):
- file1 = self.make_file("sub/file1.py")
- file2 = self.make_file("sub/file2.c")
- file3 = self.make_file("sub2/file3.h")
- file4 = self.make_file("sub3/file4.py")
- file5 = self.make_file("sub3/file5.c")
- fl = FileLocator()
+ matches_to_try = [
+ (self.make_file("sub/file1.py"), True),
+ (self.make_file("sub/file2.c"), False),
+ (self.make_file("sub2/file3.h"), True),
+ (self.make_file("sub3/file4.py"), True),
+ (self.make_file("sub3/file5.c"), False),
+ ]
fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
- self.assertTrue(fnm.match(fl.canonical_filename(file1)))
- self.assertFalse(fnm.match(fl.canonical_filename(file2)))
- self.assertTrue(fnm.match(fl.canonical_filename(file3)))
- self.assertTrue(fnm.match(fl.canonical_filename(file4)))
- self.assertFalse(fnm.match(fl.canonical_filename(file5)))
-
self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
+ for filepath, matches in matches_to_try:
+ self.assertMatches(fnm, filepath, matches)
+
+ def test_fnmatch_matcher_overload(self):
+ fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)])
+ self.assertMatches(fnm, "x007foo.txt", True)
+ self.assertMatches(fnm, "x123foo.txt", True)
+ self.assertMatches(fnm, "x798bar.txt", False)
+
+ def test_fnmatch_windows_paths(self):
+ # We should be able to match Windows paths even if we are running on
+ # a non-Windows OS.
+ fnm = FnmatchMatcher(["*/foo.py"])
+ self.assertMatches(fnm, r"dir\foo.py", True)
+ fnm = FnmatchMatcher([r"*\foo.py"])
+ self.assertMatches(fnm, r"dir\foo.py", True)
class PathAliasesTest(CoverageTest):
@@ -124,11 +145,11 @@ class PathAliasesTest(CoverageTest):
def test_cant_have_wildcard_at_end(self):
aliases = PathAliases()
msg = "Pattern must not end with wildcards."
- with self.assertRaisesRegexp(CoverageException, msg):
+ with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*", "fooey")
- with self.assertRaisesRegexp(CoverageException, msg):
+ with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*/", "fooey")
- with self.assertRaisesRegexp(CoverageException, msg):
+ with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*/*/", "fooey")
def test_no_accidental_munging(self):
@@ -170,7 +191,7 @@ class RelativePathAliasesTest(CoverageTest):
aliases.add(d, '/the/source')
the_file = os.path.join(d, 'a.py')
the_file = os.path.expanduser(the_file)
- the_file = os.path.abspath(the_file)
+ the_file = os.path.abspath(os.path.realpath(the_file))
assert '~' not in the_file # to be sure the test is pure.
self.assertEqual(aliases.map(the_file), '/the/source/a.py')
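A hedged usage sketch of the matchers exercised above, assuming the imports the test module already uses (FileLocator and FnmatchMatcher from coverage.files):

    from coverage.files import FileLocator, FnmatchMatcher

    fl = FileLocator()
    fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
    print(fnm.match(fl.canonical_filename("sub/file1.py")))   # True, matches *.py
    print(fnm.match(fl.canonical_filename("sub/file2.c")))    # False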
diff --git a/tests/test_html.py b/tests/test_html.py
index 41859382..8e43e7cf 100644
--- a/tests/test_html.py
+++ b/tests/test_html.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Tests that HTML generation is awesome."""
-import os.path, re
+import os.path, re, sys
import coverage
import coverage.html
from coverage.misc import CoverageException, NotPython, NoSource
@@ -42,6 +42,13 @@ class HtmlTestHelpers(CoverageTest):
os.remove("htmlcov/helper1.html")
os.remove("htmlcov/helper2.html")
+ def get_html_report_content(self, module):
+ """Return the content of the HTML report for `module`."""
+ filename = module.replace(".py", ".html").replace("/", "_")
+ filename = os.path.join("htmlcov", filename)
+ with open(filename) as f:
+ return f.read()
+
class HtmlDeltaTest(HtmlTestHelpers, CoverageTest):
"""Tests of the HTML delta speed-ups."""
@@ -208,7 +215,7 @@ class HtmlTitleTest(HtmlTestHelpers, CoverageTest):
)
-class HtmlWithUnparsableFilesTest(CoverageTest):
+class HtmlWithUnparsableFilesTest(HtmlTestHelpers, CoverageTest):
"""Test the behavior when measuring unparsable files."""
def test_dotpy_not_python(self):
@@ -217,7 +224,7 @@ class HtmlWithUnparsableFilesTest(CoverageTest):
self.start_import_stop(cov, "innocuous")
self.make_file("innocuous.py", "<h1>This isn't python!</h1>")
msg = "Couldn't parse '.*innocuous.py' as Python source: .* at line 1"
- with self.assertRaisesRegexp(NotPython, msg):
+ with self.assertRaisesRegex(NotPython, msg):
cov.html_report()
def test_dotpy_not_python_ignored(self):
@@ -267,6 +274,31 @@ class HtmlWithUnparsableFilesTest(CoverageTest):
cov.html_report()
self.assert_exists("htmlcov/index.html")
+ def test_decode_error(self):
+ # imp.load_module won't load a file with an undecodable character
+ # in a comment, though Python itself will run it. So we'll change the
+ # file after running.
+ self.make_file("main.py", "import sub.not_ascii")
+ self.make_file("sub/__init__.py")
+ self.make_file("sub/not_ascii.py", """\
+ a = 1 # Isn't this great?!
+ """)
+ cov = coverage.coverage()
+ self.start_import_stop(cov, "main")
+
+ # Create the undecodable version of the file.
+ self.make_file("sub/not_ascii.py", """\
+ a = 1 # Isn't this great?\xcb!
+ """)
+ cov.html_report()
+
+ html_report = self.get_html_report_content("sub/not_ascii.py")
+ if sys.version_info < (3, 0):
+ expected = "# Isn&#39;t this great?&#65533;!"
+ else:
+ expected = "# Isn&#39;t this great?&#203;!"
+ self.assertIn(expected, html_report)
+
class HtmlTest(CoverageTest):
"""Moar HTML tests."""
@@ -283,7 +315,7 @@ class HtmlTest(CoverageTest):
missing_file = os.path.join(self.temp_dir, "sub", "another.py")
missing_file = os.path.realpath(missing_file)
msg = "(?i)No source for code: '%s'" % re.escape(missing_file)
- with self.assertRaisesRegexp(NoSource, msg):
+ with self.assertRaisesRegex(NoSource, msg):
cov.html_report()
class HtmlStaticFileTest(CoverageTest):
@@ -340,5 +372,5 @@ class HtmlStaticFileTest(CoverageTest):
cov = coverage.coverage()
self.start_import_stop(cov, "main")
msg = "Couldn't find static file '.*'"
- with self.assertRaisesRegexp(CoverageException, msg):
+ with self.assertRaisesRegex(CoverageException, msg):
cov.html_report()
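The two expected strings in test_decode_error come from how the stray 0xCB byte gets decoded before HTML escaping; a hedged reading, not taken from the report code:

    # Python 2 path: UTF-8 decode with replacement gives U+FFFD, hence &#65533;
    print(ord(b"\xcb".decode("utf-8", "replace")))   # 65533
    # Python 3 path: the byte comes through as Latin-1 E-with-diaeresis, hence &#203;
    print(ord(b"\xcb".decode("latin-1")))            # 203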
diff --git a/tests/test_oddball.py b/tests/test_oddball.py
index 786ede94..47f492f6 100644
--- a/tests/test_oddball.py
+++ b/tests/test_oddball.py
@@ -116,9 +116,8 @@ class RecursionTest(CoverageTest):
self.assertEqual(statements, [1,2,3,5,7,8,9,10,11])
self.assertEqual(missing, expected_missing)
- # We can get a warning about the stackoverflow effect on the tracing
- # function only if we have sys.gettrace
- if pytrace and hasattr(sys, "gettrace"):
+ # Get a warning about the stackoverflow effect on the tracing function.
+ if pytrace:
self.assertEqual(cov._warnings,
["Trace function changed, measurement is likely wrong: None"]
)
@@ -368,35 +367,34 @@ class DoctestTest(CoverageTest):
[1,11,12,14,16,17], "")
-if hasattr(sys, 'gettrace'):
- class GettraceTest(CoverageTest):
- """Tests that we work properly with `sys.gettrace()`."""
- def test_round_trip(self):
- self.check_coverage('''\
- import sys
- def foo(n):
- return 3*n
- def bar(n):
- return 5*n
- a = foo(6)
+class GettraceTest(CoverageTest):
+ """Tests that we work properly with `sys.gettrace()`."""
+ def test_round_trip(self):
+ self.check_coverage('''\
+ import sys
+ def foo(n):
+ return 3*n
+ def bar(n):
+ return 5*n
+ a = foo(6)
+ sys.settrace(sys.gettrace())
+ a = bar(8)
+ ''',
+ [1,2,3,4,5,6,7,8], "")
+
+ def test_multi_layers(self):
+ self.check_coverage('''\
+ import sys
+ def level1():
+ a = 3
+ level2()
+ b = 5
+ def level2():
+ c = 7
sys.settrace(sys.gettrace())
- a = bar(8)
- ''',
- [1,2,3,4,5,6,7,8], "")
-
- def test_multi_layers(self):
- self.check_coverage('''\
- import sys
- def level1():
- a = 3
- level2()
- b = 5
- def level2():
- c = 7
- sys.settrace(sys.gettrace())
- d = 9
- e = 10
- level1()
- f = 12
- ''',
- [1,2,3,4,5,6,7,8,9,10,11,12], "")
+ d = 9
+ e = 10
+ level1()
+ f = 12
+ ''',
+ [1,2,3,4,5,6,7,8,9,10,11,12], "")
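The behavior under test is that re-installing whatever trace function is currently active must not disturb measurement. A standalone illustration of the round trip:

    import sys

    def trace_roundtrip():
        tracer = sys.gettrace()     # whatever is installed, possibly None
        sys.settrace(tracer)        # putting it straight back should be a no-op
        return tracer is sys.gettrace()

    print(trace_roundtrip())        # True, with or without an active tracer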
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 80773c74..a392ea03 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -2,23 +2,23 @@
import textwrap
from tests.coveragetest import CoverageTest
-from coverage.parser import CodeParser
+from coverage.parser import PythonParser
-class ParserTest(CoverageTest):
- """Tests for Coverage.py's code parsing."""
+class PythonParserTest(CoverageTest):
+ """Tests for Coverage.py's Python code parsing."""
run_in_temp_dir = False
def parse_source(self, text):
- """Parse `text` as source, and return the `CodeParser` used."""
+ """Parse `text` as source, and return the `PythonParser` used."""
text = textwrap.dedent(text)
- cp = CodeParser(text=text, exclude="nocover")
- cp.parse_source()
- return cp
+ parser = PythonParser(text=text, exclude="nocover")
+ parser.parse_source()
+ return parser
def test_exit_counts(self):
- cp = self.parse_source("""\
+ parser = self.parse_source("""\
# check some basic branch counting
class Foo:
def foo(self, a):
@@ -30,12 +30,12 @@ class ParserTest(CoverageTest):
class Bar:
pass
""")
- self.assertEqual(cp.exit_counts(), {
+ self.assertEqual(parser.exit_counts(), {
2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1
})
def test_try_except(self):
- cp = self.parse_source("""\
+ parser = self.parse_source("""\
try:
a = 2
except ValueError:
@@ -46,12 +46,12 @@ class ParserTest(CoverageTest):
a = 8
b = 9
""")
- self.assertEqual(cp.exit_counts(), {
+ self.assertEqual(parser.exit_counts(), {
1: 1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1
})
def test_excluded_classes(self):
- cp = self.parse_source("""\
+ parser = self.parse_source("""\
class Foo:
def __init__(self):
pass
@@ -60,20 +60,20 @@ class ParserTest(CoverageTest):
class Bar:
pass
""")
- self.assertEqual(cp.exit_counts(), {
+ self.assertEqual(parser.exit_counts(), {
1:0, 2:1, 3:1
})
def test_missing_branch_to_excluded_code(self):
- cp = self.parse_source("""\
+ parser = self.parse_source("""\
if fooey:
a = 2
else: # nocover
a = 4
b = 5
""")
- self.assertEqual(cp.exit_counts(), { 1:1, 2:1, 5:1 })
- cp = self.parse_source("""\
+ self.assertEqual(parser.exit_counts(), { 1:1, 2:1, 5:1 })
+ parser = self.parse_source("""\
def foo():
if fooey:
a = 3
@@ -81,8 +81,8 @@ class ParserTest(CoverageTest):
a = 5
b = 6
""")
- self.assertEqual(cp.exit_counts(), { 1:1, 2:2, 3:1, 5:1, 6:1 })
- cp = self.parse_source("""\
+ self.assertEqual(parser.exit_counts(), { 1:1, 2:2, 3:1, 5:1, 6:1 })
+ parser = self.parse_source("""\
def foo():
if fooey:
a = 3
@@ -90,17 +90,17 @@ class ParserTest(CoverageTest):
a = 5
b = 6
""")
- self.assertEqual(cp.exit_counts(), { 1:1, 2:1, 3:1, 6:1 })
+ self.assertEqual(parser.exit_counts(), { 1:1, 2:1, 3:1, 6:1 })
class ParserFileTest(CoverageTest):
"""Tests for Coverage.py's code parsing from files."""
def parse_file(self, filename):
- """Parse `text` as source, and return the `CodeParser` used."""
- cp = CodeParser(filename=filename, exclude="nocover")
- cp.parse_source()
- return cp
+ """Parse the file `filename` as source, and return the `PythonParser` used."""
+ parser = PythonParser(filename=filename, exclude="nocover")
+ parser.parse_source()
+ return parser
def test_line_endings(self):
text = """\
@@ -120,12 +120,12 @@ class ParserFileTest(CoverageTest):
for fname, newline in name_endings:
fname = fname + ".py"
self.make_file(fname, text, newline=newline)
- cp = self.parse_file(fname)
- self.assertEqual(cp.exit_counts(), counts)
+ parser = self.parse_file(fname)
+ self.assertEqual(parser.exit_counts(), counts)
def test_encoding(self):
self.make_file("encoded.py", """\
coverage = "\xe7\xf6v\xear\xe3g\xe9"
""")
- cp = self.parse_file("encoded.py")
- cp.exit_counts()
+ parser = self.parse_file("encoded.py")
+ parser.exit_counts() # TODO: This value should be tested!
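A hedged sketch of the renamed parser API as these tests use it; constructor arguments and method names are taken from parse_source above, and the printed counts are only an expectation:

    import textwrap
    from coverage.parser import PythonParser

    src = textwrap.dedent("""\
        if fooey:
            a = 2
        b = 3
        """)
    parser = PythonParser(text=src, exclude="nocover")
    parser.parse_source()
    print(parser.exit_counts())     # roughly {1: 2, 2: 1, 3: 1}: the `if` line has two exits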
diff --git a/tests/test_phystokens.py b/tests/test_phystokens.py
index e15400b6..4755c167 100644
--- a/tests/test_phystokens.py
+++ b/tests/test_phystokens.py
@@ -97,6 +97,11 @@ if sys.version_info < (3, 0):
source = "# This Python file uses this encoding: utf-8\n"
self.assertEqual(source_encoding(source), 'utf-8')
+ def test_detect_source_encoding_not_in_comment(self):
+ # Should not detect anything here
+ source = 'def parse(src, encoding=None):\n pass'
+ self.assertEqual(source_encoding(source), 'ascii')
+
def test_detect_source_encoding_on_second_line(self):
# A coding declaration should be found despite a first blank line.
source = "\n# coding=cp850\n\n"
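A small usage sketch of the source_encoding helper these tests exercise; the import path is assumed from the test module's name, and the results shown reflect the Python 2 behavior the tests check:

    from coverage.phystokens import source_encoding

    print(source_encoding("# coding=cp850\n"))                   # cp850
    print(source_encoding("def parse(src, encoding=None):\n"))   # ascii, no coding comment found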
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
new file mode 100644
index 00000000..9c5a037d
--- /dev/null
+++ b/tests/test_plugins.py
@@ -0,0 +1,217 @@
+"""Tests for plugins."""
+
+import os.path
+
+import coverage
+from coverage.codeunit import CodeUnit
+from coverage.parser import CodeParser
+from coverage.plugin import Plugins, overrides
+
+from tests.coveragetest import CoverageTest
+
+
+class FakeConfig(object):
+ """A fake config for use in tests."""
+
+ def __init__(self, plugin, options):
+ self.plugin = plugin
+ self.options = options
+ self.asked_for = []
+
+ def get_plugin_options(self, module):
+ """Just return the options for `module` if this is the right module."""
+ self.asked_for.append(module)
+ if module == self.plugin:
+ return self.options
+ else:
+ return {}
+
+
+class PluginUnitTest(CoverageTest):
+ """Test Plugins.load_plugins directly."""
+
+ def test_importing_and_configuring(self):
+ self.make_file("plugin1.py", """\
+ from coverage import CoveragePlugin
+
+ class Plugin(CoveragePlugin):
+ def __init__(self, options):
+ super(Plugin, self).__init__(options)
+ self.this_is = "me"
+ """)
+
+ config = FakeConfig("plugin1", {'a':'hello'})
+ plugins = list(Plugins.load_plugins(["plugin1"], config))
+
+ self.assertEqual(len(plugins), 1)
+ self.assertEqual(plugins[0].this_is, "me")
+ self.assertEqual(plugins[0].options, {'a':'hello'})
+ self.assertEqual(config.asked_for, ['plugin1'])
+
+ def test_importing_and_configuring_more_than_one(self):
+ self.make_file("plugin1.py", """\
+ from coverage import CoveragePlugin
+
+ class Plugin(CoveragePlugin):
+ def __init__(self, options):
+ super(Plugin, self).__init__(options)
+ self.this_is = "me"
+ """)
+ self.make_file("plugin2.py", """\
+ from coverage import CoveragePlugin
+
+ class Plugin(CoveragePlugin):
+ pass
+ """)
+
+ config = FakeConfig("plugin1", {'a':'hello'})
+ plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))
+
+ self.assertEqual(len(plugins), 2)
+ self.assertEqual(plugins[0].this_is, "me")
+ self.assertEqual(plugins[0].options, {'a':'hello'})
+ self.assertEqual(plugins[1].options, {})
+ self.assertEqual(config.asked_for, ['plugin1', 'plugin2'])
+
+ def test_cant_import(self):
+ with self.assertRaises(ImportError):
+ _ = Plugins.load_plugins(["plugin_not_there"], None)
+
+ def test_ok_to_not_define_plugin(self):
+ self.make_file("plugin2.py", """\
+ from coverage import CoveragePlugin
+
+ Nothing = 0
+ """)
+ plugins = list(Plugins.load_plugins(["plugin2"], None))
+ self.assertEqual(plugins, [])
+
+
+class PluginTest(CoverageTest):
+ """Test plugins through the Coverage class."""
+
+ def test_plugin_imported(self):
+ # Prove that a plugin will be imported.
+ self.make_file("my_plugin.py", """\
+ with open("evidence.out", "w") as f:
+ f.write("we are here!")
+ """)
+
+ self.assert_doesnt_exist("evidence.out")
+ _ = coverage.Coverage(plugins=["my_plugin"])
+
+ with open("evidence.out") as f:
+ self.assertEqual(f.read(), "we are here!")
+
+ def test_missing_plugin_raises_import_error(self):
+ # Prove that a missing plugin will raise an ImportError.
+ with self.assertRaises(ImportError):
+ cov = coverage.Coverage(plugins=["does_not_exist_woijwoicweo"])
+ cov.start()
+
+ def test_bad_plugin_isnt_hidden(self):
+ # Prove that a plugin with an error in it will raise the error.
+ self.make_file("plugin_over_zero.py", """\
+ 1/0
+ """)
+ with self.assertRaises(ZeroDivisionError):
+ _ = coverage.Coverage(plugins=["plugin_over_zero"])
+
+ def test_importing_myself(self):
+ self.make_file("simple.py", """\
+ import try_xyz
+ a = 1
+ b = 2
+ """)
+ self.make_file("try_xyz.py", """\
+ c = 3
+ d = 4
+ """)
+
+ cov = coverage.Coverage(plugins=["tests.test_plugins"])
+
+ # Import the python file, executing it.
+ self.start_import_stop(cov, "simple")
+
+ _, statements, missing, _ = cov.analysis("simple.py")
+ self.assertEqual(statements, [1,2,3])
+ self.assertEqual(missing, [])
+ _, statements, _, _ = cov.analysis("/src/try_ABC.zz")
+ self.assertEqual(statements, [105, 106, 107, 205, 206, 207])
+
+
+class Plugin(coverage.CoveragePlugin):
+ def trace_judge(self, disp):
+ if "xyz.py" in disp.original_filename:
+ disp.trace = True
+ disp.source_filename = os.path.join(
+ "/src",
+ os.path.basename(
+ disp.original_filename.replace("xyz.py", "ABC.zz")
+ )
+ )
+
+ def line_number_range(self, frame):
+ lineno = frame.f_lineno
+ return lineno*100+5, lineno*100+7
+
+ def code_unit_class(self, filename):
+ return PluginCodeUnit
+
+class PluginCodeUnit(CodeUnit):
+ def get_parser(self, exclude=None):
+ return PluginParser()
+
+class PluginParser(CodeParser):
+ def parse_source(self):
+ return set([105, 106, 107, 205, 206, 207]), set([])
+
+
+class OverridesTest(CoverageTest):
+ """Test plugins.py:overrides."""
+
+ run_in_temp_dir = False
+
+ def test_overrides(self):
+ class SomeBase(object):
+ """Base class, two base methods."""
+ def method1(self):
+ pass
+
+ def method2(self):
+ pass
+
+ class Derived1(SomeBase):
+ """Simple single inheritance."""
+ def method1(self):
+ pass
+
+ self.assertTrue(overrides(Derived1(), "method1", SomeBase))
+ self.assertFalse(overrides(Derived1(), "method2", SomeBase))
+
+ class FurtherDerived1(Derived1):
+ """Derive again from Derived1, inherit its method1."""
+ pass
+
+ self.assertTrue(overrides(FurtherDerived1(), "method1", SomeBase))
+ self.assertFalse(overrides(FurtherDerived1(), "method2", SomeBase))
+
+ class FurtherDerived2(Derived1):
+ """Override the overridden method."""
+ def method1(self):
+ pass
+
+ self.assertTrue(overrides(FurtherDerived2(), "method1", SomeBase))
+ self.assertFalse(overrides(FurtherDerived2(), "method2", SomeBase))
+
+ class Mixin(object):
+ """A mixin that overrides method1."""
+ def method1(self):
+ pass
+
+ class Derived2(Mixin, SomeBase):
+ """A class that gets the method from the mixin."""
+ pass
+
+ self.assertTrue(overrides(Derived2(), "method1", SomeBase))
+ self.assertFalse(overrides(Derived2(), "method2", SomeBase))
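Pulling the pieces together, the plugin protocol these tests exercise is a module that defines a Plugin class derived from coverage.CoveragePlugin, loaded either with coverage.Coverage(plugins=[...]) or through the [run] plugins setting seen in the config tests earlier. A minimal sketch with a hypothetical module name, hooks copied from the in-test Plugin above:

    # my_plugin.py (hypothetical)
    from coverage import CoveragePlugin

    class Plugin(CoveragePlugin):
        def trace_judge(self, disp):
            # Claim files this plugin knows how to handle; leave others alone.
            if disp.original_filename.endswith("xyz.py"):
                disp.trace = True

    # Enabled with coverage.Coverage(plugins=["my_plugin"]) or, in .coveragerc:
    #   [run]
    #   plugins = my_plugin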
diff --git a/tests/test_process.py b/tests/test_process.py
index fa4759a8..3a0980dc 100644
--- a/tests/test_process.py
+++ b/tests/test_process.py
@@ -26,7 +26,7 @@ class ProcessTest(CoverageTest):
""")
self.assert_doesnt_exist(".coverage")
- self.run_command("coverage -x mycode.py")
+ self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
def test_environment(self):
@@ -39,7 +39,7 @@ class ProcessTest(CoverageTest):
""")
self.assert_doesnt_exist(".coverage")
- out = self.run_command("coverage -x mycode.py")
+ out = self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
self.assertEqual(out, 'done\n')
@@ -55,11 +55,11 @@ class ProcessTest(CoverageTest):
print('done')
""")
- out = self.run_command("coverage -x -p b_or_c.py b")
+ out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
- out = self.run_command("coverage -x -p b_or_c.py c")
+ out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
@@ -67,7 +67,7 @@ class ProcessTest(CoverageTest):
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
- self.run_command("coverage -c")
+ self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
@@ -91,23 +91,23 @@ class ProcessTest(CoverageTest):
print('done')
""")
- out = self.run_command("coverage -x -p b_or_c.py b")
+ out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
# Combine the (one) parallel coverage data file into .coverage .
- self.run_command("coverage -c")
+ self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
- out = self.run_command("coverage -x -p b_or_c.py c")
+ out = self.run_command("coverage run --append -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
- self.run_command("coverage -c")
+ self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
@@ -229,7 +229,7 @@ class ProcessTest(CoverageTest):
self.run_command("coverage run fleeting.py")
os.remove("fleeting.py")
out = self.run_command("coverage html -d htmlcov")
- self.assertRegexpMatches(out, "No source for code: '.*fleeting.py'")
+ self.assertRegex(out, "No source for code: '.*fleeting.py'")
self.assertNotIn("Traceback", out)
# It happens that the code paths are different for *.py and other
@@ -240,14 +240,14 @@ class ProcessTest(CoverageTest):
self.run_command("coverage run fleeting")
os.remove("fleeting")
- status, out = self.run_command_status("coverage html -d htmlcov", 1)
- self.assertRegexpMatches(out, "No source for code: '.*fleeting'")
+ status, out = self.run_command_status("coverage html -d htmlcov")
+ self.assertRegex(out, "No source for code: '.*fleeting'")
self.assertNotIn("Traceback", out)
self.assertEqual(status, 1)
def test_running_missing_file(self):
- status, out = self.run_command_status("coverage run xyzzy.py", 1)
- self.assertRegexpMatches(out, "No file to run: .*xyzzy.py")
+ status, out = self.run_command_status("coverage run xyzzy.py")
+ self.assertRegex(out, "No file to run: .*xyzzy.py")
self.assertNotIn("raceback", out)
self.assertNotIn("rror", out)
self.assertEqual(status, 1)
@@ -265,7 +265,7 @@ class ProcessTest(CoverageTest):
# The important thing is for "coverage run" and "python" to report the
# same traceback.
- status, out = self.run_command_status("coverage run throw.py", 1)
+ status, out = self.run_command_status("coverage run throw.py")
out2 = self.run_command("python throw.py")
if '__pypy__' in sys.builtin_module_names:
# Pypy has an extra frame in the traceback for some reason
@@ -294,8 +294,8 @@ class ProcessTest(CoverageTest):
# The important thing is for "coverage run" and "python" to have the
# same output. No traceback.
- status, out = self.run_command_status("coverage run exit.py", 17)
- status2, out2 = self.run_command_status("python exit.py", 17)
+ status, out = self.run_command_status("coverage run exit.py")
+ status2, out2 = self.run_command_status("python exit.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit..\n")
self.assertEqual(status, status2)
@@ -310,8 +310,8 @@ class ProcessTest(CoverageTest):
f1()
""")
- status, out = self.run_command_status("coverage run exit_none.py", 0)
- status2, out2 = self.run_command_status("python exit_none.py", 0)
+ status, out = self.run_command_status("coverage run exit_none.py")
+ status2, out2 = self.run_command_status("python exit_none.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit quietly..\n")
self.assertEqual(status, status2)
@@ -378,7 +378,7 @@ class ProcessTest(CoverageTest):
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
- self.run_command("coverage -c")
+ self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
@@ -470,8 +470,7 @@ class ProcessTest(CoverageTest):
self.assertIn("Hello\n", out)
self.assertIn("Goodbye\n", out)
- if hasattr(sys, "gettrace"):
- self.assertIn("Trace function changed", out)
+ self.assertIn("Trace function changed", out)
if sys.version_info >= (3, 0): # This only works on 3.x for now.
# It only works with the C tracer,
@@ -502,6 +501,18 @@ class ProcessTest(CoverageTest):
# about 5.
self.assertGreater(data.summary()['os.py'], 50)
+ def test_deprecation_warnings(self):
+ # Test that coverage doesn't trigger deprecation warnings.
+ # https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
+ self.make_file("allok.py", """\
+ import warnings
+ warnings.simplefilter('default')
+ import coverage
+ print("No warnings!")
+ """)
+ out = self.run_command("python allok.py")
+ self.assertEqual(out, "No warnings!\n")
+
class AliasedCommandTest(CoverageTest):
"""Tests of the version-specific command aliases."""
@@ -556,32 +567,47 @@ class FailUnderTest(CoverageTest):
def setUp(self):
super(FailUnderTest, self).setUp()
- self.make_file("fifty.py", """\
- # I have 50% coverage!
+ self.make_file("forty_two_plus.py", """\
+ # I have 42.857% (3/7) coverage!
a = 1
- if a > 2:
- b = 3
- c = 4
+ b = 2
+ if a > 3:
+ b = 4
+ c = 5
+ d = 6
+ e = 7
""")
- st, _ = self.run_command_status("coverage run fifty.py", 0)
+ st, _ = self.run_command_status("coverage run forty_two_plus.py")
+ self.assertEqual(st, 0)
+ st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
+ self.assertEqual(
+ self.last_line_squeezed(out),
+ "forty_two_plus 7 4 43%"
+ )
def test_report(self):
- st, _ = self.run_command_status("coverage report --fail-under=50", 0)
+ st, _ = self.run_command_status("coverage report --fail-under=42")
+ self.assertEqual(st, 0)
+ st, _ = self.run_command_status("coverage report --fail-under=43")
self.assertEqual(st, 0)
- st, _ = self.run_command_status("coverage report --fail-under=51", 2)
+ st, _ = self.run_command_status("coverage report --fail-under=44")
self.assertEqual(st, 2)
def test_html_report(self):
- st, _ = self.run_command_status("coverage html --fail-under=50", 0)
+ st, _ = self.run_command_status("coverage html --fail-under=42")
self.assertEqual(st, 0)
- st, _ = self.run_command_status("coverage html --fail-under=51", 2)
+ st, _ = self.run_command_status("coverage html --fail-under=43")
+ self.assertEqual(st, 0)
+ st, _ = self.run_command_status("coverage html --fail-under=44")
self.assertEqual(st, 2)
def test_xml_report(self):
- st, _ = self.run_command_status("coverage xml --fail-under=50", 0)
+ st, _ = self.run_command_status("coverage xml --fail-under=42")
+ self.assertEqual(st, 0)
+ st, _ = self.run_command_status("coverage xml --fail-under=43")
self.assertEqual(st, 0)
- st, _ = self.run_command_status("coverage xml --fail-under=51", 2)
+ st, _ = self.run_command_status("coverage xml --fail-under=44")
self.assertEqual(st, 2)
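The arithmetic behind the reworked fail-under fixture, worked out from the assertions above:

    pct = 100.0 * 3 / 7    # 3 of the 7 statements in forty_two_plus.py run: 42.857...
    shown = round(pct)     # 43, the figure in the "forty_two_plus 7 4 43%" report line
    # The exit statuses imply --fail-under compares against that reported figure:
    # 42 and 43 pass (status 0), 44 fails (status 2).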
diff --git a/tests/test_summary.py b/tests/test_summary.py
index 29167bf8..7bd1c496 100644
--- a/tests/test_summary.py
+++ b/tests/test_summary.py
@@ -21,26 +21,10 @@ class SummaryTest(CoverageTest):
# Parent class saves and restores sys.path, we can just modify it.
sys.path.append(self.nice_file(os.path.dirname(__file__), 'modules'))
- def report_from_command(self, cmd):
- """Return the report from the `cmd`, with some convenience added."""
- report = self.run_command(cmd).replace('\\', '/')
- self.assertNotIn("error", report.lower())
- return report
-
- def line_count(self, report):
- """How many lines are in `report`?"""
- self.assertEqual(report.split('\n')[-1], "")
- return len(report.split('\n')) - 1
-
- def last_line_squeezed(self, report):
- """Return the last line of `report` with the spaces squeezed down."""
- last_line = report.split('\n')[-2]
- return re.sub(r"\s+", " ", last_line)
-
def test_report(self):
- out = self.run_command("coverage -x mycode.py")
+ out = self.run_command("coverage run mycode.py")
self.assertEqual(out, 'done\n')
- report = self.report_from_command("coverage -r")
+ report = self.report_from_command("coverage report")
# Name Stmts Miss Cover
# ---------------------------------------------------------------------
@@ -58,8 +42,24 @@ class SummaryTest(CoverageTest):
def test_report_just_one(self):
# Try reporting just one module
- self.run_command("coverage -x mycode.py")
- report = self.report_from_command("coverage -r mycode.py")
+ self.run_command("coverage run mycode.py")
+ report = self.report_from_command("coverage report mycode.py")
+
+ # Name Stmts Miss Cover
+ # ----------------------------
+ # mycode 4 0 100%
+
+ self.assertEqual(self.line_count(report), 3)
+ self.assertNotIn("/coverage/", report)
+ self.assertNotIn("/tests/modules/covmod1 ", report)
+ self.assertNotIn("/tests/zipmods.zip/covmodzip1 ", report)
+ self.assertIn("mycode ", report)
+ self.assertEqual(self.last_line_squeezed(report), "mycode 4 0 100%")
+
+ def test_report_wildcard(self):
+ # Try reporting using wildcards to get the modules.
+ self.run_command("coverage run mycode.py")
+ report = self.report_from_command("coverage report my*.py")
# Name Stmts Miss Cover
# ----------------------------
@@ -75,8 +75,10 @@ class SummaryTest(CoverageTest):
def test_report_omitting(self):
# Try reporting while omitting some modules
prefix = os.path.split(__file__)[0]
- self.run_command("coverage -x mycode.py")
- report = self.report_from_command("coverage -r -o '%s/*'" % prefix)
+ self.run_command("coverage run mycode.py")
+ report = self.report_from_command(
+ "coverage report --omit '%s/*'" % prefix
+ )
# Name Stmts Miss Cover
# ----------------------------
@@ -126,13 +128,109 @@ class SummaryTest(CoverageTest):
self.assertEqual(self.last_line_squeezed(report),
"mybranch 5 0 2 1 86%")
+ def test_report_show_missing(self):
+ self.make_file("mymissing.py", """\
+ def missing(x, y):
+ if x:
+ print("x")
+ return x
+ if y:
+ print("y")
+ try:
+ print("z")
+ 1/0
+ print("Never!")
+ except ZeroDivisionError:
+ pass
+ return x
+ missing(0, 1)
+ """)
+ out = self.run_command("coverage run mymissing.py")
+ self.assertEqual(out, 'y\nz\n')
+ report = self.report_from_command("coverage report --show-missing")
+
+ # Name Stmts Miss Cover Missing
+ # -----------------------------------------
+ # mymissing 14 3 79% 3-4, 10
+
+ self.assertEqual(self.line_count(report), 3)
+ self.assertIn("mymissing ", report)
+ self.assertEqual(self.last_line_squeezed(report),
+ "mymissing 14 3 79% 3-4, 10")
+
+ def test_report_show_missing_branches(self):
+ self.make_file("mybranch.py", """\
+ def branch(x, y):
+ if x:
+ print("x")
+ if y:
+ print("y")
+ return x
+ branch(1, 1)
+ """)
+ out = self.run_command("coverage run --branch mybranch.py")
+ self.assertEqual(out, 'x\ny\n')
+ report = self.report_from_command("coverage report --show-missing")
+
+ # Name Stmts Miss Branch BrMiss Cover Missing
+ # -------------------------------------------------------
+ # mybranch 7 0 4 2 82% 2->4, 4->6
+
+ self.assertEqual(self.line_count(report), 3)
+ self.assertIn("mybranch ", report)
+ self.assertEqual(self.last_line_squeezed(report),
+ "mybranch 7 0 4 2 82% 2->4, 4->6")
+
+ def test_report_show_missing_branches_and_lines(self):
+ self.make_file("main.py", """\
+ import mybranch
+ """)
+ self.make_file("mybranch.py", """\
+ def branch(x, y, z):
+ if x:
+ print("x")
+ if y:
+ print("y")
+ if z:
+ if x and y:
+ print("z")
+ return x
+ branch(1, 1, 0)
+ """)
+ out = self.run_command("coverage run --branch main.py")
+ self.assertEqual(out, 'x\ny\n')
+ report = self.report_from_command("coverage report --show-missing")
+
+ # pylint: disable=C0301
+ # Name Stmts Miss Branch BrMiss Cover Missing
+ # -------------------------------------------------------
+ # main 1 0 0 0 100%
+ # mybranch 10 2 8 5 61% 7-8, 2->4, 4->6
+ # -------------------------------------------------------
+ # TOTAL 11 2 8 5 63%
+
+ self.assertEqual(self.line_count(report), 6)
+ squeezed = self.squeezed_lines(report)
+ self.assertEqual(
+ squeezed[2],
+ "main 1 0 0 0 100%"
+ )
+ self.assertEqual(
+ squeezed[3],
+ "mybranch 10 2 8 5 61% 7-8, 2->4, 4->6"
+ )
+ self.assertEqual(
+ squeezed[5],
+ "TOTAL 11 2 8 5 63%"
+ )
+
def test_dotpy_not_python(self):
# We run a .py file, and when reporting, we can't parse it as Python.
# We should get an error message in the report.
self.run_command("coverage run mycode.py")
self.make_file("mycode.py", "This isn't python at all!")
- report = self.report_from_command("coverage -r mycode.py")
+ report = self.report_from_command("coverage report mycode.py")
# pylint: disable=C0301
# Name Stmts Miss Cover
@@ -155,7 +253,7 @@ class SummaryTest(CoverageTest):
# but we've said to ignore errors, so there's no error reported.
self.run_command("coverage run mycode.py")
self.make_file("mycode.py", "This isn't python at all!")
- report = self.report_from_command("coverage -r -i mycode.py")
+ report = self.report_from_command("coverage report -i mycode.py")
# Name Stmts Miss Cover
# ----------------------------
@@ -171,7 +269,7 @@ class SummaryTest(CoverageTest):
self.run_command("coverage run mycode.html")
# Before reporting, change it to be an HTML file.
self.make_file("mycode.html", "<h1>This isn't python at all!</h1>")
- report = self.report_from_command("coverage -r mycode.html")
+ report = self.report_from_command("coverage report mycode.html")
# Name Stmts Miss Cover
# ----------------------------
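For readers decoding the new Missing column, the commands and notation come straight from the tests above:

    #   coverage run --branch main.py
    #   coverage report --show-missing
    #
    # "7-8"  means lines 7 through 8 were never executed.
    # "2->4" means the branch from line 2 to line 4 was never taken.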
diff --git a/tests/test_templite.py b/tests/test_templite.py
index 48e53ab4..a4667a62 100644
--- a/tests/test_templite.py
+++ b/tests/test_templite.py
@@ -1,6 +1,7 @@
"""Tests for coverage.templite."""
-from coverage.templite import Templite
+import re
+from coverage.templite import Templite, TempliteSyntaxError
from tests.coveragetest import CoverageTest
# pylint: disable=W0612,E1101
@@ -23,9 +24,23 @@ class TempliteTest(CoverageTest):
run_in_temp_dir = False
- def try_render(self, text, ctx, result):
- """Render `text` through `ctx`, and it had better be `result`."""
- self.assertEqual(Templite(text).render(ctx), result)
+ def try_render(self, text, ctx=None, result=None):
+ """Render `text` through `ctx`, and it had better be `result`.
+
+ Result defaults to None so we can shorten the calls where we expect
+ an exception and never get to the result comparison.
+ """
+ actual = Templite(text).render(ctx or {})
+ if result:
+ self.assertEqual(actual, result)
+
+ def assertSynErr(self, msg):
+ """Assert that a `TempliteSyntaxError` will happen.
+
+ A context manager, and the message should be `msg`.
+ """
+ pat = "^" + re.escape(msg) + "$"
+ return self.assertRaisesRegex(TempliteSyntaxError, pat)
def test_passthrough(self):
# Strings without variables are passed through unchanged.
@@ -42,7 +57,7 @@ class TempliteTest(CoverageTest):
def test_undefined_variables(self):
# Using undefined names is an error.
with self.assertRaises(Exception):
- self.try_render("Hi, {{name}}!", {}, "xyz")
+ self.try_render("Hi, {{name}}!")
def test_pipes(self):
# Variables can be filtered with pipes.
@@ -225,15 +240,42 @@ class TempliteTest(CoverageTest):
"Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
)
+ def test_bad_names(self):
+ with self.assertSynErr("Not a valid name: 'var%&!@'"):
+ self.try_render("Wat: {{ var%&!@ }}")
+ with self.assertSynErr("Not a valid name: 'filter%&!@'"):
+ self.try_render("Wat: {{ foo|filter%&!@ }}")
+ with self.assertSynErr("Not a valid name: '@'"):
+ self.try_render("Wat: {% for @ in x %}{% endfor %}")
+
def test_bogus_tag_syntax(self):
- msg = "Don't understand tag: 'bogus'"
- with self.assertRaisesRegexp(SyntaxError, msg):
- self.try_render("Huh: {% bogus %}!!{% endbogus %}??", {}, "")
+ with self.assertSynErr("Don't understand tag: 'bogus'"):
+ self.try_render("Huh: {% bogus %}!!{% endbogus %}??")
+
+ def test_malformed_if(self):
+ with self.assertSynErr("Don't understand if: '{% if %}'"):
+ self.try_render("Buh? {% if %}hi!{% endif %}")
+ with self.assertSynErr("Don't understand if: '{% if this or that %}'"):
+ self.try_render("Buh? {% if this or that %}hi!{% endif %}")
+
+ def test_malformed_for(self):
+ with self.assertSynErr("Don't understand for: '{% for %}'"):
+ self.try_render("Weird: {% for %}loop{% endfor %}")
+ with self.assertSynErr("Don't understand for: '{% for x from y %}'"):
+ self.try_render("Weird: {% for x from y %}loop{% endfor %}")
+ with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"):
+ self.try_render("Weird: {% for x, y in z %}loop{% endfor %}")
def test_bad_nesting(self):
- msg = "Unmatched action tag: 'if'"
- with self.assertRaisesRegexp(SyntaxError, msg):
- self.try_render("{% if x %}X", {}, "")
- msg = "Mismatched end tag: 'for'"
- with self.assertRaisesRegexp(SyntaxError, msg):
- self.try_render("{% if x %}X{% endfor %}", {}, "")
+ with self.assertSynErr("Unmatched action tag: 'if'"):
+ self.try_render("{% if x %}X")
+ with self.assertSynErr("Mismatched end tag: 'for'"):
+ self.try_render("{% if x %}X{% endfor %}")
+ with self.assertSynErr("Too many ends: '{% endif %}'"):
+ self.try_render("{% if x %}{% endif %}{% endif %}")
+
+ def test_malformed_end(self):
+ with self.assertSynErr("Don't understand end: '{% end if %}'"):
+ self.try_render("{% if x %}X{% end if %}")
+ with self.assertSynErr("Don't understand end: '{% endif now %}'"):
+ self.try_render("{% if x %}X{% endif now %}")
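A brief, hedged usage sketch of the Templite class under test, using only constructs the tests themselves render:

    from coverage.templite import Templite

    print(Templite("Hello, {{name}}!").render({"name": "world"}))   # Hello, world!
    loop = Templite("{% for c in chars %}{{c}}-{% endfor %}")
    print(loop.render({"chars": ["a", "b", "c"]}))                  # a-b-c-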
diff --git a/tests/test_testing.py b/tests/test_testing.py
index a89a59a9..4a19098f 100644
--- a/tests/test_testing.py
+++ b/tests/test_testing.py
@@ -2,45 +2,44 @@
"""Tests that our test infrastructure is really working!"""
import os, sys
+from coverage.backunittest import TestCase
from coverage.backward import to_bytes
-from tests.backunittest import TestCase
-from tests.coveragetest import CoverageTest
+from tests.coveragetest import TempDirMixin, CoverageTest
class TestingTest(TestCase):
"""Tests of helper methods on `backunittest.TestCase`."""
- run_in_temp_dir = False
-
- def test_assert_same_elements(self):
- self.assertSameElements(set(), set())
- self.assertSameElements(set([1,2,3]), set([3,1,2]))
+ def test_assert_count_equal(self):
+ self.assertCountEqual(set(), set())
+ self.assertCountEqual(set([1,2,3]), set([3,1,2]))
with self.assertRaises(AssertionError):
- self.assertSameElements(set([1,2,3]), set())
+ self.assertCountEqual(set([1,2,3]), set())
with self.assertRaises(AssertionError):
- self.assertSameElements(set([1,2,3]), set([4,5,6]))
+ self.assertCountEqual(set([1,2,3]), set([4,5,6]))
-class CoverageTestTest(CoverageTest):
- """Test the methods in `CoverageTest`."""
+class TempDirMixinTest(TempDirMixin, TestCase):
+ """Test the methods in TempDirMixin."""
def file_text(self, fname):
"""Return the text read from a file."""
- return open(fname, "rb").read().decode('ascii')
+ with open(fname, "rb") as f:
+ return f.read().decode('ascii')
def test_make_file(self):
# A simple file.
self.make_file("fooey.boo", "Hello there")
- self.assertEqual(open("fooey.boo").read(), "Hello there")
+ self.assertEqual(self.file_text("fooey.boo"), "Hello there")
# A file in a sub-directory
self.make_file("sub/another.txt", "Another")
- self.assertEqual(open("sub/another.txt").read(), "Another")
+ self.assertEqual(self.file_text("sub/another.txt"), "Another")
# A second file in that sub-directory
self.make_file("sub/second.txt", "Second")
- self.assertEqual(open("sub/second.txt").read(), "Second")
+ self.assertEqual(self.file_text("sub/second.txt"), "Second")
# A deeper directory
self.make_file("sub/deeper/evenmore/third.txt")
- self.assertEqual(open("sub/deeper/evenmore/third.txt").read(), "")
+ self.assertEqual(self.file_text("sub/deeper/evenmore/third.txt"), "")
def test_make_file_newline(self):
self.make_file("unix.txt", "Hello\n")
@@ -52,10 +51,13 @@ class CoverageTestTest(CoverageTest):
def test_make_file_non_ascii(self):
self.make_file("unicode.txt", "tabblo: «ταБЬℓσ»")
- self.assertEqual(
- open("unicode.txt", "rb").read(),
- to_bytes("tabblo: «ταБЬℓσ»")
- )
+ with open("unicode.txt", "rb") as f:
+ text = f.read()
+ self.assertEqual(text, to_bytes("tabblo: «ταБЬℓσ»"))
+
+
+class CoverageTestTest(CoverageTest):
+ """Test the methods in `CoverageTest`."""
def test_file_exists(self):
self.make_file("whoville.txt", "We are here!")
diff --git a/tests/test_xml.py b/tests/test_xml.py
index 0801bad3..37ada3cb 100644
--- a/tests/test_xml.py
+++ b/tests/test_xml.py
@@ -26,6 +26,13 @@ class XmlReportTest(CoverageTest):
self.assert_doesnt_exist("coverage.xml")
self.assert_exists("put_it_there.xml")
+ def test_config_file_directory_does_not_exist(self):
+ self.run_mycode()
+ self.run_command("coverage xml -o nonexistent/put_it_there.xml")
+ self.assert_doesnt_exist("coverage.xml")
+ self.assert_doesnt_exist("put_it_there.xml")
+ self.assert_exists("nonexistent/put_it_there.xml")
+
def test_config_affects_xml_placement(self):
self.run_mycode()
self.make_file(".coveragerc", "[xml]\noutput = xml.out\n")
diff --git a/tox.ini b/tox.ini
index e5984660..ca0d1721 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py26, py27, py32, py33, pypy
+envlist = py26, py27, py32, py33, py34, pypy
[testenv]
commands =
@@ -28,19 +28,33 @@ deps =
[testenv:py26]
deps =
- nose
- mock
+ {[testenv]deps}
unittest2
gevent
eventlet
+ greenlet
[testenv:py27]
deps =
- nose
- mock
- unittest2
+ {[testenv]deps}
gevent
eventlet
+ greenlet
+
+[testenv:py32]
+deps =
+ {[testenv]deps}
+ greenlet
+
+[testenv:py33]
+deps =
+ {[testenv]deps}
+ greenlet
+
+[testenv:py34]
+deps =
+ {[testenv]deps}
+ greenlet
[testenv:pypy]
# PyPy has no C extensions