-rw-r--r--  coverage/cmdline.py         |  3
-rw-r--r--  coverage/data.py            | 39
-rw-r--r--  tests/test_api.py           |  3
-rw-r--r--  tests/test_concurrency.py   |  3
-rw-r--r--  tests/test_data.py          |  8
-rw-r--r--  tests/test_plugins.py       |  9
-rw-r--r--  tests/test_process.py       | 35
7 files changed, 53 insertions, 47 deletions
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 14948d1c..23d2aec3 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -14,6 +14,7 @@ import traceback
from coverage import env
from coverage.collector import CTracer
+from coverage.data import line_counts
from coverage.debug import info_formatter, info_header
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource
@@ -660,7 +661,7 @@ class CoverageScript(object):
print("path: %s" % self.coverage.get_data().filename)
if data:
print("has_arcs: %r" % data.has_arcs())
- summary = data.line_counts(fullpath=True)
+ summary = line_counts(data, fullpath=True)
filenames = sorted(summary.keys())
print("\n%d files:" % len(filenames))
for f in filenames:
diff --git a/coverage/data.py b/coverage/data.py
index 9c82ccef..44b75439 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -244,25 +244,6 @@ class CoverageJsonData(object):
"""A list of all files that had been measured."""
return list(self._arcs or self._lines or {})
-    def line_counts(self, fullpath=False):
-        """Return a dict summarizing the line coverage data.
-
-        Keys are based on the file names, and values are the number of executed
-        lines. If `fullpath` is true, then the keys are the full pathnames of
-        the files, otherwise they are the basenames of the files.
-
-        Returns a dict mapping file names to counts of lines.
-
-        """
-        summ = {}
-        if fullpath:
-            filename_fn = lambda f: f
-        else:
-            filename_fn = os.path.basename
-        for filename in self.measured_files():
-            summ[filename_fn(filename)] = len(self.lines(filename))
-        return summ
-
    def __nonzero__(self):
        return bool(self._lines or self._arcs)
@@ -662,6 +643,26 @@ elif which == "sql":
CoverageData = CoverageSqliteData
+def line_counts(data, fullpath=False):
+    """Return a dict summarizing the line coverage data.
+
+    Keys are based on the file names, and values are the number of executed
+    lines. If `fullpath` is true, then the keys are the full pathnames of
+    the files, otherwise they are the basenames of the files.
+
+    Returns a dict mapping file names to counts of lines.
+
+    """
+    summ = {}
+    if fullpath:
+        filename_fn = lambda f: f
+    else:
+        filename_fn = os.path.basename
+    for filename in data.measured_files():
+        summ[filename_fn(filename)] = len(data.lines(filename))
+    return summ
+
+
def add_data_to_hash(data, filename, hasher):
    """Contribute `filename`'s data to the `hasher`.
diff --git a/tests/test_api.py b/tests/test_api.py
index a860c7da..3e7e2f06 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -13,6 +13,7 @@ import warnings
import coverage
from coverage import env
from coverage.backward import StringIO, import_local_file
+from coverage.data import line_counts
from coverage.misc import CoverageException
from coverage.report import Reporter
@@ -576,7 +577,7 @@ class SourceOmitIncludeTest(OmitIncludeTestsMixin, CoverageTest):
import usepkgs # pragma: nested # pylint: disable=import-error, unused-variable
cov.stop() # pragma: nested
data = cov.get_data()
- summary = data.line_counts()
+ summary = line_counts(data)
for k, v in list(summary.items()):
assert k.endswith(".py")
summary[k[:-3]] = v
diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py
index a4f700ed..9e2d73d9 100644
--- a/tests/test_concurrency.py
+++ b/tests/test_concurrency.py
@@ -14,6 +14,7 @@ from flaky import flaky
import coverage
from coverage import env
from coverage.backward import import_local_file
+from coverage.data import line_counts
from coverage.files import abs_file
from tests.coveragetest import CoverageTest
@@ -245,7 +246,7 @@ class ConcurrencyTest(CoverageTest):
print_simple_annotation(code, linenos)
lines = line_count(code)
- self.assertEqual(data.line_counts()['try_it.py'], lines)
+ self.assertEqual(line_counts(data)['try_it.py'], lines)
def test_threads(self):
code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
diff --git a/tests/test_data.py b/tests/test_data.py
index a450f90b..7ca6f655 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -12,7 +12,7 @@ import re
import mock
from coverage.data import CoverageData, debug_main, canonicalize_json_data, combine_parallel_data
-from coverage.data import add_data_to_hash
+from coverage.data import add_data_to_hash, line_counts
from coverage.debug import DebugControlString
from coverage.files import PathAliases, canonical_filename
from coverage.misc import CoverageException
@@ -74,9 +74,9 @@ MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py']
class DataTestHelpers(CoverageTest):
"""Test helpers for data tests."""
- def assert_line_counts(self, covdata, line_counts, fullpath=False):
- """Check that the line_counts of `covdata` is `line_counts`."""
- self.assertEqual(covdata.line_counts(fullpath), line_counts)
+ def assert_line_counts(self, covdata, counts, fullpath=False):
+ """Check that the line_counts of `covdata` is `counts`."""
+ self.assertEqual(line_counts(covdata, fullpath), counts)
def assert_measured_files(self, covdata, measured):
"""Check that `covdata`'s measured files are `measured`."""
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index 0987e41a..2d0f8426 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -8,6 +8,7 @@ import os.path
import coverage
from coverage import env
from coverage.backward import StringIO
+from coverage.data import line_counts
from coverage.control import Plugins
from coverage.misc import CoverageException
@@ -369,19 +370,19 @@ class GoodFileTracerTest(FileTracerTest):
_, statements, missing, _ = cov.analysis("foo_7.html")
self.assertEqual(statements, [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(missing, [1, 2, 3, 6, 7])
- self.assertIn("foo_7.html", cov.get_data().line_counts())
+ self.assertIn("foo_7.html", line_counts(cov.get_data()))
_, statements, missing, _ = cov.analysis("bar_4.html")
self.assertEqual(statements, [1, 2, 3, 4])
self.assertEqual(missing, [1, 4])
- self.assertIn("bar_4.html", cov.get_data().line_counts())
+ self.assertIn("bar_4.html", line_counts(cov.get_data()))
- self.assertNotIn("quux_5.html", cov.get_data().line_counts())
+ self.assertNotIn("quux_5.html", line_counts(cov.get_data()))
_, statements, missing, _ = cov.analysis("uni_3.html")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [1])
- self.assertIn("uni_3.html", cov.get_data().line_counts())
+ self.assertIn("uni_3.html", line_counts(cov.get_data()))
def test_plugin2_with_branch(self):
self.make_render_and_caller()
diff --git a/tests/test_process.py b/tests/test_process.py
index ede86691..48083f22 100644
--- a/tests/test_process.py
+++ b/tests/test_process.py
@@ -16,6 +16,7 @@ import pytest
import coverage
from coverage import env, CoverageData
+from coverage.data import line_counts
from coverage.misc import output_encoding
from tests.coveragetest import CoverageTest
@@ -91,7 +92,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
# Running combine again should fail, because there are no parallel data
# files to combine.
@@ -102,7 +103,7 @@ class ProcessTest(CoverageTest):
# And the originally combined data is still there.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_parallel_data_with_a_corrupt_file(self):
self.make_b_or_c_py()
@@ -138,7 +139,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_no_usable_files(self):
# https://bitbucket.org/ned/coveragepy/issues/629/multiple-use-of-combine-leads-to-empty
@@ -173,7 +174,7 @@ class ProcessTest(CoverageTest):
# executed (we only did b, not c).
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 6)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 6)
def test_combine_parallel_data_in_two_steps(self):
self.make_b_or_c_py()
@@ -204,7 +205,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_parallel_data_no_append(self):
self.make_b_or_c_py()
@@ -236,7 +237,7 @@ class ProcessTest(CoverageTest):
# because we didn't keep the data from running b.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 7)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 7)
def test_append_data(self):
self.make_b_or_c_py()
@@ -255,7 +256,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_append_data_with_different_file(self):
self.make_b_or_c_py()
@@ -279,7 +280,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData(".mycovdata")
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_append_can_create_a_data_file(self):
self.make_b_or_c_py()
@@ -293,7 +294,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 6)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 6)
def test_combine_with_rc(self):
self.make_b_or_c_py()
@@ -326,7 +327,7 @@ class ProcessTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['b_or_c.py'], 8)
+ self.assertEqual(line_counts(data)['b_or_c.py'], 8)
# Reporting should still work even with the .rc file
out = self.run_command("coverage report")
@@ -380,7 +381,7 @@ class ProcessTest(CoverageTest):
# files have been combined together.
data = coverage.CoverageData()
data.read()
- summary = data.line_counts(fullpath=True)
+ summary = line_counts(data, fullpath=True)
self.assertEqual(len(summary), 1)
actual = os.path.normcase(os.path.abspath(list(summary.keys())[0]))
expected = os.path.normcase(os.path.abspath('src/x.py'))
@@ -544,7 +545,7 @@ class ProcessTest(CoverageTest):
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['fork.py'], 9)
+ self.assertEqual(line_counts(data)['fork.py'], 9)
def test_warnings_during_reporting(self):
# While fixing issue #224, the warnings were being printed far too
@@ -684,7 +685,7 @@ class ProcessTest(CoverageTest):
# The actual number of executed lines in os.py when it's
# imported is 120 or so. Just running os.getenv executes
# about 5.
- self.assertGreater(data.line_counts()['os.py'], 50)
+ self.assertGreater(line_counts(data)['os.py'], 50)
def test_lang_c(self):
if env.JYTHON:
@@ -911,7 +912,7 @@ class ExcepthookTest(CoverageTest):
# executed.
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['excepthook.py'], 7)
+ self.assertEqual(line_counts(data)['excepthook.py'], 7)
def test_excepthook_exit(self):
if env.PYPY or env.JYTHON:
@@ -1257,7 +1258,7 @@ class ProcessStartupTest(ProcessCoverageMixin, CoverageTest):
self.assert_exists(".mycovdata")
data = coverage.CoverageData(".mycovdata")
data.read()
- self.assertEqual(data.line_counts()['sub.py'], 3)
+ self.assertEqual(line_counts(data)['sub.py'], 3)
def test_subprocess_with_pth_files_and_parallel(self): # pragma: no metacov
# https://bitbucket.org/ned/coveragepy/issues/492/subprocess-coverage-strange-detection-of
@@ -1281,7 +1282,7 @@ class ProcessStartupTest(ProcessCoverageMixin, CoverageTest):
self.assert_exists(".coverage")
data = coverage.CoverageData()
data.read()
- self.assertEqual(data.line_counts()['sub.py'], 3)
+ self.assertEqual(line_counts(data)['sub.py'], 3)
# assert that there are *no* extra data files left over after a combine
data_files = glob.glob(os.getcwd() + '/.coverage*')
@@ -1371,7 +1372,7 @@ class ProcessStartupWithSourceTest(ProcessCoverageMixin, CoverageTest):
self.assert_exists(".coverage")
data = coverage.CoverageData()
data.read()
- summary = data.line_counts()
+ summary = line_counts(data)
print(summary)
self.assertEqual(summary[source + '.py'], 3)
self.assertEqual(len(summary), 1)
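
The new helper does not depend on which concrete data class is active (CoverageData is either CoverageJsonData or CoverageSqliteData, per the hunk in coverage/data.py above); it only calls measured_files() and lines() on whatever object it is given. A rough sketch of that minimal interface, using a hypothetical stand-in object that is not part of this change:

    from coverage.data import line_counts

    class FakeCoverageData(object):
        # Hypothetical stand-in exposing only the two methods line_counts uses.
        def __init__(self, lines_by_file):
            self._lines_by_file = lines_by_file

        def measured_files(self):
            return list(self._lines_by_file)

        def lines(self, filename):
            return self._lines_by_file[filename]

    fake = FakeCoverageData({'/src/a.py': [1, 2, 3], '/src/b.py': [1]})
    assert line_counts(fake) == {'a.py': 3, 'b.py': 1}
    assert line_counts(fake, fullpath=True) == {'/src/a.py': 3, '/src/b.py': 1}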