Diffstat (limited to 'coverage')
 -rw-r--r--  coverage/__init__.py      |   6
 -rw-r--r--  coverage/backunittest.py  |  41
 -rw-r--r--  coverage/codeunit.py      |  51
 -rw-r--r--  coverage/collector.py     | 156
 -rw-r--r--  coverage/config.py        |  31
 -rw-r--r--  coverage/control.py       | 186
 -rw-r--r--  coverage/data.py          |  16
 -rw-r--r--  coverage/debug.py         |   2
 -rw-r--r--  coverage/extension.py     |  20
 -rw-r--r--  coverage/files.py         |   2
 -rw-r--r--  coverage/html.py          |   4
 -rw-r--r--  coverage/plugin.py        | 108
 -rw-r--r--  coverage/report.py        |   4
 -rw-r--r--  coverage/templite.py      |  45
 -rw-r--r--  coverage/test_helpers.py  | 268
 -rw-r--r--  coverage/tracer.c         |  59
16 files changed, 769 insertions(+), 230 deletions(-)
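
Before the per-file hunks, a short orientation: this commit renames the "extension" machinery to "plugins" and introduces coverage/plugin.py with a CoveragePlugin base class that control.py and collector.py consult. The sketch below is illustrative only — the module, the ".tmpl" rule, and the class body are assumptions — but the hook names (trace_judge, line_number_range) and the disposition attributes (trace, source_filename, canonical_filename, original_filename) are the ones used in the hunks that follow. The loader in plugin.py imports each named module and looks for a module-level class called Plugin.

    # Illustrative sketch of a plugin module as Plugins.load_plugins()
    # would import it.  The module name and the ".tmpl" rule are hypothetical.
    from coverage.plugin import CoveragePlugin

    class Plugin(CoveragePlugin):
        """Claim hypothetical ".tmpl" files for this plugin."""

        def trace_judge(self, disposition):
            # control.Coverage._should_trace() passes a FileDisposition here;
            # setting .trace and .source_filename claims the file.
            if disposition.original_filename.endswith(".tmpl"):
                disposition.trace = True
                disposition.source_filename = disposition.canonical_filename

        def line_number_range(self, frame):
            # Called by the tracers for frames attributed to this plugin.
            # (Not declared on the base class in this diff, but invoked by
            # collector.PyTracer; identity mapping shown as a placeholder.)
            return frame.f_lineno, frame.f_lineno
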
diff --git a/coverage/__init__.py b/coverage/__init__.py index 193b7a10..5ae32aba 100644 --- a/coverage/__init__.py +++ b/coverage/__init__.py @@ -7,10 +7,14 @@ http://nedbatchelder.com/code/coverage from coverage.version import __version__, __url__ -from coverage.control import coverage, process_startup +from coverage.control import Coverage, process_startup from coverage.data import CoverageData from coverage.cmdline import main, CoverageScript from coverage.misc import CoverageException +from coverage.plugin import CoveragePlugin + +# Backward compatibility. +coverage = Coverage # Module-level functions. The original API to this module was based on # functions defined directly in the module, with a singleton of the coverage() diff --git a/coverage/backunittest.py b/coverage/backunittest.py new file mode 100644 index 00000000..6498397f --- /dev/null +++ b/coverage/backunittest.py @@ -0,0 +1,41 @@ +"""Implementations of unittest features from the future.""" + +# Use unittest2 if it's available, otherwise unittest. This gives us +# backported features for 2.6. +try: + import unittest2 as unittest # pylint: disable=F0401 +except ImportError: + import unittest + + +def unittest_has(method): + """Does `unitttest.TestCase` have `method` defined?""" + return hasattr(unittest.TestCase, method) + + +class TestCase(unittest.TestCase): + """Just like unittest.TestCase, but with assert methods added. + + Designed to be compatible with 3.1 unittest. Methods are only defined if + `unittest` doesn't have them. + + """ + # pylint: disable=missing-docstring + + if not unittest_has('assertCountEqual'): + if unittest_has('assertSameElements'): + def assertCountEqual(self, *args, **kwargs): + # pylint: disable=no-member + return self.assertSameElements(*args, **kwargs) + else: + def assertCountEqual(self, s1, s2): + """Assert these have the same elements, regardless of order.""" + self.assertEqual(set(s1), set(s2)) + + if not unittest_has('assertRaisesRegex'): + def assertRaisesRegex(self, *args, **kwargs): + return self.assertRaisesRegexp(*args, **kwargs) + + if not unittest_has('assertRegex'): + def assertRegex(self, *args, **kwargs): + return self.assertRegexpMatches(*args, **kwargs) diff --git a/coverage/codeunit.py b/coverage/codeunit.py index 35167a72..4e752aaa 100644 --- a/coverage/codeunit.py +++ b/coverage/codeunit.py @@ -10,14 +10,16 @@ from coverage.phystokens import source_token_lines, source_encoding from coverage.django import DjangoTracer -def code_unit_factory(morfs, file_locator, get_ext=None): +def code_unit_factory(morfs, file_locator, get_plugin=None): """Construct a list of CodeUnits from polymorphic inputs. `morfs` is a module or a filename, or a list of same. `file_locator` is a FileLocator that can help resolve filenames. - `get_ext` TODO + `get_plugin` is a function taking a filename, and returning a plugin + responsible for the file. It can also return None if there is no plugin + claiming the file. Returns a list of CodeUnit objects. @@ -26,15 +28,14 @@ def code_unit_factory(morfs, file_locator, get_ext=None): if not isinstance(morfs, (list, tuple)): morfs = [morfs] - django_tracer = DjangoTracer() - code_units = [] for morf in morfs: - ext = None - if isinstance(morf, string_class) and get_ext: - ext = get_ext(morf) - if ext: - klass = DjangoTracer # NOT REALLY! TODO + plugin = None + if isinstance(morf, string_class) and get_plugin: + plugin = get_plugin(morf) + if plugin: + klass = plugin.code_unit_class(morf) + #klass = DjangoTracer # NOT REALLY! TODO # Hacked-in Mako support. 
Define COVERAGE_MAKO_PATH as a fragment of # the path that indicates the Python file is actually a compiled Mako # template. THIS IS TEMPORARY! @@ -91,6 +92,8 @@ class CodeUnit(object): self.name = n self.modname = modname + self._source = None + def __repr__(self): return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename) @@ -131,6 +134,11 @@ class CodeUnit(object): return root.replace('\\', '_').replace('/', '_').replace('.', '_') def source(self): + if self._source is None: + self._source = self.get_source() + return self._source + + def get_source(self): """Return the source code, as a string.""" if os.path.exists(self.filename): # A regular text file: open it. @@ -147,10 +155,9 @@ class CodeUnit(object): "No source for code '%s'." % self.filename ) - def source_token_lines(self, source): + def source_token_lines(self): """Return the 'tokenized' text for the code.""" - # TODO: Taking source here is wrong, change it? - for line in source.splitlines(): + for line in self.source().splitlines(): yield [('txt', line)] def should_be_python(self): @@ -162,6 +169,9 @@ class CodeUnit(object): """ return False + def get_parser(self, exclude=None): + raise NotImplementedError + class PythonCodeUnit(CodeUnit): """Represents a Python file.""" @@ -238,11 +248,11 @@ class PythonCodeUnit(CodeUnit): # Everything else is probably not Python. return False - def source_token_lines(self, source): - return source_token_lines(source) + def source_token_lines(self): + return source_token_lines(self.source()) - def source_encoding(self, source): - return source_encoding(source) + def source_encoding(self): + return source_encoding(self.source()) class MakoParser(CodeParser): @@ -271,26 +281,25 @@ class MakoCodeUnit(CodeUnit): py_source = open(self.filename).read() self.metadata = ModuleInfo.get_module_source_metadata(py_source, full_line_map=True) - def source(self): + def get_source(self): return open(self.metadata['filename']).read() def get_parser(self, exclude=None): return MakoParser(self.metadata) - def source_encoding(self, source): - # TODO: Taking source here is wrong, change it! + def source_encoding(self): return self.metadata['source_encoding'] class DjangoCodeUnit(CodeUnit): - def source(self): + def get_source(self): with open(self.filename) as f: return f.read() def get_parser(self, exclude=None): return DjangoParser(self.filename) - def source_encoding(self, source): + def source_encoding(self): return "utf8" diff --git a/coverage/collector.py b/coverage/collector.py index 546525d2..4caf6363 100644 --- a/coverage/collector.py +++ b/coverage/collector.py @@ -1,6 +1,6 @@ """Raw data collector for Coverage.""" -import collections, os, sys, threading +import collections, os, sys try: # Use the C extension code when we can, for speed. @@ -47,12 +47,14 @@ class PyTracer(object): self.should_trace = None self.should_trace_cache = None self.warn = None - self.extensions = None + self.plugin_data = None + # The threading module to use, if any. + self.threading = None + + self.plugin = [] + self.cur_file_dict = [] + self.last_line = [0] - self.extension = None - self.cur_tracename = None # TODO: This is only maintained for the if0 debugging output. Get rid of it eventually. 
- self.cur_file_data = None - self.last_line = 0 self.data_stack = [] self.data_stacks = collections.defaultdict(list) self.last_exc_back = None @@ -62,48 +64,28 @@ class PyTracer(object): self.coroutine_id_func = None self.last_coroutine = None + def __repr__(self): + return "<PyTracer at 0x{0:0x}: {1} lines in {2} files>".format( + id(self), + sum(len(v) for v in self.data.values()), + len(self.data), + ) + def _trace(self, frame, event, arg_unused): """The trace function passed to sys.settrace.""" if self.stopped: return - if 0: - # A lot of debugging to try to understand why gevent isn't right. - import os.path, pprint - def short_ident(ident): - return "{}:{:06X}".format(ident.__class__.__name__, id(ident) & 0xFFFFFF) - - ident = None - if self.coroutine_id_func: - ident = short_ident(self.coroutine_id_func()) - sys.stdout.write("trace event: %s %s %r @%d\n" % ( - event, ident, frame.f_code.co_filename, frame.f_lineno - )) - pprint.pprint( - dict( - ( - short_ident(ident), - [ - (os.path.basename(tn or ""), sorted((cfd or {}).keys()), ll) - for ex, tn, cfd, ll in data_stacks - ] - ) - for ident, data_stacks in self.data_stacks.items() - ) - , width=250) - pprint.pprint(sorted((self.cur_file_data or {}).keys()), width=250) - print("TRYING: {}".format(sorted(next((v for k,v in self.data.items() if k.endswith("try_it.py")), {}).keys()))) - - if self.last_exc_back: + if self.last_exc_back: # TODO: bring this up to speed if frame == self.last_exc_back: # Someone forgot a return event. - if self.arcs and self.cur_file_data: + if self.arcs and self.cur_file_dict: pair = (self.last_line, -self.last_exc_firstlineno) - self.cur_file_data[pair] = None + self.cur_file_dict[pair] = None if self.coroutine_id_func: self.data_stack = self.data_stacks[self.coroutine_id_func()] - self.handler, _, self.cur_file_data, self.last_line = self.data_stack.pop() + self.plugin, self.cur_file_dict, self.last_line = self.data_stack.pop() self.last_exc_back = None if event == 'call': @@ -112,27 +94,33 @@ class PyTracer(object): if self.coroutine_id_func: self.data_stack = self.data_stacks[self.coroutine_id_func()] self.last_coroutine = self.coroutine_id_func() - self.data_stack.append((self.extension, self.cur_tracename, self.cur_file_data, self.last_line)) + self.data_stack.append((self.plugin, self.cur_file_dict, self.last_line)) filename = frame.f_code.co_filename disp = self.should_trace_cache.get(filename) if disp is None: disp = self.should_trace(filename, frame) self.should_trace_cache[filename] = disp - #print("called, stack is %d deep, tracename is %r" % ( - # len(self.data_stack), tracename)) - tracename = disp.filename - if tracename and disp.extension: - tracename = disp.extension.file_name(frame) + + self.plugin = None + self.cur_file_dict = None + if disp.trace: + tracename = disp.source_filename + if disp.plugin: + dyn_func = disp.plugin.dynamic_source_file_name() + if dyn_func: + tracename = dyn_func(tracename, frame) + if tracename: + if not self.check_include(tracename): + tracename = None + else: + tracename = None if tracename: if tracename not in self.data: self.data[tracename] = {} - if disp.extension: - self.extensions[tracename] = disp.extension.__name__ - self.cur_tracename = tracename - self.cur_file_data = self.data[tracename] - self.extension = disp.extension - else: - self.cur_file_data = None + if disp.plugin: + self.plugin_data[tracename] = disp.plugin.__name__ + self.cur_file_dict = self.data[tracename] + self.plugin = disp.plugin # Set the last_line to -1 because the next arc will 
be entering a # code block, indicated by (-1, n). self.last_line = -1 @@ -142,32 +130,29 @@ class PyTracer(object): this_coroutine = self.coroutine_id_func() if self.last_coroutine != this_coroutine: print("mismatch: {0} != {1}".format(self.last_coroutine, this_coroutine)) - if self.extension: - lineno_from, lineno_to = self.extension.line_number_range(frame) + + if self.plugin: + lineno_from, lineno_to = self.plugin.line_number_range(frame) else: lineno_from, lineno_to = frame.f_lineno, frame.f_lineno if lineno_from != -1: - if self.cur_file_data is not None: + if self.cur_file_dict is not None: if self.arcs: - #print("lin", self.last_line, frame.f_lineno) - self.cur_file_data[(self.last_line, lineno_from)] = None + self.cur_file_dict[(self.last_line, lineno_from)] = None else: - #print("lin", frame.f_lineno) for lineno in range(lineno_from, lineno_to+1): - self.cur_file_data[lineno] = None + self.cur_file_dict[lineno] = None self.last_line = lineno_to elif event == 'return': - if self.arcs and self.cur_file_data: + if self.arcs and self.cur_file_dict: first = frame.f_code.co_firstlineno - self.cur_file_data[(self.last_line, -first)] = None + self.cur_file_dict[(self.last_line, -first)] = None # Leaving this function, pop the filename stack. if self.coroutine_id_func: self.data_stack = self.data_stacks[self.coroutine_id_func()] self.last_coroutine = self.coroutine_id_func() - self.extension, _, self.cur_file_data, self.last_line = self.data_stack.pop() - #print("returned, stack is %d deep" % (len(self.data_stack))) + self.plugin, self.cur_file_dict, self.last_line = self.data_stack.pop() elif event == 'exception': - #print("exc", self.last_line, frame.f_lineno) self.last_exc_back = frame.f_back self.last_exc_firstlineno = frame.f_code.co_firstlineno return self._trace @@ -178,14 +163,15 @@ class PyTracer(object): Return a Python function suitable for use with sys.settrace(). """ - self.thread = threading.currentThread() + if self.threading: + self.thread = self.threading.currentThread() sys.settrace(self._trace) return self._trace def stop(self): """Stop this Tracer.""" self.stopped = True - if self.thread != threading.currentThread(): + if self.threading and self.thread != self.threading.currentThread(): # Called on a different thread than started us: we can't unhook # ourseves, but we've set the flag that we should stop, so we won't # do any more tracing. @@ -195,7 +181,7 @@ class PyTracer(object): if sys.gettrace() != self._trace: msg = "Trace function changed, measurement is likely wrong: %r" self.warn(msg % (sys.gettrace(),)) - #print("Stopping tracer on %s" % threading.current_thread().ident) + sys.settrace(None) def get_stats(self): @@ -224,13 +210,15 @@ class Collector(object): # the top, and resumed when they become the top again. _collectors = [] - def __init__(self, should_trace, timid, branch, warn, coroutine): + def __init__(self, should_trace, check_include, timid, branch, warn, coroutine): """Create a collector. `should_trace` is a function, taking a filename, and returning a canonicalized filename, or None depending on whether the file should be traced or not. + TODO: `check_include` + If `timid` is true, then a slower simpler trace function will be used. This is important for some environments where manipulation of tracing functions make the faster more sophisticated trace function not @@ -243,10 +231,15 @@ class Collector(object): `warn` is a warning function, taking a single string message argument, to be used if a warning needs to be issued. 
+ TODO: `coroutine` + """ self.should_trace = should_trace + self.check_include = check_include self.warn = warn self.branch = branch + self.threading = None + if coroutine == "greenlet": import greenlet self.coroutine_id_func = greenlet.getcurrent @@ -257,7 +250,13 @@ class Collector(object): import gevent self.coroutine_id_func = gevent.getcurrent else: + # It's important to import threading only if we need it. If it's + # imported early, and the program being measured uses gevent, then + # gevent's monkey-patching won't work properly. + import threading self.coroutine_id_func = None + self.threading = threading + self.reset() if timid: @@ -281,7 +280,7 @@ class Collector(object): # or mapping filenames to dicts with linenumber pairs as keys. self.data = {} - self.extensions = {} + self.plugin_data = {} # A cache of the results from should_trace, the decision about whether # to trace execution in a file. A dict of filename to (filename or @@ -301,8 +300,10 @@ class Collector(object): tracer.warn = self.warn if hasattr(tracer, 'coroutine_id_func'): tracer.coroutine_id_func = self.coroutine_id_func - if hasattr(tracer, 'extensions'): - tracer.extensions = self.extensions + if hasattr(tracer, 'plugin_data'): + tracer.plugin_data = self.plugin_data + if hasattr(tracer, 'threading'): + tracer.threading = self.threading fn = tracer.start() self.tracers.append(tracer) return fn @@ -331,7 +332,6 @@ class Collector(object): if self._collectors: self._collectors[-1].pause() self._collectors.append(self) - #print("Started: %r" % self._collectors, file=sys.stderr) # Check to see whether we had a fullcoverage tracer installed. traces0 = [] @@ -356,11 +356,11 @@ class Collector(object): # Install our installation tracer in threading, to jump start other # threads. - threading.settrace(self._installation_trace) + if self.threading: + self.threading.settrace(self._installation_trace) def stop(self): """Stop collecting trace information.""" - #print >>sys.stderr, "Stopping: %r" % self._collectors assert self._collectors assert self._collectors[-1] is self @@ -382,13 +382,17 @@ class Collector(object): print("\nCoverage.py tracer stats:") for k in sorted(stats.keys()): print("%16s: %s" % (k, stats[k])) - threading.settrace(None) + if self.threading: + self.threading.settrace(None) def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() - threading.settrace(self._installation_trace) + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() def get_line_data(self): """Return the line data collected. @@ -420,5 +424,5 @@ class Collector(object): else: return {} - def get_extension_data(self): - return self.extensions + def get_plugin_data(self): + return self.plugin_data diff --git a/coverage/config.py b/coverage/config.py index 064bc1ca..c671ef75 100644 --- a/coverage/config.py +++ b/coverage/config.py @@ -37,6 +37,13 @@ class HandyConfigParser(configparser.RawConfigParser): section = self.section_prefix + section return configparser.RawConfigParser.options(self, section) + def get_section(self, section): + """Get the contents of a section, as a dictionary.""" + d = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + def get(self, section, *args, **kwargs): """Get a value, replacing environment variables also. 
@@ -140,7 +147,7 @@ class CoverageConfig(object): self.timid = False self.source = None self.debug = [] - self.extensions = [] + self.plugins = [] # Defaults for [report] self.exclude_list = DEFAULT_EXCLUDE[:] @@ -163,6 +170,9 @@ class CoverageConfig(object): # Defaults for [paths] self.paths = {} + # Options for plugins + self.plugin_options = {} + def from_environment(self, env_var): """Read configuration from the `env_var` environment variable.""" # Timidity: for nose users, read an environment variable. This is a @@ -172,7 +182,7 @@ class CoverageConfig(object): if env: self.timid = ('--timid' in env) - MUST_BE_LIST = ["omit", "include", "debug", "extensions"] + MUST_BE_LIST = ["omit", "include", "debug", "plugins"] def from_args(self, **kwargs): """Read config values from `kwargs`.""" @@ -200,17 +210,22 @@ class CoverageConfig(object): self.config_files.extend(files_read) for option_spec in self.CONFIG_FILE_OPTIONS: - self.set_attr_from_config_option(cp, *option_spec) + self._set_attr_from_config_option(cp, *option_spec) # [paths] is special if cp.has_section('paths'): for option in cp.options('paths'): self.paths[option] = cp.getlist('paths', option) + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + return True CONFIG_FILE_OPTIONS = [ - # These are *args for set_attr_from_config_option: + # These are *args for _set_attr_from_config_option: # (attr, where, type_="") # # attr is the attribute to set on the CoverageConfig object. @@ -224,7 +239,7 @@ class CoverageConfig(object): ('cover_pylib', 'run:cover_pylib', 'boolean'), ('data_file', 'run:data_file'), ('debug', 'run:debug', 'list'), - ('extensions', 'run:extensions', 'list'), + ('plugins', 'run:plugins', 'list'), ('include', 'run:include', 'list'), ('omit', 'run:omit', 'list'), ('parallel', 'run:parallel', 'boolean'), @@ -250,9 +265,13 @@ class CoverageConfig(object): ('xml_output', 'xml:output'), ] - def set_attr_from_config_option(self, cp, attr, where, type_=''): + def _set_attr_from_config_option(self, cp, attr, where, type_=''): """Set an attribute on self if it exists in the ConfigParser.""" section, option = where.split(":") if cp.has_option(section, option): method = getattr(cp, 'get'+type_) setattr(self, attr, method(section, option)) + + def get_plugin_options(self, plugin): + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) diff --git a/coverage/control.py b/coverage/control.py index cb917e52..86a2ae23 100644 --- a/coverage/control.py +++ b/coverage/control.py @@ -1,6 +1,6 @@ """Core control stuff for Coverage.""" -import atexit, os, random, socket, sys +import atexit, os, platform, random, socket, sys from coverage.annotate import AnnotateReporter from coverage.backward import string_class, iitems @@ -9,7 +9,7 @@ from coverage.collector import Collector from coverage.config import CoverageConfig from coverage.data import CoverageData from coverage.debug import DebugControl -from coverage.extension import load_extensions +from coverage.plugin import Plugins, plugin_implements from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher from coverage.files import PathAliases, find_python_files, prep_patterns from coverage.html import HtmlReporter @@ -28,14 +28,14 @@ except ImportError: _structseq = None -class coverage(object): +class Coverage(object): """Programmatic access to coverage.py. 
To use:: from coverage import coverage - cov = coverage() + cov = Coverage() cov.start() #.. call your code .. cov.stop() @@ -45,7 +45,7 @@ class coverage(object): def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, auto_data=False, timid=None, branch=None, config_file=True, source=None, omit=None, include=None, debug=None, - debug_file=None, coroutine=None): + debug_file=None, coroutine=None, plugins=None): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to @@ -87,7 +87,9 @@ class coverage(object): `coroutine` is a string indicating the coroutining library being used in the measured code. Without this, coverage.py will get incorrect results. Valid strings are "greenlet", "eventlet", or "gevent", which - are all equivalent. + are all equivalent. TODO: really? + + `plugins` TODO. """ from coverage import __version__ @@ -126,15 +128,20 @@ class coverage(object): data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, - coroutine=coroutine, + coroutine=coroutine, plugins=plugins, ) # Create and configure the debugging controller. self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) - # Load extensions - tracer_classes = load_extensions(self.config.extensions, "tracer") - self.tracer_extensions = [cls() for cls in tracer_classes] + # Load plugins + self.plugins = Plugins.load_plugins(self.config.plugins, self.config) + + self.trace_judges = [] + for plugin in self.plugins: + if plugin_implements(plugin, "trace_judge"): + self.trace_judges.append(plugin) + self.trace_judges.append(None) # The Python case. self.auto_data = auto_data @@ -158,8 +165,11 @@ class coverage(object): self.include = prep_patterns(self.config.include) self.collector = Collector( - self._should_trace, timid=self.config.timid, - branch=self.config.branch, warn=self._warn, + should_trace=self._should_trace, + check_include=self._tracing_check_include_omit_etc, + timid=self.config.timid, + branch=self.config.branch, + warn=self._warn, coroutine=self.config.coroutine, ) @@ -186,18 +196,16 @@ class coverage(object): ) # The dirs for files considered "installed with the interpreter". - self.pylib_dirs = [] + self.pylib_dirs = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. - for m in (atexit, os, random, socket, _structseq): + for m in (atexit, os, platform, random, socket, _structseq): if m is not None and hasattr(m, "__file__"): - m_dir = self._canonical_dir(m) - if m_dir not in self.pylib_dirs: - self.pylib_dirs.append(m_dir) + self.pylib_dirs.add(self._canonical_dir(m)) # To avoid tracing the coverage code itself, we skip anything located # where we are. @@ -247,20 +255,10 @@ class coverage(object): """ disp = FileDisposition(filename) - - if not filename: - # Empty string is pretty useless - return disp.nope("empty string isn't a filename") - - if filename.startswith('memory:'): - return disp.nope("memory isn't traceable") - - if filename.startswith('<'): - # Lots of non-file execution is represented with artificial - # filenames like "<string>", "<doctest readme.txt[0]>", or - # "<exec_function>". 
Don't ever trace these executions, since we - # can't do anything with the data later anyway. - return disp.nope("not a real filename") + def nope(disp, reason): + disp.trace = False + disp.reason = reason + return disp self._check_for_packages() @@ -274,46 +272,80 @@ class coverage(object): if dunder_file: filename = self._source_for_file(dunder_file) + if not filename: + # Empty string is pretty useless + return nope(disp, "empty string isn't a filename") + + if filename.startswith('memory:'): + return nope(disp, "memory isn't traceable") + + if filename.startswith('<'): + # Lots of non-file execution is represented with artificial + # filenames like "<string>", "<doctest readme.txt[0]>", or + # "<exec_function>". Don't ever trace these executions, since we + # can't do anything with the data later anyway. + return nope(disp, "not a real filename") + # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) + disp.canonical_filename = canonical + + # Try the plugins, see if they have an opinion about the file. + for plugin in self.trace_judges: + if plugin: + plugin.trace_judge(disp) + else: + disp.trace = True + disp.source_filename = canonical + if disp.trace: + disp.plugin = plugin + + if disp.check_filters: + reason = self._check_include_omit_etc(disp.source_filename) + if reason: + nope(disp, reason) + + return disp + + return nope(disp, "no plugin found") # TODO: a test that causes this. - # Try the extensions, see if they have an opinion about the file. - for tracer in self.tracer_extensions: - ext_disp = tracer.should_trace(canonical) - if ext_disp: - ext_disp.extension = tracer - return ext_disp + def _check_include_omit_etc(self, filename): + """Check a filename against the include, omit, etc, rules. + Returns a string or None. String means, don't trace, and is the reason + why. None means no reason found to not trace. + + """ # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match: - if not self.source_match.match(canonical): - return disp.nope("falls outside the --source trees") + if not self.source_match.match(filename): + return "falls outside the --source trees" elif self.include_match: - if not self.include_match.match(canonical): - return disp.nope("falls outside the --include trees") + if not self.include_match.match(filename): + return "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. - if self.pylib_match and self.pylib_match.match(canonical): - return disp.nope("is in the stdlib") + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. - if self.cover_match and self.cover_match.match(canonical): - return disp.nope("is part of coverage.py") + if self.cover_match and self.cover_match.match(filename): + return "is part of coverage.py" # Check the file against the omit pattern. 
- if self.omit_match and self.omit_match.match(canonical): - return disp.nope("is inside an --omit pattern") + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" - disp.filename = canonical - return disp + # No reason found to skip this file. + return None def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. @@ -326,6 +358,22 @@ class coverage(object): self.debug.write(disp.debug_message()) return disp + def _tracing_check_include_omit_etc(self, filename): + """Check a filename against the include, omit, etc, rules, and say so. + + Returns a boolean: True if the file should be traced, False if not. + + """ + reason = self._check_include_omit_etc(filename) + if self.debug.should('trace'): + if not reason: + msg = "Tracing %r" % (filename,) + else: + msg = "Not tracing %r: %s" % (filename, reason) + self.debug.write(msg) + + return not reason + def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) @@ -545,7 +593,7 @@ class coverage(object): # TODO: seems like this parallel structure is getting kinda old... self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) - self.data.add_extension_data(self.collector.get_extension_data()) + self.data.add_plugin_data(self.collector.get_plugin_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never @@ -611,10 +659,17 @@ class coverage(object): Returns an `Analysis` object. """ + def get_plugin(filename): + """For code_unit_factory to use to find the plugin for a file.""" + plugin = None + plugin_name = self.data.plugin_data().get(filename) + if plugin_name: + plugin = self.plugins.get(plugin_name) + return plugin + self._harvest_data() if not isinstance(it, CodeUnit): - get_ext = self.data.extension_data().get - it = code_unit_factory(it, self.file_locator, get_ext)[0] + it = code_unit_factory(it, self.file_locator, get_plugin)[0] return Analysis(self, it) @@ -738,7 +793,6 @@ class coverage(object): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod - import platform, re try: implementation = platform.python_implementation() @@ -760,10 +814,10 @@ class coverage(object): ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), - ('environment', sorted([ + ('environment', sorted( ("%s = %s" % (k, v)) for k, v in iitems(os.environ) - if re.search(r"^COV|^PY", k) - ])), + if k.startswith(("COV", "PY")) + )), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] if self.source_match: @@ -784,21 +838,19 @@ class FileDisposition(object): """A simple object for noting a number of details of files to trace.""" def __init__(self, original_filename): self.original_filename = original_filename - self.filename = None + self.canonical_filename = original_filename + self.source_filename = None + self.check_filters = True + self.trace = False self.reason = "" - self.extension = None - - def nope(self, reason): - """A helper for returning a NO answer from should_trace.""" - self.reason = reason - return self + self.plugin = None def debug_message(self): """Produce a debugging message explaining the outcome.""" - if not self.filename: - msg = "Not tracing %r: %s" % (self.original_filename, self.reason) - else: + if self.trace: msg = "Tracing %r" % (self.original_filename,) + else: + msg = "Not tracing %r: %s" % (self.original_filename, self.reason) return msg @@ -824,7 +876,7 @@ def 
process_startup(): """ cps = os.environ.get("COVERAGE_PROCESS_START") if cps: - cov = coverage(config_file=cps, auto_data=True) + cov = Coverage(config_file=cps, auto_data=True) cov.start() cov._warn_no_data = False cov._warn_unimported_source = False diff --git a/coverage/data.py b/coverage/data.py index b78c931d..e220a364 100644 --- a/coverage/data.py +++ b/coverage/data.py @@ -21,9 +21,9 @@ class CoverageData(object): * arcs: a dict mapping filenames to sorted lists of line number pairs: { 'file1': [(17,23), (17,25), (25,26)], ... } - * extensions: a dict mapping filenames to extension names: + * plugins: a dict mapping filenames to plugin names: { 'file1': "django.coverage", ... } - # TODO: how to handle the difference between a extension module + # TODO: how to handle the difference between a plugin module # name, and the class in the module? """ @@ -69,13 +69,13 @@ class CoverageData(object): # self.arcs = {} - # A map from canonical source file name to an extension module name: + # A map from canonical source file name to an plugin module name: # # { # 'filename1.py': 'django.coverage', # ... # } - self.extensions = {} + self.plugins = {} def usefile(self, use_file=True): """Set whether or not to use a disk file for data.""" @@ -123,8 +123,8 @@ class CoverageData(object): (f, sorted(amap.keys())) for f, amap in iitems(self.arcs) ) - def extension_data(self): - return self.extensions + def plugin_data(self): + return self.plugins def write_file(self, filename): """Write the coverage data to `filename`.""" @@ -229,8 +229,8 @@ class CoverageData(object): for filename, arcs in iitems(arc_data): self.arcs.setdefault(filename, {}).update(arcs) - def add_extension_data(self, extension_data): - self.extensions.update(extension_data) + def add_plugin_data(self, plugin_data): + self.plugins.update(plugin_data) def touch_file(self, filename): """Ensure that `filename` appears in the data, empty if needed.""" diff --git a/coverage/debug.py b/coverage/debug.py index 6908383d..6e7af242 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -45,7 +45,7 @@ def info_formatter(info): for label, data in info: if data == []: data = "-none-" - if isinstance(data, (list, tuple)): + if isinstance(data, (list, set, tuple)): prefix = "%*s:" % (label_len, label) for e in data: yield "%*s %s" % (label_len+1, prefix, e) diff --git a/coverage/extension.py b/coverage/extension.py deleted file mode 100644 index 8c89b88e..00000000 --- a/coverage/extension.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Extension management for coverage.py""" - -def load_extensions(modules, name): - """Load extensions from `modules`, finding them by `name`. - - Yields the loaded extensions. - - """ - - for module in modules: - try: - __import__(module) - mod = sys.modules[module] - except ImportError: - blah() - continue - - entry = getattr(mod, name, None) - if entry: - yield entry diff --git a/coverage/files.py b/coverage/files.py index 08ce1e84..72c0bf92 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -147,7 +147,7 @@ def prep_patterns(patterns): class TreeMatcher(object): """A matcher for files in a tree.""" def __init__(self, directories): - self.dirs = directories[:] + self.dirs = list(directories) def __repr__(self): return "<TreeMatcher %r>" % self.dirs diff --git a/coverage/html.py b/coverage/html.py index 85f47ab4..863d1508 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -164,7 +164,7 @@ class HtmlReporter(Reporter): # If need be, determine the encoding of the source file. 
We use it # later to properly write the HTML. if sys.version_info < (3, 0): - encoding = cu.source_encoding(source) + encoding = cu.source_encoding() # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it. if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf": source = source[3:] @@ -184,7 +184,7 @@ class HtmlReporter(Reporter): lines = [] - for lineno, line in enumerate(cu.source_token_lines(source), start=1): + for lineno, line in enumerate(cu.source_token_lines(), start=1): # Figure out how to mark this line. line_class = [] annotate_html = "" diff --git a/coverage/plugin.py b/coverage/plugin.py new file mode 100644 index 00000000..35be41a9 --- /dev/null +++ b/coverage/plugin.py @@ -0,0 +1,108 @@ +"""Plugin management for coverage.py""" + +import sys + + +class CoveragePlugin(object): + """Base class for coverage.py plugins.""" + def __init__(self, options): + self.options = options + + def trace_judge(self, disposition): + """Decide whether to trace this file with this plugin. + + Set disposition.trace to True if this plugin should trace this file. + May also set other attributes in `disposition`. + + """ + return None + + def source_file_name(self, filename): + """Return the source name for a given Python filename. + + Can return None if tracing shouldn't continue. + + """ + return filename + + def dynamic_source_file_name(self): + """Returns a callable that can return a source name for a frame. + + The callable should take a filename and a frame, and return either a + filename or None: + + def dynamic_source_filename_func(filename, frame) + + Can return None if dynamic filenames aren't needed. + + """ + return None + + def code_unit_class(self, morf): + """Return the CodeUnit class to use for a module or filename.""" + return None + + +class Plugins(object): + """The currently loaded collection of coverage.py plugins.""" + + def __init__(self): + self.order = [] + self.names = {} + + @classmethod + def load_plugins(cls, modules, config): + """Load plugins from `modules`. + + Returns a list of loaded and configured plugins. + + """ + plugins = cls() + + for module in modules: + __import__(module) + mod = sys.modules[module] + + plugin_class = getattr(mod, "Plugin", None) + if plugin_class: + options = config.get_plugin_options(module) + plugin = plugin_class(options) + plugin.__name__ = module + plugins.order.append(plugin) + plugins.names[module] = plugin + + return plugins + + def __iter__(self): + return iter(self.order) + + def get(self, module): + return self.names[module] + + +def overrides(obj, method_name, base_class): + """Does `obj` override the `method_name` it got from `base_class`? + + Determine if `obj` implements the method called `method_name`, which it + inherited from `base_class`. + + Returns a boolean. + + """ + klass = obj.__class__ + klass_func = getattr(klass, method_name) + base_func = getattr(base_class, method_name) + + # Python 2/3 compatibility: Python 2 returns an instancemethod object, the + # function is the .im_func attribute. Python 3 returns a plain function + # object already. 
+ if sys.version_info < (3, 0): + klass_func = klass_func.im_func + base_func = base_func.im_func + + return klass_func is not base_func + + +def plugin_implements(obj, method_name): + """Does the plugin `obj` implement `method_name`?""" + return overrides(obj, method_name, CoveragePlugin) diff --git a/coverage/report.py b/coverage/report.py index 7627d1aa..b93749c8 100644 --- a/coverage/report.py +++ b/coverage/report.py @@ -33,8 +33,8 @@ class Reporter(object): """ morfs = morfs or self.coverage.data.measured_files() file_locator = self.coverage.file_locator - get_ext = self.coverage.data.extension_data().get - self.code_units = code_unit_factory(morfs, file_locator, get_ext) + get_plugin = self.coverage.data.plugin_data().get + self.code_units = code_unit_factory(morfs, file_locator, get_plugin) if self.config.include: patterns = prep_patterns(self.config.include) diff --git a/coverage/templite.py b/coverage/templite.py index a71caf63..53824e08 100644 --- a/coverage/templite.py +++ b/coverage/templite.py @@ -15,7 +15,7 @@ class CodeBuilder(object): def __init__(self, indent=0): self.code = [] - self.ident_level = indent + self.indent_level = indent def __str__(self): return "".join(str(c) for c in self.code) @@ -26,28 +26,28 @@ class CodeBuilder(object): Indentation and newline will be added for you, don't provide them. """ - self.code.extend([" " * self.ident_level, line, "\n"]) + self.code.extend([" " * self.indent_level, line, "\n"]) - def add_subbuilder(self): + def add_section(self): """Add a section, a sub-CodeBuilder.""" - sect = CodeBuilder(self.ident_level) - self.code.append(sect) - return sect + section = CodeBuilder(self.indent_level) + self.code.append(section) + return section INDENT_STEP = 4 # PEP8 says so! def indent(self): """Increase the current indent for following lines.""" - self.ident_level += self.INDENT_STEP + self.indent_level += self.INDENT_STEP def dedent(self): """Decrease the current indent for following lines.""" - self.ident_level -= self.INDENT_STEP + self.indent_level -= self.INDENT_STEP def get_globals(self): - """Compile the code, and return a dict of globals it defines.""" + """Execute the code, and return a dict of globals it defines.""" # A check that the caller really finished all the blocks they started. - assert self.ident_level == 0 + assert self.indent_level == 0 # Get the Python source as a single string. python_source = str(self) # Execute the source, defining globals, and return them. @@ -110,21 +110,21 @@ class Templite(object): # it, and execute it to render the template. code = CodeBuilder() - code.add_line("def render_function(ctx, do_dots):") + code.add_line("def render_function(context, do_dots):") code.indent() - vars_code = code.add_subbuilder() + vars_code = code.add_section() code.add_line("result = []") - code.add_line("a = result.append") - code.add_line("e = result.extend") - code.add_line("s = str") + code.add_line("append_result = result.append") + code.add_line("extend_result = result.extend") + code.add_line("to_str = str") buffered = [] def flush_output(): """Force `buffered` to the code builder.""" if len(buffered) == 1: - code.add_line("a(%s)" % buffered[0]) + code.add_line("append_result(%s)" % buffered[0]) elif len(buffered) > 1: - code.add_line("e([%s])" % ", ".join(buffered)) + code.add_line("extend_result([%s])" % ", ".join(buffered)) del buffered[:] ops_stack = [] @@ -138,7 +138,8 @@ class Templite(object): continue elif token.startswith('{{'): # An expression to evaluate. 
- buffered.append("s(%s)" % self._expr_code(token[2:-2].strip())) + expr = self._expr_code(token[2:-2].strip()) + buffered.append("to_str(%s)" % expr) elif token.startswith('{%'): # Action tag: split into words and parse further. flush_output() @@ -187,7 +188,7 @@ class Templite(object): flush_output() for var_name in self.all_vars - self.loop_vars: - vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name)) + vars_code.add_line("c_%s = context[%r]" % (var_name, var_name)) code.add_line("return ''.join(result)") code.dedent() @@ -234,10 +235,10 @@ class Templite(object): """ # Make the complete context we'll use. - ctx = dict(self.context) + render_context = dict(self.context) if context: - ctx.update(context) - return self._render_function(ctx, self._do_dots) + render_context.update(context) + return self._render_function(render_context, self._do_dots) def _do_dots(self, value, *dots): """Evaluate dotted expressions at runtime.""" diff --git a/coverage/test_helpers.py b/coverage/test_helpers.py new file mode 100644 index 00000000..1bc17048 --- /dev/null +++ b/coverage/test_helpers.py @@ -0,0 +1,268 @@ +"""Mixin classes to help make good tests.""" + +import atexit +import collections +import os +import random +import shutil +import sys +import tempfile +import textwrap + +from coverage.backunittest import TestCase +from coverage.backward import StringIO, to_bytes + + +class Tee(object): + """A file-like that writes to all the file-likes it has.""" + + def __init__(self, *files): + """Make a Tee that writes to all the files in `files.`""" + self._files = files + if hasattr(files[0], "encoding"): + self.encoding = files[0].encoding + + def write(self, data): + """Write `data` to all the files.""" + for f in self._files: + f.write(data) + + if 0: + # Use this if you need to use a debugger, though it makes some tests + # fail, I'm not sure why... + def __getattr__(self, name): + return getattr(self._files[0], name) + + +class ModuleAwareMixin(TestCase): + """A test case mixin that isolates changes to sys.modules.""" + + def setUp(self): + super(ModuleAwareMixin, self).setUp() + + # Record sys.modules here so we can restore it in tearDown. + self.old_modules = dict(sys.modules) + self.addCleanup(self.cleanup_modules) + + def cleanup_modules(self): + """Remove any new modules imported during the test run. + + This lets us import the same source files for more than one test. + + """ + for m in [m for m in sys.modules if m not in self.old_modules]: + del sys.modules[m] + + +class SysPathAwareMixin(TestCase): + """A test case mixin that isolates changes to sys.path.""" + + def setUp(self): + super(SysPathAwareMixin, self).setUp() + + self.old_syspath = sys.path[:] + self.addCleanup(self.cleanup_syspath) + + def cleanup_syspath(self): + """Restore the original sys.path.""" + sys.path = self.old_syspath + + +class EnvironmentAwareMixin(TestCase): + """A test case mixin that isolates changes to the environment.""" + + def setUp(self): + super(EnvironmentAwareMixin, self).setUp() + + # Record environment variables that we changed with set_environ. + self.environ_undos = {} + + self.addCleanup(self.cleanup_environ) + + def set_environ(self, name, value): + """Set an environment variable `name` to be `value`. + + The environment variable is set, and record is kept that it was set, + so that `tearDown` can restore its original value. 
+ + """ + if name not in self.environ_undos: + self.environ_undos[name] = os.environ.get(name) + os.environ[name] = value + + def original_environ(self, name, if_missing=None): + """The environment variable `name` from when the test started.""" + if name in self.environ_undos: + ret = self.environ_undos[name] + else: + ret = os.environ.get(name) + if ret is None: + ret = if_missing + return ret + + def cleanup_environ(self): + """Undo all the changes made by `set_environ`.""" + for name, value in self.environ_undos.items(): + if value is None: + del os.environ[name] + else: + os.environ[name] = value + + +class StdStreamCapturingMixin(TestCase): + """A test case mixin that captures stdout and stderr.""" + + def setUp(self): + super(StdStreamCapturingMixin, self).setUp() + + # Capture stdout and stderr so we can examine them in tests. + # nose keeps stdout from littering the screen, so we can safely Tee it, + # but it doesn't capture stderr, so we don't want to Tee stderr to the + # real stderr, since it will interfere with our nice field of dots. + self.old_stdout = sys.stdout + self.captured_stdout = StringIO() + sys.stdout = Tee(sys.stdout, self.captured_stdout) + self.old_stderr = sys.stderr + self.captured_stderr = StringIO() + sys.stderr = self.captured_stderr + + self.addCleanup(self.cleanup_std_streams) + + def cleanup_std_streams(self): + """Restore stdout and stderr.""" + sys.stdout = self.old_stdout + sys.stderr = self.old_stderr + + def stdout(self): + """Return the data written to stdout during the test.""" + return self.captured_stdout.getvalue() + + def stderr(self): + """Return the data written to stderr during the test.""" + return self.captured_stderr.getvalue() + + +class TempDirMixin(TestCase): + """A test case mixin that creates a temp directory and files in it.""" + + # Our own setting: most of these tests run in their own temp directory. + run_in_temp_dir = True + + def setUp(self): + super(TempDirMixin, self).setUp() + + if self.run_in_temp_dir: + # Create a temporary directory. + noise = str(random.random())[2:] + self.temp_root = os.path.join(tempfile.gettempdir(), 'test_cover') + self.temp_dir = os.path.join(self.temp_root, noise) + os.makedirs(self.temp_dir) + self.old_dir = os.getcwd() + os.chdir(self.temp_dir) + + # Modules should be importable from this temp directory. We don't + # use '' because we make lots of different temp directories and + # nose's caching importer can get confused. The full path prevents + # problems. + sys.path.insert(0, os.getcwd()) + + class_behavior = self.class_behavior() + class_behavior.tests += 1 + class_behavior.test_method_made_any_files = False + class_behavior.temp_dir = self.run_in_temp_dir + + self.addCleanup(self.cleanup_temp_dir) + + def cleanup_temp_dir(self): + """Clean up the temp directories we made.""" + + if self.run_in_temp_dir: + # Get rid of the temporary directory. + os.chdir(self.old_dir) + shutil.rmtree(self.temp_root) + + class_behavior = self.class_behavior() + if class_behavior.test_method_made_any_files: + class_behavior.tests_making_files += 1 + + def make_file(self, filename, text="", newline=None): + """Create a file for testing. + + `filename` is the relative path to the file, including directories if + desired, which will be created if need be. `text` is the content to + create in the file. If `newline` is provided, it is a string that will + be used as the line endings in the created file, otherwise the line + endings are as provided in `text`. + + Returns `filename`. 
+ + """ + # Tests that call `make_file` should be run in a temp environment. + assert self.run_in_temp_dir + self.class_behavior().test_method_made_any_files = True + + text = textwrap.dedent(text) + if newline: + text = text.replace("\n", newline) + + # Make sure the directories are available. + dirs, _ = os.path.split(filename) + if dirs and not os.path.exists(dirs): + os.makedirs(dirs) + + # Create the file. + with open(filename, 'wb') as f: + f.write(to_bytes(text)) + + return filename + + # We run some tests in temporary directories, because they may need to make + # files for the tests. But this is expensive, so we can change per-class + # whether a temp dir is used or not. It's easy to forget to set that + # option properly, so we track information about what the tests did, and + # then report at the end of the process on test classes that were set + # wrong. + + class ClassBehavior(object): + """A value object to store per-class.""" + def __init__(self): + self.tests = 0 + self.temp_dir = True + self.tests_making_files = 0 + self.test_method_made_any_files = False + + # Map from class to info about how it ran. + class_behaviors = collections.defaultdict(ClassBehavior) + + @classmethod + def report_on_class_behavior(cls): + """Called at process exit to report on class behavior.""" + for test_class, behavior in cls.class_behaviors.items(): + if behavior.temp_dir and behavior.tests_making_files == 0: + bad = "Inefficient" + elif not behavior.temp_dir and behavior.tests_making_files > 0: + bad = "Unsafe" + else: + bad = "" + + if bad: + if behavior.temp_dir: + where = "in a temp directory" + else: + where = "without a temp directory" + print( + "%s: %s ran %d tests, %d made files %s" % ( + bad, + test_class.__name__, + behavior.tests, + behavior.tests_making_files, + where, + ) + ) + + def class_behavior(self): + """Get the ClassBehavior instance for this test.""" + return self.class_behaviors[self.__class__] + +# When the process ends, find out about bad classes. +atexit.register(TempDirMixin.report_on_class_behavior) diff --git a/coverage/tracer.c b/coverage/tracer.c index 3131d7df..cb242219 100644 --- a/coverage/tracer.c +++ b/coverage/tracer.c @@ -83,6 +83,7 @@ typedef struct { PyObject * warn; PyObject * coroutine_id_func; PyObject * data; + PyObject * plugin_data; PyObject * should_trace_cache; PyObject * arcs; @@ -192,6 +193,7 @@ CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused) self->warn = NULL; self->coroutine_id_func = NULL; self->data = NULL; + self->plugin_data = NULL; self->should_trace_cache = NULL; self->arcs = NULL; @@ -232,6 +234,7 @@ CTracer_dealloc(CTracer *self) Py_XDECREF(self->warn); Py_XDECREF(self->coroutine_id_func); Py_XDECREF(self->data); + Py_XDECREF(self->plugin_data); Py_XDECREF(self->should_trace_cache); DataStack_dealloc(self, &self->data_stack); @@ -385,6 +388,7 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse PyObject * filename = NULL; PyObject * tracename = NULL; PyObject * disposition = NULL; + PyObject * disp_trace = NULL; #if WHAT_LOG || TRACE_LOG PyObject * ascii = NULL; #endif @@ -475,15 +479,33 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse Py_INCREF(disposition); } - /* If tracename is a string, then we're supposed to trace. 
*/ - tracename = PyObject_GetAttrString(disposition, "filename"); - if (tracename == NULL) { + disp_trace = PyObject_GetAttrString(disposition, "trace"); + if (disp_trace == NULL) { STATS( self->stats.errors++; ) Py_DECREF(disposition); return RET_ERROR; } + + tracename = Py_None; + Py_INCREF(tracename); + + if (disp_trace == Py_True) { + /* If tracename is a string, then we're supposed to trace. */ + tracename = PyObject_GetAttrString(disposition, "source_filename"); + if (tracename == NULL) { + STATS( self->stats.errors++; ) + Py_DECREF(disposition); + Py_DECREF(disp_trace); + return RET_ERROR; + } + } + Py_DECREF(disp_trace); + if (MyText_Check(tracename)) { PyObject * file_data = PyDict_GetItem(self->data, tracename); + PyObject * disp_plugin = NULL; + PyObject * disp_plugin_name = NULL; + if (file_data == NULL) { file_data = PyDict_New(); if (file_data == NULL) { @@ -500,6 +522,34 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse Py_DECREF(disposition); return RET_ERROR; } + + if (self->plugin_data != NULL) { + /* If the disposition mentions a plugin, record that. */ + disp_plugin = PyObject_GetAttrString(disposition, "plugin"); + if (disp_plugin == NULL) { + STATS( self->stats.errors++; ) + Py_DECREF(tracename); + Py_DECREF(disposition); + return RET_ERROR; + } + if (disp_plugin != Py_None) { + disp_plugin_name = PyObject_GetAttrString(disp_plugin, "__name__"); + Py_DECREF(disp_plugin); + if (disp_plugin_name == NULL) { + STATS( self->stats.errors++; ) + Py_DECREF(tracename); + Py_DECREF(disposition); + return RET_ERROR; + } + ret = PyDict_SetItem(self->plugin_data, tracename, disp_plugin_name); + Py_DECREF(disp_plugin_name); + if (ret < 0) { + Py_DECREF(tracename); + Py_DECREF(disposition); + return RET_ERROR; + } + } + } } self->cur_entry.file_data = file_data; /* Make the frame right in case settrace(gettrace()) happens. */ @@ -735,6 +785,9 @@ CTracer_members[] = { { "data", T_OBJECT, offsetof(CTracer, data), 0, PyDoc_STR("The raw dictionary of trace data.") }, + { "plugin_data", T_OBJECT, offsetof(CTracer, plugin_data), 0, + PyDoc_STR("Mapping from filename to plugin name.") }, + { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0, PyDoc_STR("Dictionary caching should_trace results.") }, |
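
A closing note on wiring this up. Per the config.py hunk above, plugins are named by module in the new run:plugins list option (replacing run:extensions), and a config-file section named after the plugin module is handed to it as its options dict via get_plugin_options(). The same list can be passed programmatically through the new plugins= argument to Coverage; a hedged sketch, with a hypothetical module name:

    from coverage import Coverage

    # "mako_plugin" is a hypothetical importable module defining a
    # module-level Plugin class, as Plugins.load_plugins() expects.
    cov = Coverage(plugins=["mako_plugin"])
    cov.start()
    # ... run the code being measured ...
    cov.stop()
    cov.save()
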
