Diffstat (limited to 'coverage')
-rw-r--r--                coverage/ctracer/tracer.c              2
-rw-r--r--                coverage/ctracer/util.h                8
-rw-r--r--                coverage/env.py                       33
-rw-r--r--                coverage/execfile.py                  10
-rw-r--r--                coverage/files.py                      5
-rw-r--r-- [-rwxr-xr-x]   coverage/htmlfiles/keybd_closed.png  bin 112 -> 112 bytes
-rw-r--r-- [-rwxr-xr-x]   coverage/htmlfiles/keybd_open.png    bin 112 -> 112 bytes
-rw-r--r--                coverage/parser.py                     2
-rw-r--r--                coverage/sqldata.py                  173
-rw-r--r--                coverage/xmlreport.py                 13

10 files changed, 169 insertions(+), 77 deletions(-)
diff --git a/coverage/ctracer/tracer.c b/coverage/ctracer/tracer.c
index 7d639112..d497a94d 100644
--- a/coverage/ctracer/tracer.c
+++ b/coverage/ctracer/tracer.c
@@ -541,7 +541,7 @@ CTracer_handle_call(CTracer *self, PyFrameObject *frame)
 
         /* Make the frame right in case settrace(gettrace()) happens. */
         Py_INCREF(self);
-        My_XSETREF(frame->f_trace, (PyObject*)self);
+        Py_XSETREF(frame->f_trace, (PyObject*)self);
 
         /* A call event is really a "start frame" event, and can happen for
          * re-entering a generator also. f_lasti is -1 for a true call, and a
diff --git a/coverage/ctracer/util.h b/coverage/ctracer/util.h
index 96d2e51c..cb8aceb9 100644
--- a/coverage/ctracer/util.h
+++ b/coverage/ctracer/util.h
@@ -44,14 +44,6 @@
 
 #endif /* Py3k */
 
-// Undocumented, and not in 2.6, so our own copy of it.
-#define My_XSETREF(op, op2) \
-    do { \
-        PyObject *_py_tmp = (PyObject *)(op); \
-        (op) = (op2); \
-        Py_XDECREF(_py_tmp); \
-    } while (0)
-
 /* The values returned to indicate ok or error. */
 #define RET_OK      0
 #define RET_ERROR   -1
diff --git a/coverage/env.py b/coverage/env.py
index d97b193c..83b4be65 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -28,6 +28,39 @@ PY3 = PYVERSION >= (3, 0)
 class PYBEHAVIOR(object):
     """Flags indicating this Python's behavior."""
 
+    # Is "if __debug__" optimized away?
+    optimize_if_debug = (not PYPY)
+
+    # Is "if not __debug__" optimized away?
+    optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4))
+
+    # Do we have yield-from?
+    yield_from = (PYVERSION >= (3, 3))
+
+    # Do we have PEP 420 namespace packages?
+    namespaces_pep420 = (PYVERSION >= (3, 3))
+
+    # Do .pyc files have the source file size recorded in them?
+    size_in_pyc = (PYVERSION >= (3, 3))
+
+    # Do we have async and await syntax?
+    async_syntax = (PYVERSION >= (3, 5))
+
+    # PEP 448 defined additional unpacking generalizations
+    unpackings_pep448 = (PYVERSION >= (3, 5))
+
+    # Can co_lnotab have negative deltas?
+    negative_lnotab = (PYVERSION >= (3, 6))
+
+    # Do .pyc files conform to PEP 552? Hash-based pyc's.
+    hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4))
+
+    # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
+    # used to be an empty string (meaning the current directory). It changed
+    # to be the actual path to the current directory, so that os.chdir wouldn't
+    # affect the outcome.
+    actual_syspath0_dash_m = (PYVERSION >= (3, 7, 0, 'beta', 3))
+
     # When a break/continue/return statement in a try block jumps to a finally
     # block, does the finally block do the break/continue/return (pre-3.8), or
     # does the finally jump back to the break/continue/return (3.8) to do the
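The env.py hunk above is the heart of the change: scattered `env.PYVERSION >= (...)` comparisons become named feature flags on PYBEHAVIOR, so call sites ask a question about behavior instead of repeating a version number. A minimal sketch of the pattern, reusing one flag from the diff:

    import sys

    PYVERSION = sys.version_info

    class PYBEHAVIOR(object):
        """Flags indicating this Python's behavior."""
        # One class attribute per version-dependent behavior.
        negative_lnotab = (PYVERSION >= (3, 6))

    # A call site tests the named behavior, not the version number:
    if PYBEHAVIOR.negative_lnotab:
        print("co_lnotab line deltas can be negative on this Python")

The payoff shows in the execfile.py and parser.py diffs below, where each inline version test collapses to a flag lookup.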
diff --git a/coverage/execfile.py b/coverage/execfile.py
index 4fc6a85f..97997b06 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -124,11 +124,7 @@ class PyRunner(object):
         should_update_sys_path = True
 
         if self.as_module:
-            # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
-            # used to be an empty string (meaning the current directory). It changed
-            # to be the actual path to the current directory, so that os.chdir wouldn't
-            # affect the outcome.
-            if env.PYVERSION >= (3, 7, 0, 'beta', 3):
+            if env.PYBEHAVIOR.actual_syspath0_dash_m:
                 path0 = os.getcwd()
             else:
                 path0 = ""
@@ -290,7 +286,7 @@ def make_code_from_pyc(filename):
            raise NoCode("Bad magic number in .pyc file")
 
        date_based = True
-       if env.PYVERSION >= (3, 7, 0, 'alpha', 4):
+       if env.PYBEHAVIOR.hashed_pyc_pep552:
            flags = struct.unpack('<L', fpyc.read(4))[0]
            hash_based = flags & 0x01
            if hash_based:
@@ -299,7 +295,7 @@ def make_code_from_pyc(filename):
        if date_based:
            # Skip the junk in the header that we don't need.
            fpyc.read(4)            # Skip the moddate.
-           if env.PYVERSION >= (3, 3):
+           if env.PYBEHAVIOR.size_in_pyc:
                # 3.3 added another long to the header (size), skip it.
                fpyc.read(4)
 
diff --git a/coverage/files.py b/coverage/files.py
index b328f653..d9495912 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -59,6 +59,7 @@ def canonical_filename(filename):
 
     """
     if filename not in CANONICAL_FILENAME_CACHE:
+        cf = filename
         if not os.path.isabs(filename):
             for path in [os.curdir] + sys.path:
                 if path is None:
@@ -69,9 +70,9 @@ def canonical_filename(filename):
             except UnicodeError:
                 exists = False
             if exists:
-                filename = f
+                cf = f
                 break
 
-        cf = abs_file(filename)
+        cf = abs_file(cf)
     CANONICAL_FILENAME_CACHE[filename] = cf
     return CANONICAL_FILENAME_CACHE[filename]
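The files.py change fixes a cache-pollution bug in canonical_filename(): the old loop rebound `filename`, the cache key, to the resolved path before the `CANONICAL_FILENAME_CACHE[filename] = cf` store, so the result was filed under the resolved name and later calls with the caller's original spelling never hit the cache. Working on a separate `cf` variable keeps the key stable. A reduced sketch of the fixed shape (the `resolve` callback is hypothetical, standing in for the sys.path search):

    CACHE = {}

    def canonical(filename, resolve):
        """Cache the canonical form under the caller's spelling of the name."""
        if filename not in CACHE:
            cf = filename          # never rebind `filename`: it is the cache key
            found = resolve(filename)
            if found is not None:
                cf = found
            CACHE[filename] = cf
        return CACHE[filename]

    print(canonical("a.py", lambda name: "/src/" + name))  # '/src/a.py'
    print(canonical("a.py", lambda name: None))            # cache hit: '/src/a.py'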
diff --git a/coverage/htmlfiles/keybd_closed.png b/coverage/htmlfiles/keybd_closed.png
index db114023..db114023 100755..100644
--- a/coverage/htmlfiles/keybd_closed.png
+++ b/coverage/htmlfiles/keybd_closed.png
Binary files differ
diff --git a/coverage/htmlfiles/keybd_open.png b/coverage/htmlfiles/keybd_open.png
index db114023..db114023 100755..100644
--- a/coverage/htmlfiles/keybd_open.png
+++ b/coverage/htmlfiles/keybd_open.png
Binary files differ
diff --git a/coverage/parser.py b/coverage/parser.py
index 1c19f69e..6ae99fe4 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -409,7 +409,7 @@ class ByteParser(object):
                     yield (byte_num, line_num)
                     last_line_num = line_num
                 byte_num += byte_incr
-            if env.PYVERSION >= (3, 6) and line_incr >= 0x80:
+            if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
                 line_incr -= 0x100
             line_num += line_incr
         if line_num != last_line_num:
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index 24e5d6b8..065a1b4e 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -334,6 +334,11 @@ class CoverageSqliteData(SimpleReprMixin):
         self.add_file_tracers({filename: plugin_name})
 
     def update(self, other_data, aliases=None):
+        """Update this data with data from several other `CoverageData` instances.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+        """
         if self._has_lines and other_data._has_arcs:
             raise CoverageException("Can't combine arc data with line data")
         if self._has_arcs and other_data._has_lines:
@@ -341,57 +346,135 @@ class CoverageSqliteData(SimpleReprMixin):
 
         aliases = aliases or PathAliases()
 
-        # See what we had already measured, for accurate conflict reporting.
-        this_measured = self.measured_files()
-
-        other_files = set()
-
         # Force the database we're writing to to exist before we start nesting
         # contexts.
         self._start_using()
 
-        # Start a single transaction in each file.
-        with self._connect(), other_data._connect():
-            # lines
-            if other_data._has_lines:
-                for context in other_data.measured_contexts():
-                    self.set_context(context)
-                    for filename in other_data.measured_files():
-                        lines = set(other_data.lines(filename, context=context))
-                        if lines:
-                            other_files.add(filename)
-                            filename = aliases.map(filename)
-                            lines.update(self.lines(filename, context=context) or ())
-                            self.add_lines({filename: lines})
-
-            # arcs
-            if other_data._has_arcs:
-                for context in other_data.measured_contexts():
-                    self.set_context(context)
-                    for filename in other_data.measured_files():
-                        arcs = set(other_data.arcs(filename, context=context))
-                        if arcs:
-                            other_files.add(filename)
-                            filename = aliases.map(filename)
-                            arcs.update(self.arcs(filename, context=context) or ())
-                            self.add_arcs({filename: arcs})
-
-        # file_tracers
-        for filename in other_files:
-            other_plugin = other_data.file_tracer(filename)
-            filename = aliases.map(filename)
-            if filename in this_measured:
-                this_plugin = self.file_tracer(filename)
-            else:
-                this_plugin = None
-            if this_plugin is None:
-                self.add_file_tracers({filename: other_plugin})
-            elif this_plugin != other_plugin:
+        # Collector for all arcs, lines and tracers
+        other_data.read()
+        with other_data._connect() as conn:
+            # Get files data.
+            cur = conn.execute('select path from file')
+            files = {path: aliases.map(path) for (path,) in cur}
+            cur.close()
+
+            # Get contexts data.
+            cur = conn.execute('select context from context')
+            contexts = [context for (context,) in cur]
+            cur.close()
+
+            # Get arc data.
+            cur = conn.execute(
+                'select file.path, context.context, arc.fromno, arc.tono '
+                'from arc '
+                'inner join file on file.id = arc.file_id '
+                'inner join context on context.id = arc.context_id'
+            )
+            arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
+            cur.close()
+
+            # Get line data.
+            cur = conn.execute(
+                'select file.path, context.context, line.lineno '
+                'from line '
+                'inner join file on file.id = line.file_id '
+                'inner join context on context.id = line.context_id'
+            )
+            lines = [(files[path], context, lineno) for (path, context, lineno) in cur]
+            cur.close()
+
+            # Get tracer data.
+            cur = conn.execute(
+                'select file.path, tracer '
+                'from tracer '
+                'inner join file on file.id = tracer.file_id'
+            )
+            tracers = {files[path]: tracer for (path, tracer) in cur}
+            cur.close()
+
+        with self._connect() as conn:
+            conn.isolation_level = 'IMMEDIATE'
+
+            # Get all tracers in the DB. Files not in the tracers are assumed
+            # to have an empty string tracer. Since Sqlite does not support
+            # full outer joins, we have to make two queries to fill the
+            # dictionary.
+            this_tracers = {path: '' for path, in conn.execute('select path from file')}
+            this_tracers.update({
+                aliases.map(path): tracer
+                for path, tracer in conn.execute(
+                    'select file.path, tracer from tracer '
+                    'inner join file on file.id = tracer.file_id'
+                )
+            })
+
+            # Create all file and context rows in the DB.
+            conn.executemany(
+                'insert or ignore into file (path) values (?)',
+                ((file,) for file in files.values())
+            )
+            file_ids = {
+                path: id
+                for id, path in conn.execute('select id, path from file')
+            }
+            conn.executemany(
+                'insert or ignore into context (context) values (?)',
+                ((context,) for context in contexts)
+            )
+            context_ids = {
+                context: id
+                for id, context in conn.execute('select id, context from context')
+            }
+
+            # Prepare tracers and fail, if a conflict is found.
+            # tracer_paths is used to ensure consistency over the tracer data
+            # and tracer_map tracks the tracers to be inserted.
+            tracer_map = {}
+            for path in files.values():
+                this_tracer = this_tracers.get(path)
+                other_tracer = tracers.get(path, '')
+                # If there is no tracer, there is always the None tracer.
+                if this_tracer is not None and this_tracer != other_tracer:
                     raise CoverageException(
                         "Conflicting file tracer name for '%s': %r vs %r" % (
-                            filename, this_plugin, other_plugin,
+                            path, this_tracer, other_tracer
                         )
                     )
+                tracer_map[path] = other_tracer
+
+            # Prepare arc and line rows to be inserted by converting the file
+            # and context strings with integer ids. Then use the efficient
+            # `executemany()` to insert all rows at once.
+            arc_rows = [
+                (file_ids[file], context_ids[context], fromno, tono)
+                for file, context, fromno, tono in arcs
+            ]
+            line_rows = [
+                (file_ids[file], context_ids[context], lineno)
+                for file, context, lineno in lines
+            ]
+
+            self._choose_lines_or_arcs(arcs=bool(arcs), lines=bool(lines))
+
+            conn.executemany(
+                'insert or ignore into arc '
+                '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
+                ((file_ids[file], context_ids[context], fromno, tono)
+                 for file, context, fromno, tono in arcs)
+            )
+            conn.executemany(
+                'insert or ignore into line '
+                '(file_id, context_id, lineno) values (?, ?, ?)',
+                ((file_ids[file], context_ids[context], lineno) for file, context, lineno in lines)
+            )
+            conn.executemany(
+                'insert or ignore into tracer (file_id, tracer) values (?, ?)',
+                ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
+            )
+
+        # Update all internal cache data.
+        self._reset()
+        self.read()
 
     def erase(self, parallel=False):
         """Erase the data in this object.
@@ -476,7 +559,7 @@ class CoverageSqliteData(SimpleReprMixin):
             if file_id is None:
                 return None
             else:
-                query = "select lineno from line where file_id = ?"
+                query = "select distinct lineno from line where file_id = ?"
                 data = [file_id]
                 if context is not None:
                     query += " and context_id = ?"
@@ -491,7 +574,7 @@ class CoverageSqliteData(SimpleReprMixin):
             if file_id is None:
                 return None
             else:
-                query = "select fromno, tono from arc where file_id = ?"
+                query = "select distinct fromno, tono from arc where file_id = ?"
                 data = [file_id]
                 if context is not None:
                     query += " and context_id = ?"
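The rewritten update() above replaces nested Python loops, which re-queried the destination for every file and context, with set-based SQL: read all rows from the source database in a handful of queries, remap paths through `aliases`, translate file and context strings to integer ids, and bulk-insert with executemany(), letting `insert or ignore` drop rows that are already present. The `select distinct` added to lines() and arcs() deduplicates on the read side as well. A standalone sqlite3 sketch of the merge idiom, using a simplified, hypothetical schema:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('create table line (path text, lineno int, unique (path, lineno))')

    # Bulk-merge rows; the unique constraint makes 'insert or ignore' a dedup.
    rows = [('a.py', 1), ('a.py', 1), ('a.py', 2)]
    conn.executemany('insert or ignore into line (path, lineno) values (?, ?)', rows)

    print(conn.execute(
        'select distinct lineno from line where path = ?', ('a.py',)
    ).fetchall())
    # [(1,), (2,)]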
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 6c07337a..8ecdc24a 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -6,7 +6,6 @@
 
 import os
 import os.path
-import re
 import sys
 import time
 import xml.dom.minidom
@@ -225,16 +224,4 @@ def serialize_xml(dom):
     out = dom.toprettyxml()
     if env.PY2:
         out = out.encode("utf8")
-    # In Python 3.8, minidom lost the sorting of attributes: https://bugs.python.org/issue34160
-    # For the limited kinds of XML we produce, this re-sorts them.
-    if env.PYVERSION >= (3, 8):
-        rx_attr = r' [\w-]+="[^"]*"'
-        rx_attrs = r'(' + rx_attr + ')+'
-        fixed_lines = []
-        for line in out.splitlines(True):
-            hollow_line = re.sub(rx_attrs, u"☺", line)
-            attrs = sorted(re.findall(rx_attr, line))
-            new_line = hollow_line.replace(u"☺", "".join(attrs))
-            fixed_lines.append(new_line)
-        out = "".join(fixed_lines)
     return out
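For reference, the block removed from serialize_xml() above was a workaround for Python 3.8's minidom no longer sorting attributes (https://bugs.python.org/issue34160): it cut each line's run of attributes out with a regex and spliced them back in sorted order. A standalone sketch of that removed approach (the sample element is illustrative):

    import re

    rx_attr = r' [\w-]+="[^"]*"'
    rx_attrs = r'(' + rx_attr + ')+'

    line = u'<class name="files.py" filename="coverage/files.py">'
    hollow_line = re.sub(rx_attrs, u"☺", line)   # placeholder marks where the attributes were
    attrs = sorted(re.findall(rx_attr, line))    # the attributes themselves, sorted
    print(hollow_line.replace(u"☺", "".join(attrs)))
    # <class filename="coverage/files.py" name="files.py">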