Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/controllers')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py                                1
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager.py                               1535
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py                       339
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py                  299
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py         250
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/message_broker.py                         202
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/message_broker_unittest.py                 76
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py                     306
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor.py               172
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py      375
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py                     274
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py             60
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker.py                                 254
13 files changed, 4143 insertions, 0 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py b/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
new file mode 100644
index 000000000..ef65bee5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
new file mode 100644
index 000000000..aff604833
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -0,0 +1,1535 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+The Manager runs a series of tests (TestType interface) against a set
+of test files. If a test file fails a TestType, it returns a list of TestFailure
+objects to the Manager. The Manager then aggregates the TestFailures to
+create a final report.
+"""
+
+from __future__ import with_statement
+
+import errno
+import logging
+import math
+import Queue
+import random
+import re
+import sys
+import time
+
+from webkitpy.layout_tests.controllers import manager_worker_broker
+from webkitpy.layout_tests.controllers import worker
+from webkitpy.layout_tests.layout_package import json_layout_results_generator
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.views import printing
+
+from webkitpy.tool import grammar
+
+_log = logging.getLogger(__name__)
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+TestExpectations = test_expectations.TestExpectations
+
+
+def interpret_test_failures(port, test_name, failures):
+ """Interpret test failures and returns a test result as dict.
+
+ Args:
+ port: interface to port-specific hooks
+ test_name: test name relative to layout_tests directory
+ failures: list of test failures
+ Returns:
+ A dictionary like {'is_reftest': True, ...}
+ """
+ test_dict = {}
+ failure_types = [type(failure) for failure in failures]
+ # FIXME: get rid of all these is_* values once there is a 1:1 map between
+ # TestFailure type and test_expectations.EXPECTATION.
+ if test_failures.FailureMissingAudio in failure_types:
+ test_dict['is_missing_audio'] = True
+
+ for failure in failures:
+ if isinstance(failure, test_failures.FailureImageHashMismatch):
+ test_dict['image_diff_percent'] = failure.diff_percent
+ elif isinstance(failure, test_failures.FailureReftestMismatch):
+ test_dict['is_reftest'] = True
+ test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename)
+ elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
+ test_dict['is_mismatch_reftest'] = True
+ test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename)
+
+ if test_failures.FailureMissingResult in failure_types:
+ test_dict['is_missing_text'] = True
+
+ if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+ test_dict['is_missing_image'] = True
+ return test_dict
+
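+# For illustration, given a failures list containing a single
+# FailureImageHashMismatch with diff_percent=0.5, interpret_test_failures()
+# returns {'image_diff_percent': 0.5}; a FailureReftestMismatch instead yields
+# {'is_reftest': True, 'ref_file': <reference path relative to LayoutTests>}.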
+
+# FIXME: This should be on the Manager class (since that's the only caller)
+# or split off from Manager onto another helper class, but should not be a free function.
+# Most likely this should be made into its own class, and this super-long function
+# split into many helper functions.
+def summarize_results(port_obj, expectations, result_summary, retry_summary, test_timings, only_unexpected, interrupted):
+ """Summarize failing results as a dict.
+
+ FIXME: split this data structure into a separate class?
+
+ Args:
+ port_obj: interface to port-specific hooks
+ expectations: test_expectations.TestExpectations object
+ result_summary: summary object from initial test runs
+ retry_summary: summary object from final test run of retried tests
+ test_timings: a list of TestResult objects which contain test runtimes in seconds
+ only_unexpected: whether to return a summary only for the unexpected results
+ interrupted: whether the test run was interrupted before completing
+ Returns:
+ A dictionary containing a summary of the unexpected results from the
+ run, with the following fields:
+ 'version': a version indicator
+ 'fixable': The number of fixable tests (NOW - PASS)
+ 'skipped': The number of skipped tests (NOW & SKIPPED)
+ 'num_regressions': The number of non-flaky failures
+ 'num_flaky': The number of flaky failures
+ 'num_missing': The number of tests with missing results
+ 'num_passes': The number of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 3
+
+ tbe = result_summary.tests_by_expectation
+ tbt = result_summary.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+ results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_missing = 0
+ num_regressions = 0
+ keywords = {}
+ for expectation_string, expectation_enum in TestExpectations.EXPECTATIONS.iteritems():
+ keywords[expectation_enum] = expectation_string.upper()
+
+ for modifier_string, modifier_enum in TestExpectations.MODIFIERS.iteritems():
+ keywords[modifier_enum] = modifier_string.upper()
+
+ tests = {}
+ original_results = result_summary.unexpected_results if only_unexpected else result_summary.results
+
+ for test_name, result in original_results.iteritems():
+ # Note that if a test crashed in the original run, we ignore
+ # whether or not it crashed when we retried it (if we retried it),
+ # and always consider the result not flaky.
+ expected = expectations.get_expectations_string(test_name)
+ result_type = result.type
+ actual = [keywords[result_type]]
+
+ if result_type == test_expectations.SKIP:
+ continue
+
+ test_dict = {}
+ if result.has_stderr:
+ test_dict['has_stderr'] = True
+
+ if result_type == test_expectations.PASS:
+ num_passes += 1
+ # FIXME: include passing tests that have stderr output.
+ if expected == 'PASS':
+ continue
+ elif result_type == test_expectations.CRASH:
+ num_regressions += 1
+ elif result_type == test_expectations.MISSING:
+ if test_name in result_summary.unexpected_results:
+ num_missing += 1
+ elif test_name in result_summary.unexpected_results:
+ if test_name not in retry_summary.unexpected_results:
+ actual.extend(expectations.get_expectations_string(test_name).split(" "))
+ num_flaky += 1
+ else:
+ retry_result_type = retry_summary.unexpected_results[test_name].type
+ if result_type != retry_result_type:
+ actual.append(keywords[retry_result_type])
+ num_flaky += 1
+ else:
+ num_regressions += 1
+
+ test_dict['expected'] = expected
+ test_dict['actual'] = " ".join(actual)
+ # FIXME: Set this correctly once https://webkit.org/b/37739 is fixed
+ # and only set it if there actually is stderr data.
+
+ test_dict.update(interpret_test_failures(port_obj, test_name, result.failures))
+
+ # Store test hierarchically by directory. e.g.
+ # foo/bar/baz.html: test_dict
+ # foo/bar/baz1.html: test_dict
+ #
+ # becomes
+ # foo: {
+ # bar: {
+ # baz.html: test_dict,
+ # baz1.html: test_dict
+ # }
+ # }
+ parts = test_name.split('/')
+ current_map = tests
+ for i, part in enumerate(parts):
+ if i == (len(parts) - 1):
+ current_map[part] = test_dict
+ break
+ if part not in current_map:
+ current_map[part] = {}
+ current_map = current_map[part]
+
+ results['tests'] = tests
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ results['num_missing'] = num_missing
+ results['num_regressions'] = num_regressions
+ results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
+ results['interrupted'] = interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+ results['layout_tests_dir'] = port_obj.layout_tests_dir()
+ results['has_wdiff'] = port_obj.wdiff_available()
+ results['has_pretty_patch'] = port_obj.pretty_patch_available()
+ try:
+ results['revision'] = port_obj.host.scm().head_svn_revision()
+ except Exception, e:
+ _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
+ # Handle cases where we're running outside of version control.
+ import traceback
+ _log.debug('Failed to learn head svn revision:')
+ _log.debug(traceback.format_exc())
+ results['revision'] = ""
+
+ return results
+
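+# Illustrative shape of the dict built above (the field values are made up):
+# {'version': 3, 'fixable': 12, 'skipped': 3, 'num_passes': 1, 'num_flaky': 0,
+#  'num_missing': 0, 'num_regressions': 2, 'interrupted': False,
+#  'tests': {'fast': {'css': {'foo.html': {'expected': 'PASS', 'actual': 'TEXT'}}}},
+#  'revision': '95822', ...}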
+
+class TestRunInterruptedException(Exception):
+ """Raised when a test run should be stopped immediately."""
+ def __init__(self, reason):
+ Exception.__init__(self)
+ self.reason = reason
+ self.msg = reason
+
+ def __reduce__(self):
+ return self.__class__, (self.reason,)
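+ # Defining __reduce__ keeps this exception picklable with its reason, e.g.
+ # when it is raised on a worker and reported back to the manager process.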
+
+
+class WorkerException(Exception):
+ """Raised when we receive an unexpected/unknown exception from a worker."""
+ pass
+
+
+class TestShard(object):
+ """A test shard is a named list of TestInputs."""
+
+ # FIXME: Make this class visible, used by workers as well.
+ def __init__(self, name, test_inputs):
+ self.name = name
+ self.test_inputs = test_inputs
+
+ def __repr__(self):
+ return "TestShard(name='%s', test_inputs=%s'" % (self.name, self.test_inputs)
+
+ def __eq__(self, other):
+ return self.name == other.name and self.test_inputs == other.test_inputs
+
+
+class Manager(object):
+ """A class for managing running a series of tests on a series of layout
+ test files."""
+
+
+ # The per-test timeout in milliseconds, if no --time-out-ms option was
+ # given to run_webkit_tests. This should correspond to the default timeout
+ # in DumpRenderTree.
+ DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
+
+ def __init__(self, port, options, printer):
+ """Initialize test runner data structures.
+
+ Args:
+ port: an object implementing port-specific hooks
+ options: a dictionary of command line options
+ printer: a Printer object to record updates to.
+ """
+ self._port = port
+ self._filesystem = port.host.filesystem
+ self._options = options
+ self._printer = printer
+ self._message_broker = None
+ self._expectations = None
+
+ self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
+ self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
+ self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+ self._has_http_lock = False
+
+ self._remaining_locked_shards = []
+
+ # The wss server is disabled for now; pyOpenSSL needs to be installed on the buildbots first.
+ # self._websocket_secure_server = websocket_server.PyWebSocket(
+ # options.results_directory, use_tls=True, port=9323)
+
+ # a set of test files, and the same tests as a list
+
+ # FIXME: Rename to test_names.
+ self._test_files = set()
+ self._test_files_list = None
+ self._result_queue = Queue.Queue()
+ self._retrying = False
+ self._results_directory = self._port.results_directory()
+
+ self._all_results = []
+ self._group_stats = {}
+ self._current_result_summary = None
+
+ # This maps worker names to the state we are tracking for each of them.
+ self._worker_states = {}
+
+ def collect_tests(self, args):
+ """Find all the files to test.
+
+ Args:
+ args: list of test arguments from the command line
+
+ """
+ paths = self._strip_test_dir_prefixes(args)
+ if self._options.test_list:
+ paths += self._strip_test_dir_prefixes(read_test_files(self._filesystem, self._options.test_list, self._port.TEST_PATH_SEPARATOR))
+ self._test_files = self._port.tests(paths)
+
+ def _strip_test_dir_prefixes(self, paths):
+ return [self._strip_test_dir_prefix(path) for path in paths if path]
+
+ def _strip_test_dir_prefix(self, path):
+ # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
+ # the filesystem uses '\\' as a directory separator.
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
+ return path
+
+ def lint(self):
+ lint_failed = False
+ for test_configuration in self._port.all_test_configurations():
+ try:
+ self.lint_expectations(test_configuration)
+ except test_expectations.ParseError:
+ lint_failed = True
+ self._printer.write("")
+
+ if lint_failed:
+ _log.error("Lint failed.")
+ return -1
+
+ _log.info("Lint succeeded.")
+ return 0
+
+ def lint_expectations(self, config):
+ port = self._port
+ test_expectations.TestExpectations(
+ port,
+ None,
+ port.test_expectations(),
+ config,
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
+
+ def _is_http_test(self, test):
+ return self.HTTP_SUBDIR in test or self.WEBSOCKET_SUBDIR in test
+
+ def _http_tests(self):
+ return set(test for test in self._test_files if self._is_http_test(test))
+
+ def parse_expectations(self):
+ """Parse the expectations from the test_list files and return a data
+ structure holding them. Throws an error if the test_list files have
+ invalid syntax."""
+ port = self._port
+ self._expectations = test_expectations.TestExpectations(
+ port,
+ self._test_files,
+ port.test_expectations(),
+ port.test_configuration(),
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
+
+ def _split_into_chunks_if_necessary(self, skipped):
+ if not self._options.run_chunk and not self._options.run_part:
+ return skipped
+
+ # If the user specifies they just want to run a subset of the tests,
+ # just grab a subset of the non-skipped tests.
+ chunk_value = self._options.run_chunk or self._options.run_part
+ test_files = self._test_files_list
+ try:
+ (chunk_num, chunk_len) = chunk_value.split(":")
+ chunk_num = int(chunk_num)
+ assert(chunk_num >= 0)
+ test_size = int(chunk_len)
+ assert(test_size > 0)
+ except AssertionError:
+ _log.critical("invalid chunk '%s'" % chunk_value)
+ return None
+
+ # Get the number of tests
+ num_tests = len(test_files)
+
+ # Get the start offset of the slice.
+ if self._options.run_chunk:
+ chunk_len = test_size
+ # In this case chunk_num can be really large. We need
+ # to make the slice fit within the current number of tests.
+ slice_start = (chunk_num * chunk_len) % num_tests
+ else:
+ # Validate the data.
+ assert(test_size <= num_tests)
+ assert(chunk_num <= test_size)
+
+ # To compute chunk_len without skipping any tests, round num_tests
+ # up to the next multiple of test_size so that every part gets an
+ # equal share.
+ rounded_tests = num_tests
+ if rounded_tests % test_size != 0:
+ rounded_tests = (num_tests + test_size - (num_tests % test_size))
+
+ chunk_len = rounded_tests / test_size
+ slice_start = chunk_len * (chunk_num - 1)
+ # It doesn't matter if we go over test_size.
+
+ # Get the end offset of the slice.
+ slice_end = min(num_tests, slice_start + chunk_len)
+
+ files = test_files[slice_start:slice_end]
+
+ tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ((slice_end - slice_start), slice_start, slice_end, num_tests)
+ self._printer.print_expected(tests_run_msg)
+
+ # If we reached the end and we don't have enough tests, we run some
+ # from the beginning.
+ if slice_end - slice_start < chunk_len:
+ extra = chunk_len - (slice_end - slice_start)
+ extra_msg = (' last chunk is partial, appending [0:%d]' % extra)
+ self._printer.print_expected(extra_msg)
+ tests_run_msg += "\n" + extra_msg
+ files.extend(test_files[0:extra])
+ tests_run_filename = self._filesystem.join(self._results_directory, "tests_run.txt")
+ self._filesystem.write_text_file(tests_run_filename, tests_run_msg)
+
+ len_skip_chunk = int(len(files) * len(skipped) / float(len(self._test_files)))
+ skip_chunk_list = list(skipped)[0:len_skip_chunk]
+ skip_chunk = set(skip_chunk_list)
+
+ # FIXME: This is a total hack.
+ # Update expectations so that the stats are calculated correctly.
+ # We need to pass a list that includes the right # of skipped files
+ # to parse_expectations() so that ResultSummary() will get the correct
+ # stats. So, we add in the subset of skipped files, and then
+ # subtract them back out.
+ self._test_files_list = files + skip_chunk_list
+ self._test_files = set(self._test_files_list)
+
+ self.parse_expectations()
+
+ self._test_files = set(files)
+ self._test_files_list = files
+
+ return skip_chunk
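+ # Worked example of the slicing above (assuming the chunk_num:chunk_len
+ # syntax parsed here): with --run-chunk=2:50 and 120 tests in the list,
+ # slice_start = (2 * 50) % 120 = 100, so tests[100:120] are run and, because
+ # that chunk is partial, tests[0:30] are appended from the start of the list.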
+
+ # FIXME: This method is way too long and needs to be broken into pieces.
+ def prepare_lists_and_print_output(self):
+ """Create appropriate subsets of test lists and returns a
+ ResultSummary object. Also prints expected test counts.
+ """
+
+ # Remove skipped - both fixable and ignored - files from the
+ # top-level list of files to test.
+ num_all_test_files = len(self._test_files)
+ self._printer.print_expected("Found: %d tests" % (len(self._test_files)))
+ if not num_all_test_files:
+ _log.critical('No tests to run.')
+ return None
+
+ skipped = set()
+
+ if not self._options.http:
+ skipped = skipped.union(self._http_tests())
+
+ if num_all_test_files > 1 and not self._options.force:
+ skipped = skipped.union(self._expectations.get_tests_with_result_type(test_expectations.SKIP))
+ if self._options.skip_failing_tests:
+ failing = self._expectations.get_tests_with_result_type(test_expectations.FAIL)
+ self._test_files -= failing
+
+ self._test_files -= skipped
+
+ # Create a sorted list of test files so the subset chunk,
+ # if used, contains alphabetically consecutive tests.
+ self._test_files_list = list(self._test_files)
+ if self._options.randomize_order:
+ random.shuffle(self._test_files_list)
+ else:
+ self._test_files_list.sort(key=lambda test: test_key(self._port, test))
+
+ skipped = self._split_into_chunks_if_necessary(skipped)
+
+ # FIXME: It's unclear how --repeat-each and --iterations should interact with chunks?
+ if self._options.repeat_each:
+ list_with_repetitions = []
+ for test in self._test_files_list:
+ list_with_repetitions += ([test] * self._options.repeat_each)
+ self._test_files_list = list_with_repetitions
+
+ if self._options.iterations:
+ self._test_files_list = self._test_files_list * self._options.iterations
+
+ result_summary = ResultSummary(self._expectations, self._test_files | skipped)
+ self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
+ self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
+ self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky")
+ self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped")
+
+ if self._options.force:
+ self._printer.print_expected('Running all tests, including skips (--force)')
+ else:
+ # Note that we don't actually run the skipped tests (they were
+ # subtracted out of self._test_files, above), but we stub out the
+ # results here so the statistics can remain accurate.
+ for test in skipped:
+ result = test_results.TestResult(test)
+ result.type = test_expectations.SKIP
+ result_summary.add(result, expected=True)
+ self._printer.print_expected('')
+
+ # Check to make sure we didn't filter out all of the tests.
+ if not len(self._test_files):
+ _log.info("All tests are being skipped")
+ return None
+
+ return result_summary
+
+ def _get_dir_for_test_file(self, test_file):
+ """Returns the highest-level directory by which to shard the given
+ test file."""
+ directory, test_file = self._port.split_test(test_file)
+
+ # The http tests are very stable on mac/linux.
+ # TODO(ojan): Make the http server on Windows be apache so we can
+ # turn on sharding for the http tests there as well. Switching to apache is
+ # what made them stable on linux/mac.
+ return directory
+
+ def _get_test_input_for_file(self, test_file):
+ """Returns the appropriate TestInput object for the file. Mostly this
+ is used for looking up the timeout value (in ms) to use for the given
+ test."""
+ if self._test_is_slow(test_file):
+ return TestInput(test_file, self._options.slow_time_out_ms)
+ return TestInput(test_file, self._options.time_out_ms)
+
+ def _test_requires_lock(self, test_file):
+ """Return True if the test needs to be locked when
+ running multiple copies of NRWTs."""
+ return self._is_http_test(test_file)
+
+ def _test_is_slow(self, test_file):
+ return self._expectations.has_modifier(test_file, test_expectations.SLOW)
+
+ def _shard_tests(self, test_files, num_workers, fully_parallel):
+ """Groups tests into batches.
+ This helps ensure that tests that depend on each other (aka bad tests!)
+ continue to run together, as most cross-test dependencies tend to
+ occur within the same directory.
+ Return:
+ Two lists of TestShards. The first contains tests that must only be
+ run under the server lock, the second can be run whenever.
+ """
+
+ # FIXME: Move all of the sharding logic out of manager into its
+ # own class or module. Consider grouping it with the chunking logic
+ # in prepare_lists as well.
+ if num_workers == 1:
+ return self._shard_in_two(test_files)
+ elif fully_parallel:
+ return self._shard_every_file(test_files)
+ return self._shard_by_directory(test_files, num_workers)
+
+ def _shard_in_two(self, test_files):
+ """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
+
+ This is used when there's only one worker, to minimize the per-shard overhead."""
+ locked_inputs = []
+ unlocked_inputs = []
+ for test_file in test_files:
+ test_input = self._get_test_input_for_file(test_file)
+ if self._test_requires_lock(test_file):
+ locked_inputs.append(test_input)
+ else:
+ unlocked_inputs.append(test_input)
+
+ locked_shards = []
+ unlocked_shards = []
+ if locked_inputs:
+ locked_shards = [TestShard('locked_tests', locked_inputs)]
+ if unlocked_inputs:
+ unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+
+ return locked_shards, unlocked_shards
+
+ def _shard_every_file(self, test_files):
+ """Returns two lists of shards, each shard containing a single test file.
+
+ This mode gets maximal parallelism at the cost of much higher flakiness."""
+ locked_shards = []
+ unlocked_shards = []
+ for test_file in test_files:
+ test_input = self._get_test_input_for_file(test_file)
+
+ # Note that we use a '.' for the shard name; the name doesn't really
+ # matter, and the only other meaningful value would be the filename,
+ # which would be really redundant.
+ if self._test_requires_lock(test_file):
+ locked_shards.append(TestShard('.', [test_input]))
+ else:
+ unlocked_shards.append(TestShard('.', [test_input]))
+
+ return locked_shards, unlocked_shards
+
+ def _shard_by_directory(self, test_files, num_workers):
+ """Returns two lists of shards, each shard containing all the files in a directory.
+
+ This is the default mode, and gets as much parallelism as we can while
+ minimizing flakiness caused by inter-test dependencies."""
+ locked_shards = []
+ unlocked_shards = []
+ tests_by_dir = {}
+ # FIXME: Given that the tests are already sorted by directory,
+ # we can probably rewrite this to be clearer and faster.
+ for test_file in test_files:
+ directory = self._get_dir_for_test_file(test_file)
+ test_input = self._get_test_input_for_file(test_file)
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(test_input)
+
+ for directory, test_inputs in tests_by_dir.iteritems():
+ shard = TestShard(directory, test_inputs)
+ if self._test_requires_lock(directory):
+ locked_shards.append(shard)
+ else:
+ unlocked_shards.append(shard)
+
+ # Sort the shards by directory name.
+ locked_shards.sort(key=lambda shard: shard.name)
+ unlocked_shards.sort(key=lambda shard: shard.name)
+
+ return (self._resize_shards(locked_shards, self._max_locked_shards(num_workers),
+ 'locked_shard'),
+ unlocked_shards)
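+ # e.g. 'http/tests/xmlhttprequest/a.html' and 'http/tests/xmlhttprequest/b.html'
+ # land in one locked shard named 'http/tests/xmlhttprequest' (then regrouped
+ # by _resize_shards into at most _max_locked_shards() shards), while
+ # 'fast/css/*.html' forms an unlocked 'fast/css' shard.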
+
+ def _max_locked_shards(self, num_workers):
+ # Put a ceiling on the number of locked shards, so that we
+ # don't hammer the servers too badly.
+
+ # FIXME: For now, limit to one shard. After testing to make sure we
+ # can handle multiple shards, we should probably do something like
+ # limit this to no more than a quarter of all workers, e.g.:
+ # return max(math.ceil(num_workers / 4.0), 1)
+ return 1
+
+ def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
+ """Takes a list of shards and redistributes the tests into no more
+ than |max_new_shards| new shards."""
+
+ # This implementation assumes that each input shard only contains tests from a
+ # single directory, and that tests in each shard must remain together; as a
+ # result, a given input shard is never split between output shards.
+ #
+ # Each output shard contains the tests from one or more input shards and
+ # hence may contain tests from multiple directories.
+
+ def divide_and_round_up(numerator, divisor):
+ return int(math.ceil(float(numerator) / divisor))
+
+ def extract_and_flatten(shards):
+ test_inputs = []
+ for shard in shards:
+ test_inputs.extend(shard.test_inputs)
+ return test_inputs
+
+ def split_at(seq, index):
+ return (seq[:index], seq[index:])
+
+ num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
+ new_shards = []
+ remaining_shards = old_shards
+ while remaining_shards:
+ some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
+ new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1),
+ extract_and_flatten(some_shards)))
+ return new_shards
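+ # e.g. five locked shards with max_new_shards=1 collapse into a single
+ # 'locked_shard_1' containing all of their test inputs; with max_new_shards=2
+ # they would be regrouped 3 + 2 into 'locked_shard_1' and 'locked_shard_2'.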
+
+ def _log_num_workers(self, num_workers, num_shards, num_locked_shards):
+ driver_name = self._port.driver_name()
+ if num_workers == 1:
+ self._printer.print_config("Running 1 %s over %s" %
+ (driver_name, grammar.pluralize('shard', num_shards)))
+ else:
+ self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)" %
+ (num_workers, driver_name, num_shards, num_locked_shards))
+
+ def _run_tests(self, file_list, result_summary):
+ """Runs the tests in the file_list.
+
+ Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
+ test_timings, individual_test_timings)
+ interrupted is whether the run was interrupted
+ keyboard_interrupted is whether the interruption was because someone
+ typed Ctrl^C
+ thread_timings is a list of dicts with the total runtime
+ of each thread with 'name', 'num_tests', 'total_time' properties
+ test_timings is a list of timings for each sharded subdirectory
+ of the form [time, directory_name, num_tests]
+ individual_test_timings is a list of run times for each test
+ in the form {filename:filename, test_run_time:test_run_time}
+ result_summary: summary object to populate with the results
+ """
+ self._current_result_summary = result_summary
+ self._all_results = []
+ self._group_stats = {}
+ self._worker_states = {}
+
+ keyboard_interrupted = False
+ interrupted = False
+ thread_timings = []
+
+ self._printer.print_update('Sharding tests ...')
+ locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.experimental_fully_parallel)
+
+ # FIXME: We don't have a good way to coordinate the workers so that
+ # they don't try to run the shards that need a lock if we don't actually
+ # have the lock. The easiest solution at the moment is to grab the
+ # lock at the beginning of the run, and then run all of the locked
+ # shards first. This minimizes the time spent holding the lock, but
+ # means that we won't be running tests while we're waiting for the lock.
+ # If this becomes a problem in practice we'll need to change this.
+
+ all_shards = locked_shards + unlocked_shards
+ self._remaining_locked_shards = locked_shards
+ if locked_shards:
+ self.start_servers_with_lock()
+
+ num_workers = min(int(self._options.child_processes), len(all_shards))
+ self._log_num_workers(num_workers, len(all_shards), len(locked_shards))
+
+ manager_connection = manager_worker_broker.get(self._port, self._options, self, worker.Worker)
+
+ if self._options.dry_run:
+ return (keyboard_interrupted, interrupted, thread_timings, self._group_stats, self._all_results)
+
+ self._printer.print_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+ for worker_number in xrange(num_workers):
+ worker_connection = manager_connection.start_worker(worker_number, self.results_directory())
+ worker_state = _WorkerState(worker_number, worker_connection)
+ self._worker_states[worker_connection.name] = worker_state
+
+ # FIXME: If we start workers up too quickly, DumpRenderTree appears
+ # to thrash on something and time out its first few tests. Until
+ # we can figure out what's going on, sleep a bit in between
+ # workers. This needs a bug filed.
+ time.sleep(0.1)
+
+ self._printer.print_update("Starting testing ...")
+ for shard in all_shards:
+ # FIXME: Change 'test_list' to 'shard', make sharding public.
+ manager_connection.post_message('test_list', shard.name, shard.test_inputs)
+
+ # We post one 'stop' message for each worker. Because the stop messages
+ # are sent after all of the tests, and because each worker will stop
+ # reading messages after receiving a stop, we can be sure each
+ # worker will get a stop message and hence they will all shut down.
+ for _ in xrange(num_workers):
+ manager_connection.post_message('stop')
+
+ try:
+ while not self.is_done():
+ manager_connection.run_message_loop(delay_secs=1.0)
+
+ # Make sure all of the workers have shut down (if possible).
+ for worker_state in self._worker_states.values():
+ if worker_state.worker_connection.is_alive():
+ _log.debug('Waiting for worker %d to exit' % worker_state.number)
+ worker_state.worker_connection.join(5.0)
+ if worker_state.worker_connection.is_alive():
+ _log.error('Worker %d did not exit in time.' % worker_state.number)
+
+ except KeyboardInterrupt:
+ self._printer.print_update('Interrupted, exiting ...')
+ self.cancel_workers()
+ keyboard_interrupted = True
+ except TestRunInterruptedException, e:
+ _log.warning(e.reason)
+ self.cancel_workers()
+ interrupted = True
+ except WorkerException:
+ self.cancel_workers()
+ raise
+ except:
+ # Unexpected exception; don't try to clean up workers.
+ _log.error("Exception raised, exiting")
+ self.cancel_workers()
+ raise
+ finally:
+ self.stop_servers_with_lock()
+
+ thread_timings = [worker_state.stats for worker_state in self._worker_states.values()]
+
+ # FIXME: should this be a class instead of a tuple?
+ return (interrupted, keyboard_interrupted, thread_timings, self._group_stats, self._all_results)
+
+ def results_directory(self):
+ if not self._retrying:
+ return self._results_directory
+ else:
+ self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
+ return self._filesystem.join(self._results_directory, 'retries')
+
+ def update(self):
+ self.update_summary(self._current_result_summary)
+
+ def _collect_timing_info(self, threads):
+ test_timings = {}
+ individual_test_timings = []
+ thread_timings = []
+
+ for thread in threads:
+ thread_timings.append({'name': thread.getName(),
+ 'num_tests': thread.get_num_tests(),
+ 'total_time': thread.get_total_time()})
+ test_timings.update(thread.get_test_group_timing_stats())
+ individual_test_timings.extend(thread.get_test_results())
+
+ return (thread_timings, test_timings, individual_test_timings)
+
+ def needs_servers(self):
+ return any(self._test_requires_lock(test_name) for test_name in self._test_files) and self._options.http
+
+ def set_up_run(self):
+ """Configures the system to be ready to run tests.
+
+ Returns a ResultSummary object if we should continue to run tests,
+ or None if we should abort.
+
+ """
+ # This must be started before we check the system dependencies,
+ # since the helper may do things to make the setup correct.
+ self._printer.print_update("Starting helper ...")
+ self._port.start_helper()
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ self._printer.print_update("Checking system dependencies ...")
+ if not self._port.check_sys_deps(self.needs_servers()):
+ self._port.stop_helper()
+ return None
+
+ if self._options.clobber_old_results:
+ self._clobber_old_results()
+
+ # Create the output directory if it doesn't already exist.
+ self._port.maybe_make_directory(self._results_directory)
+
+ self._port.setup_test_run()
+
+ self._printer.print_update("Preparing tests ...")
+ result_summary = self.prepare_lists_and_print_output()
+ if not result_summary:
+ return None
+
+ return result_summary
+
+ def run(self, result_summary):
+ """Run all our tests on all our test files.
+
+ For each test file, we run each test type. If there are any failures,
+ we collect them for reporting.
+
+ Args:
+ result_summary: a summary object tracking the test results.
+
+ Return:
+ The number of unexpected results (0 == success)
+ """
+ # collect_tests() must have been called first to initialize us.
+ # If we didn't find any files to test, we've errored out already in
+ # prepare_lists_and_print_output().
+ assert(len(self._test_files))
+
+ start_time = time.time()
+
+ interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = self._run_tests(self._test_files_list, result_summary)
+
+ # We exclude the crashes from the list of results to retry, because
+ # we want to treat even a potentially flaky crash as an error.
+ failures = self._get_failures(result_summary, include_crashes=False, include_missing=False)
+ retry_summary = result_summary
+ while (len(failures) and self._options.retry_failures and not self._retrying and not interrupted and not keyboard_interrupted):
+ _log.info('')
+ _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
+ _log.info('')
+ self._retrying = True
+ retry_summary = ResultSummary(self._expectations, failures.keys())
+ # Note that we intentionally ignore the return value here.
+ self._run_tests(failures.keys(), retry_summary)
+ failures = self._get_failures(retry_summary, include_crashes=True, include_missing=True)
+
+ end_time = time.time()
+
+ self._print_timing_statistics(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary)
+ self._print_result_summary(result_summary)
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self._printer.print_one_line_summary(result_summary.total, result_summary.expected, result_summary.unexpected)
+
+ unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
+ self._printer.print_unexpected_results(unexpected_results)
+
+ # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
+ if keyboard_interrupted:
+ raise KeyboardInterrupt
+
+ # FIXME: remove record_results. It's just used for testing. There's no need
+ # for it to be a commandline argument.
+ if (self._options.record_results and not self._options.dry_run and not keyboard_interrupted):
+ self._port.print_leaks_summary()
+ # Write the same data to log files and upload generated JSON files to appengine server.
+ summarized_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False, interrupted=interrupted)
+ self._upload_json_files(summarized_results, result_summary, individual_test_timings)
+
+ # Write the summary to disk (results.html) and display it if requested.
+ if not self._options.dry_run:
+ self._copy_results_html_file()
+ if self._options.show_results:
+ self._show_results_html_file(result_summary)
+
+ return self._port.exit_code_from_summarized_results(unexpected_results)
+
+ def start_servers_with_lock(self):
+ assert(self._options.http)
+ self._printer.print_update('Acquiring http lock ...')
+ self._port.acquire_http_lock()
+ self._printer.print_update('Starting HTTP server ...')
+ self._port.start_http_server()
+ self._printer.print_update('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ self._has_http_lock = True
+
+ def stop_servers_with_lock(self):
+ if self._has_http_lock:
+ self._printer.print_update('Stopping HTTP server ...')
+ self._port.stop_http_server()
+ self._printer.print_update('Stopping WebSocket server ...')
+ self._port.stop_websocket_server()
+ self._printer.print_update('Releasing server lock ...')
+ self._port.release_http_lock()
+ self._has_http_lock = False
+
+ def clean_up_run(self):
+ """Restores the system after we're done running tests."""
+
+ _log.debug("flushing stdout")
+ sys.stdout.flush()
+ _log.debug("flushing stderr")
+ sys.stderr.flush()
+ _log.debug("stopping helper")
+ self._port.stop_helper()
+
+ def update_summary(self, result_summary):
+ """Update the summary and print results with any completed tests."""
+ while True:
+ try:
+ result = test_results.TestResult.loads(self._result_queue.get_nowait())
+ except Queue.Empty:
+ self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
+ return
+
+ self._update_summary_with_result(result_summary, result)
+
+ def _interrupt_if_at_failure_limits(self, result_summary):
+ # Note: The messages in this method are constructed to match old-run-webkit-tests
+ # so that existing buildbot grep rules work.
+ def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
+ if limit and failure_count >= limit:
+ message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
+ raise TestRunInterruptedException(message)
+
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_failures,
+ result_summary.unexpected_failures,
+ result_summary,
+ "Exiting early after %d failures." % result_summary.unexpected_failures)
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_crashes_or_timeouts,
+ result_summary.unexpected_crashes + result_summary.unexpected_timeouts,
+ result_summary,
+ # This differs from ORWT because it does not include WebProcess crashes.
+ "Exiting early after %d crashes and %d timeouts." % (result_summary.unexpected_crashes, result_summary.unexpected_timeouts))
+
+ def _update_summary_with_result(self, result_summary, result):
+ if result.type == test_expectations.SKIP:
+ result_summary.add(result, expected=True)
+ else:
+ expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests)
+ result_summary.add(result, expected)
+ exp_str = self._expectations.get_expectations_string(result.test_name)
+ got_str = self._expectations.expectation_to_string(result.type)
+ self._printer.print_test_result(result, expected, exp_str, got_str)
+ self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
+ self._interrupt_if_at_failure_limits(result_summary)
+
+ def _clobber_old_results(self):
+ # Just clobber the actual test results directories since the other
+ # files in the results directory are explicitly used for cross-run
+ # tracking.
+ self._printer.print_update("Clobbering old results in %s" %
+ self._results_directory)
+ layout_tests_dir = self._port.layout_tests_dir()
+ possible_dirs = self._port.test_dirs()
+ for dirname in possible_dirs:
+ if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
+ self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
+
+ def _get_failures(self, result_summary, include_crashes, include_missing):
+ """Filters a dict of results and returns only the failures.
+
+ Args:
+ result_summary: the results of the test run
+ include_crashes: whether crashes are included in the output.
+ We use False when finding the list of failures to retry
+ to see if the results were flaky. Although the crashes may also be
+ flaky, we treat them as if they aren't so that they're not ignored.
+ include_missing: whether tests with missing results are included in
+ the output.
+ Returns:
+ a dict of files -> results
+ """
+ failed_results = {}
+ for test, result in result_summary.unexpected_results.iteritems():
+ if (result.type == test_expectations.PASS or
+ (result.type == test_expectations.CRASH and not include_crashes) or
+ (result.type == test_expectations.MISSING and not include_missing)):
+ continue
+ failed_results[test] = result.type
+
+ return failed_results
+
+ def _char_for_result(self, result):
+ result = result.lower()
+ if result in TestExpectations.EXPECTATIONS:
+ result_enum_value = TestExpectations.EXPECTATIONS[result]
+ else:
+ result_enum_value = TestExpectations.MODIFIERS[result]
+ return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value]
+
+ def _upload_json_files(self, summarized_results, result_summary, individual_test_timings):
+ """Writes the results of the test run as JSON files into the results
+ dir and upload the files to the appengine server.
+
+ Args:
+ unexpected_results: dict of unexpected results
+ summarized_results: dict of results
+ result_summary: full summary object
+ individual_test_timings: list of test times (used by the flakiness
+ dashboard).
+ """
+ _log.debug("Writing JSON files in %s." % self._results_directory)
+
+ times_trie = json_results_generator.test_timings_trie(self._port, individual_test_timings)
+ times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
+ json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
+
+ full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
+ # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
+ json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
+
+ generator = json_layout_results_generator.JSONLayoutResultsGenerator(
+ self._port, self._options.builder_name, self._options.build_name,
+ self._options.build_number, self._results_directory,
+ BUILDER_BASE_URL, individual_test_timings,
+ self._expectations, result_summary, self._test_files_list,
+ self._options.test_results_server,
+ "layout-tests",
+ self._options.master_name)
+
+ _log.debug("Finished writing JSON files.")
+
+ json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
+
+ generator.upload_json_files(json_files)
+
+ incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
+
+ # Remove these files from the results directory so they don't take up too much space on the buildbot.
+ # The tools use the version we uploaded to the results server anyway.
+ self._filesystem.remove(times_json_path)
+ self._filesystem.remove(incremental_results_path)
+
+ def print_config(self):
+ """Prints the configuration for the test run."""
+ p = self._printer
+ p.print_config("Using port '%s'" % self._port.name())
+ p.print_config("Test configuration: %s" % self._port.test_configuration())
+ p.print_config("Placing test results in %s" % self._results_directory)
+ if self._options.new_baseline:
+ p.print_config("Placing new baselines in %s" %
+ self._port.baseline_path())
+
+ fallback_path = [self._filesystem.split(x)[1] for x in self._port.baseline_search_path()]
+ p.print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+
+ p.print_config("Using %s build" % self._options.configuration)
+ if self._options.pixel_tests:
+ p.print_config("Pixel tests enabled")
+ else:
+ p.print_config("Pixel tests disabled")
+
+ p.print_config("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms,
+ self._options.slow_time_out_ms))
+
+ p.print_config('Command line: ' +
+ ' '.join(self._port.driver_cmd_line()))
+ p.print_config("Worker model: %s" % self._options.worker_model)
+ p.print_config("")
+
+ def _print_expected_results_of_type(self, result_summary,
+ result_type, result_type_str):
+ """Print the number of the tests in a given result class.
+
+ Args:
+ result_summary - the object containing all the results to report on
+ result_type - the particular result type to report in the summary.
+ result_type_str - a string description of the result_type.
+ """
+ tests = self._expectations.get_tests_with_result_type(result_type)
+ now = result_summary.tests_by_timeline[test_expectations.NOW]
+ wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+
+ # We use a fancy format string in order to print the data out in a
+ # nicely-aligned table.
+ fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+ % (self._num_digits(now), self._num_digits(wontfix)))
+ self._printer.print_expected(fmtstr %
+ (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+ def _num_digits(self, num):
+ """Returns the number of digits needed to represent the length of a
+ sequence."""
+ ndigits = 1
+ if len(num):
+ ndigits = int(math.log10(len(num))) + 1
+ return ndigits
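+ # e.g. for a set of 150 tests this returns 3, so the counts line up in the
+ # table printed by _print_expected_results_of_type() above.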
+
+ def _print_timing_statistics(self, total_time, thread_timings,
+ directory_test_timings, individual_test_timings,
+ result_summary):
+ """Record timing-specific information for the test run.
+
+ Args:
+ total_time: total elapsed time (in seconds) for the test run
+ thread_timings: wall clock time each thread ran for
+ directory_test_timings: timing by directory
+ individual_test_timings: timing by file
+ result_summary: summary object for the test run
+ """
+ self._printer.print_timing("Test timing:")
+ self._printer.print_timing(" %6.2f total testing time" % total_time)
+ self._printer.print_timing("")
+ self._printer.print_timing("Thread timing:")
+ cuml_time = 0
+ for t in thread_timings:
+ self._printer.print_timing(" %10s: %5d tests, %6.2f secs" %
+ (t['name'], t['num_tests'], t['total_time']))
+ cuml_time += t['total_time']
+ self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" %
+ (cuml_time, cuml_time / int(self._options.child_processes)))
+ self._printer.print_timing("")
+
+ self._print_aggregate_test_statistics(individual_test_timings)
+ self._print_individual_test_times(individual_test_timings,
+ result_summary)
+ self._print_directory_timings(directory_test_timings)
+
+ def _print_aggregate_test_statistics(self, individual_test_timings):
+ """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+ Args:
+ individual_test_timings: List of TestResults for all tests.
+ """
+ times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
+ self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):",
+ times_for_dump_render_tree)
+
+ def _print_individual_test_times(self, individual_test_timings,
+ result_summary):
+ """Prints the run times for slow, timeout and crash tests.
+ Args:
+ individual_test_timings: List of TestStats for all tests.
+ result_summary: summary object for test run
+ """
+ # Reverse-sort by the time spent in DumpRenderTree.
+ individual_test_timings.sort(lambda a, b:
+ cmp(b.test_run_time, a.test_run_time))
+
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
+ for test_tuple in individual_test_timings:
+ test_name = test_tuple.test_name
+ is_timeout_crash_or_slow = False
+ if self._test_is_slow(test_name):
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if test_name in result_summary.failures:
+ result = result_summary.results[test_name].type
+ if (result == test_expectations.TIMEOUT or
+ result == test_expectations.CRASH):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+
+ if (not is_timeout_crash_or_slow and
+ num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
+
+ self._printer.print_timing("")
+ self._print_test_list_timing("%s slowest tests that are not "
+ "marked as SLOW and did not timeout/crash:" %
+ printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests that timed out or crashed:",
+ timeout_or_crash_tests)
+ self._printer.print_timing("")
+
+ def _print_test_list_timing(self, title, test_list):
+ """Print timing info for each test.
+
+ Args:
+ title: section heading
+ test_list: tests that fall in this section
+ """
+ if self._printer.disabled('slowest'):
+ return
+
+ self._printer.print_timing(title)
+ for test_tuple in test_list:
+ test_run_time = round(test_tuple.test_run_time, 1)
+ self._printer.print_timing(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
+
+ def _print_directory_timings(self, directory_test_timings):
+ """Print timing info by directory for any directories that
+ take > 10 seconds to run.
+
+ Args:
+ directory_test_timings: time info for each directory
+ """
+ timings = []
+ for directory in directory_test_timings:
+ num_tests, time_for_directory = directory_test_timings[directory]
+ timings.append((round(time_for_directory, 1), directory,
+ num_tests))
+ timings.sort()
+
+ self._printer.print_timing("Time to process slowest subdirectories:")
+ min_seconds_to_print = 10
+ for timing in timings:
+ if timing[0] > min_seconds_to_print:
+ self._printer.print_timing(
+ " %s took %s seconds to run %s tests." % (timing[1],
+ timing[0], timing[2]))
+ self._printer.print_timing("")
+
+ def _print_statistics_for_test_timings(self, title, timings):
+ """Prints the median, mean and standard deviation of the values in
+ timings.
+
+ Args:
+ title: Title for these timings.
+ timings: A list of floats representing times.
+ """
+ self._printer.print_timing(title)
+ timings.sort()
+
+ num_tests = len(timings)
+ if not num_tests:
+ return
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
+ if num_tests % 2 == 1:
+ median = timings[(num_tests - 1) / 2]
+ else:
+ lower = timings[num_tests / 2 - 1]
+ upper = timings[num_tests / 2]
+ median = (float(lower + upper)) / 2
+
+ mean = sum(timings) / num_tests
+
+ sum_of_deviations = 0
+ for timing in timings:
+ sum_of_deviations += math.pow(timing - mean, 2)
+
+ std_deviation = math.sqrt(sum_of_deviations / num_tests)
+ self._printer.print_timing(" Median: %6.3f" % median)
+ self._printer.print_timing(" Mean: %6.3f" % mean)
+ self._printer.print_timing(" 90th percentile: %6.3f" % percentile90)
+ self._printer.print_timing(" 99th percentile: %6.3f" % percentile99)
+ self._printer.print_timing(" Standard dev: %6.3f" % std_deviation)
+ self._printer.print_timing("")
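+ # e.g. timings = [1.0, 2.0, 4.0, 8.0]: median = (2.0 + 4.0) / 2 = 3.0,
+ # mean = 3.75, and the 90th percentile is timings[int(.9 * 4)] = 8.0.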
+
+ def _print_result_summary(self, result_summary):
+ """Print a short summary about how many tests passed.
+
+ Args:
+ result_summary: information to log
+ """
+ failed = len(result_summary.failures)
+ skipped = len(
+ result_summary.tests_by_expectation[test_expectations.SKIP])
+ total = result_summary.total
+ passed = total - failed - skipped
+ pct_passed = 0.0
+ if total > 0:
+ pct_passed = float(passed) * 100 / total
+
+ self._printer.print_actual("")
+ self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
+ (passed, total, pct_passed))
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.NOW, "Tests to be fixed")
+
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+ self._printer.print_actual("")
+
+ def _print_result_summary_entry(self, result_summary, timeline,
+ heading):
+ """Print a summary block of results for a particular timeline of test.
+
+ Args:
+ result_summary: summary to print results for
+ timeline: the timeline to print results for (NOW, WONTFIX, etc.)
+ heading: a textual description of the timeline
+ """
+ total = len(result_summary.tests_by_timeline[timeline])
+ not_passing = (total -
+ len(result_summary.tests_by_expectation[test_expectations.PASS] &
+ result_summary.tests_by_timeline[timeline]))
+ self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectations.EXPECTATION_ORDER:
+ if result == test_expectations.PASS:
+ continue
+ results = (result_summary.tests_by_expectation[result] &
+ result_summary.tests_by_timeline[timeline])
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ self._printer.print_actual(" %5d %-24s (%4.1f%%)" %
+ (len(results), desc[len(results) != 1], pct))
+
+ def _copy_results_html_file(self):
+ base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
+ results_file = self._filesystem.join(base_dir, 'results.html')
+ # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
+ if self._filesystem.exists(results_file):
+ self._filesystem.copyfile(results_file, self._filesystem.join(self._results_directory, "results.html"))
+
+ def _show_results_html_file(self, result_summary):
+ """Shows the results.html page."""
+ if self._options.full_results_html:
+ test_files = result_summary.failures.keys()
+ else:
+ unexpected_failures = self._get_failures(result_summary, include_crashes=True, include_missing=True)
+ test_files = unexpected_failures.keys()
+
+ if not len(test_files):
+ return
+
+ results_filename = self._filesystem.join(self._results_directory, "results.html")
+ self._port.show_results_html_file(results_filename)
+
+ def name(self):
+ return 'Manager'
+
+ def is_done(self):
+ worker_states = self._worker_states.values()
+ return worker_states and all(self._worker_is_done(worker_state) for worker_state in worker_states)
+
+ # FIXME: Inline this function.
+ def _worker_is_done(self, worker_state):
+ return worker_state.done
+
+ def cancel_workers(self):
+ for worker_state in self._worker_states.values():
+ worker_state.worker_connection.cancel()
+
+ def handle_started_test(self, source, test_info, hang_timeout):
+ worker_state = self._worker_states[source]
+ worker_state.current_test_name = test_info.test_name
+ worker_state.next_timeout = time.time() + hang_timeout
+
+ def handle_done(self, source):
+ worker_state = self._worker_states[source]
+ worker_state.done = True
+
+ def handle_exception(self, source, exception_type, exception_value, stack):
+ if exception_type in (KeyboardInterrupt, TestRunInterruptedException):
+ raise exception_type(exception_value)
+ _log.error("%s raised %s('%s'):" % (
+ source,
+ exception_value.__class__.__name__,
+ str(exception_value)))
+ self._log_worker_stack(stack)
+ raise WorkerException(str(exception_value))
+
+ def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+ self._group_stats[list_name] = (num_tests, elapsed_time)
+
+ def find(name, test_lists):
+ for i in range(len(test_lists)):
+ if test_lists[i].name == name:
+ return i
+ return -1
+
+ index = find(list_name, self._remaining_locked_shards)
+ if index >= 0:
+ self._remaining_locked_shards.pop(index)
+ if not self._remaining_locked_shards:
+ self.stop_servers_with_lock()
+
+ def handle_finished_test(self, source, result, elapsed_time):
+ worker_state = self._worker_states[source]
+ worker_state.next_timeout = None
+ worker_state.current_test_name = None
+ worker_state.stats['total_time'] += elapsed_time
+ worker_state.stats['num_tests'] += 1
+
+ self._all_results.append(result)
+ self._update_summary_with_result(self._current_result_summary, result)
+
+ def _log_worker_stack(self, stack):
+ webkitpydir = self._port.path_from_webkit_base('Tools', 'Scripts', 'webkitpy') + self._filesystem.sep
+ for filename, line_number, function_name, text in stack:
+ if filename.startswith(webkitpydir):
+ filename = filename.replace(webkitpydir, '')
+ _log.error(' %s:%u (in %s)' % (filename, line_number, function_name))
+ _log.error(' %s' % text)
+
+
+def read_test_files(fs, filenames, test_path_separator):
+ tests = []
+ for filename in filenames:
+ try:
+ if test_path_separator != fs.sep:
+ filename = filename.replace(test_path_separator, fs.sep)
+ file_contents = fs.read_text_file(filename).split('\n')
+ for line in file_contents:
+ line = test_expectations.strip_comments(line)
+ if line:
+ tests.append(line)
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ _log.critical('')
+                _log.critical('--test-list file "%s" not found' % filename)
+ raise
+ return tests
+
+
+# FIXME: These two free functions belong either on manager (since it's the only one
+# which uses them) or in a different file (if they need to be re-used).
+def test_key(port, test_name):
+ """Turns a test name into a list with two sublists, the natural key of the
+    """Turns a test name into a pair of sublists: the natural sort key of the
+    dirname, and the natural sort key of the basename.
+
+    This can be used when sorting paths so that files in a directory
+    are kept together rather than being mixed in with files in
+    subdirectories."""
+ dirname, basename = port.split_test(test_name)
+ return (natural_sort_key(dirname + port.TEST_PATH_SEPARATOR), natural_sort_key(basename))
+
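+# For illustration only (assuming '/' is the port's test path separator):
+# sorting with key=lambda name: test_key(port, name) orders
+# ['a/foo10.html', 'a/foo2.html', 'a/b/bar.html'] as
+# ['a/foo2.html', 'a/foo10.html', 'a/b/bar.html'], since numeric chunks
+# compare numerically and the files directly under 'a/' stay together.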
+
+def natural_sort_key(string_to_split):
+ """ Turn a string into a list of string and number chunks.
+ "z23a" -> ["z", 23, "a"]
+
+ Can be used to implement "natural sort" order. See:
+ http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
+ http://nedbatchelder.com/blog/200712.html#e20071211T054956
+ """
+ def tryint(val):
+ try:
+ return int(val)
+ except ValueError:
+ return val
+
+ return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
+
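+# For example, sorted(['foo_10.html', 'foo_9.html'], key=natural_sort_key)
+# yields ['foo_9.html', 'foo_10.html'] rather than the plain lexicographic
+# order, because the numeric chunks are compared as integers.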
+
+class _WorkerState(object):
+ """A class for the manager to use to track the current state of the workers."""
+ def __init__(self, number, worker_connection):
+ self.worker_connection = worker_connection
+ self.number = number
+ self.done = False
+ self.current_test_name = None
+ self.next_timeout = None
+ self.stats = {}
+ self.stats['name'] = worker_connection.name
+ self.stats['num_tests'] = 0
+ self.stats['total_time'] = 0
+
+ def __repr__(self):
+ return "_WorkerState(" + str(self.__dict__) + ")"
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
new file mode 100644
index 000000000..98db2fc3c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for manager.py."""
+
+import StringIO
+import sys
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system import outputcapture
+from webkitpy.thirdparty.mock import Mock
+from webkitpy import layout_tests
+from webkitpy.layout_tests.port import port_testcase
+
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.controllers.manager import interpret_test_failures, Manager, natural_sort_key, test_key, TestRunInterruptedException, TestShard
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.views import printing
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.host_mock import MockHost
+
+
+class ManagerWrapper(Manager):
+ def _get_test_input_for_file(self, test_file):
+ return test_file
+
+
+class ShardingTests(unittest.TestCase):
+ test_list = [
+ "http/tests/websocket/tests/unicode.htm",
+ "animations/keyframes.html",
+ "http/tests/security/view-source-no-refresh.html",
+ "http/tests/websocket/tests/websocket-protocol-ignored.html",
+ "fast/css/display-none-inline-style-change-crash.html",
+ "http/tests/xmlhttprequest/supported-xml-content-types.html",
+ "dom/html/level2/html/HTMLAnchorElement03.html",
+ "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+ "dom/html/level2/html/HTMLAnchorElement06.html",
+ ]
+
+ def get_shards(self, num_workers, fully_parallel, test_list=None):
+ test_list = test_list or self.test_list
+ host = MockHost()
+ port = host.port_factory.get(port_name='test')
+ port._filesystem = MockFileSystem()
+ # FIXME: This should use MockOptions() instead of Mock()
+ self.manager = ManagerWrapper(port=port, options=Mock(), printer=Mock())
+ return self.manager._shard_tests(test_list, num_workers, fully_parallel)
+
+ def test_shard_by_dir(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
+
+ # Note that although there are tests in multiple dirs that need locks,
+ # they are crammed into a single shard in order to reduce the # of
+ # workers hitting the server at once.
+ self.assertEquals(locked,
+ [TestShard('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html'])])
+ self.assertEquals(unlocked,
+ [TestShard('animations',
+ ['animations/keyframes.html']),
+ TestShard('dom/html/level2/html',
+ ['dom/html/level2/html/HTMLAnchorElement03.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html']),
+ TestShard('fast/css',
+ ['fast/css/display-none-inline-style-change-crash.html']),
+ TestShard('ietestcenter/Javascript',
+ ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
+ def test_shard_every_file(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
+ self.assertEquals(locked,
+ [TestShard('.', ['http/tests/websocket/tests/unicode.htm']),
+ TestShard('.', ['http/tests/security/view-source-no-refresh.html']),
+ TestShard('.', ['http/tests/websocket/tests/websocket-protocol-ignored.html']),
+ TestShard('.', ['http/tests/xmlhttprequest/supported-xml-content-types.html'])])
+ self.assertEquals(unlocked,
+ [TestShard('.', ['animations/keyframes.html']),
+ TestShard('.', ['fast/css/display-none-inline-style-change-crash.html']),
+ TestShard('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
+ TestShard('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
+ TestShard('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+ def test_shard_in_two(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
+ self.assertEquals(locked,
+ [TestShard('locked_tests',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html'])])
+ self.assertEquals(unlocked,
+ [TestShard('unlocked_tests',
+ ['animations/keyframes.html',
+ 'fast/css/display-none-inline-style-change-crash.html',
+ 'dom/html/level2/html/HTMLAnchorElement03.html',
+ 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+ def test_shard_in_two_has_no_locked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+ test_list=['animations/keyframe.html'])
+ self.assertEquals(len(locked), 0)
+ self.assertEquals(len(unlocked), 1)
+
+ def test_shard_in_two_has_no_unlocked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+                                           test_list=['http/tests/websocket/tests/unicode.htm'])
+ self.assertEquals(len(locked), 1)
+ self.assertEquals(len(unlocked), 0)
+
+
+class ManagerTest(unittest.TestCase):
+ def get_options(self):
+ return MockOptions(pixel_tests=False, new_baseline=False, time_out_ms=6000, slow_time_out_ms=30000, worker_model='inline')
+
+ def get_printer(self):
+ class FakePrinter(object):
+ def __init__(self):
+ self.output = []
+
+ def print_config(self, msg):
+ self.output.append(msg)
+
+ return FakePrinter()
+
+ def test_fallback_path_in_config(self):
+ options = self.get_options()
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard', options=options)
+ printer = self.get_printer()
+ manager = Manager(port, options, printer)
+ manager.print_config()
+ self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in printer.output)
+
+ def test_http_locking(tester):
+ class LockCheckingManager(Manager):
+ def __init__(self, port, options, printer):
+ super(LockCheckingManager, self).__init__(port, options, printer)
+ self._finished_list_called = False
+
+ def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+ if not self._finished_list_called:
+ tester.assertEquals(list_name, 'locked_tests')
+ tester.assertTrue(self._remaining_locked_shards)
+ tester.assertTrue(self._has_http_lock)
+
+ super(LockCheckingManager, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
+
+ if not self._finished_list_called:
+ tester.assertEquals(self._remaining_locked_shards, [])
+ tester.assertFalse(self._has_http_lock)
+ self._finished_list_called = True
+
+ options, args = run_webkit_tests.parse_args(['--platform=test', '--print=nothing', 'http/tests/passes', 'passes'])
+ host = MockHost()
+ port = host.port_factory.get(port_name=options.platform, options=options)
+ run_webkit_tests._set_up_derived_options(port, options)
+ printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO(), configure_logging=False)
+ manager = LockCheckingManager(port, options, printer)
+ manager.collect_tests(args)
+ manager.parse_expectations()
+ result_summary = manager.set_up_run()
+ num_unexpected_results = manager.run(result_summary)
+ manager.clean_up_run()
+ printer.cleanup()
+ tester.assertEquals(num_unexpected_results, 0)
+
+ def test_interrupt_if_at_failure_limits(self):
+ port = Mock() # FIXME: This should be a tighter mock.
+ port.TEST_PATH_SEPARATOR = '/'
+ port._filesystem = MockFileSystem()
+ manager = Manager(port=port, options=MockOptions(), printer=Mock())
+
+ manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
+ result_summary = ResultSummary(expectations=Mock(), test_files=[])
+ result_summary.unexpected_failures = 100
+ result_summary.unexpected_crashes = 50
+ result_summary.unexpected_timeouts = 50
+ # No exception when the exit_after* options are None.
+ manager._interrupt_if_at_failure_limits(result_summary)
+
+ # No exception when we haven't hit the limit yet.
+ manager._options.exit_after_n_failures = 101
+ manager._options.exit_after_n_crashes_or_timeouts = 101
+ manager._interrupt_if_at_failure_limits(result_summary)
+
+ # Interrupt if we've exceeded either limit:
+ manager._options.exit_after_n_crashes_or_timeouts = 10
+ self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
+
+ manager._options.exit_after_n_crashes_or_timeouts = None
+ manager._options.exit_after_n_failures = 10
+        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
+
+ def test_needs_servers(self):
+ def get_manager_with_tests(test_names):
+ port = Mock() # FIXME: Use a tighter mock.
+ port.TEST_PATH_SEPARATOR = '/'
+ manager = Manager(port, options=MockOptions(http=True), printer=Mock())
+ manager._test_files = set(test_names)
+ manager._test_files_list = test_names
+ return manager
+
+ manager = get_manager_with_tests(['fast/html'])
+ self.assertFalse(manager.needs_servers())
+
+ manager = get_manager_with_tests(['http/tests/misc'])
+ self.assertTrue(manager.needs_servers())
+
+ def integration_test_needs_servers(self):
+ def get_manager_with_tests(test_names):
+ host = MockHost()
+ port = host.port_factory.get()
+ manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
+ manager.collect_tests(test_names)
+ return manager
+
+ manager = get_manager_with_tests(['fast/html'])
+ self.assertFalse(manager.needs_servers())
+
+ manager = get_manager_with_tests(['http/tests/mime'])
+ self.assertTrue(manager.needs_servers())
+
+ if sys.platform == 'win32':
+ manager = get_manager_with_tests(['fast\\html'])
+ self.assertFalse(manager.needs_servers())
+
+ manager = get_manager_with_tests(['http\\tests\\mime'])
+ self.assertTrue(manager.needs_servers())
+
+
+class NaturalCompareTest(unittest.TestCase):
+ def assert_cmp(self, x, y, result):
+ self.assertEquals(cmp(natural_sort_key(x), natural_sort_key(y)), result)
+
+ def test_natural_compare(self):
+ self.assert_cmp('a', 'a', 0)
+ self.assert_cmp('ab', 'a', 1)
+ self.assert_cmp('a', 'ab', -1)
+ self.assert_cmp('', '', 0)
+ self.assert_cmp('', 'ab', -1)
+ self.assert_cmp('1', '2', -1)
+ self.assert_cmp('2', '1', 1)
+ self.assert_cmp('1', '10', -1)
+ self.assert_cmp('2', '10', -1)
+ self.assert_cmp('foo_1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_2.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_23.html', 'foo_10.html', 1)
+ self.assert_cmp('foo_23.html', 'foo_100.html', -1)
+
+
+class KeyCompareTest(unittest.TestCase):
+ def setUp(self):
+ host = MockHost()
+ self.port = host.port_factory.get('test')
+
+ def assert_cmp(self, x, y, result):
+ self.assertEquals(cmp(test_key(self.port, x), test_key(self.port, y)), result)
+
+ def test_test_key(self):
+ self.assert_cmp('/a', '/a', 0)
+ self.assert_cmp('/a', '/b', -1)
+ self.assert_cmp('/a2', '/a10', -1)
+ self.assert_cmp('/a2/foo', '/a10/foo', -1)
+ self.assert_cmp('/a/foo11', '/a/foo2', 1)
+ self.assert_cmp('/ab', '/a/a/b', -1)
+ self.assert_cmp('/a/a/b', '/ab', 1)
+ self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
+
+
+class ResultSummaryTest(unittest.TestCase):
+
+ def setUp(self):
+ host = MockHost()
+ self.port = host.port_factory.get(port_name='test')
+
+ def test_interpret_test_failures(self):
+ test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+ [test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/reftest-expected.html'))])
+ self.assertTrue('is_reftest' in test_dict)
+ self.assertFalse('is_mismatch_reftest' in test_dict)
+
+ test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+ [test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/common.html'))])
+ self.assertTrue('is_reftest' in test_dict)
+ self.assertFalse('is_mismatch_reftest' in test_dict)
+ self.assertEqual(test_dict['ref_file'], 'foo/common.html')
+
+ test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+ [test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
+ self.assertFalse('is_reftest' in test_dict)
+ self.assertTrue(test_dict['is_mismatch_reftest'])
+
+ test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+ [test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/common.html'))])
+ self.assertFalse('is_reftest' in test_dict)
+ self.assertTrue(test_dict['is_mismatch_reftest'])
+ self.assertEqual(test_dict['ref_file'], 'foo/common.html')
+
+
+if __name__ == '__main__':
+ port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py
new file mode 100755
index 000000000..82ba91ff5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py
@@ -0,0 +1,299 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages and concurrency for run-webkit-tests.
+
+This module implements a message broker that connects the manager to the
+workers: it provides a messaging abstraction and message loops (building on
+top of message_broker), and handles starting workers by launching processes.
+
+There are a lot of classes and objects involved in a fully connected system.
+They interact more or less like:
+
+ Manager --> _InlineManager ---> _InlineWorker <-> Worker
+ ^ \ / ^
+ | v v |
+ \-------------------- MessageBroker -------------/
+"""
+
+import logging
+import optparse
+import Queue
+import sys
+
+
+# Handle Python < 2.6 where multiprocessing isn't available.
+try:
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
+
+# These are needed when workers are launched in new child processes.
+from webkitpy.common.host import Host
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests.controllers import message_broker
+from webkitpy.layout_tests.views import printing
+
+
+_log = logging.getLogger(__name__)
+
+#
+# Topic names for Manager <-> Worker messaging
+#
+MANAGER_TOPIC = 'managers'
+ANY_WORKER_TOPIC = 'workers'
+
+
+def runtime_options():
+ """Return a list of optparse.Option objects for any runtime values used
+ by this module."""
+ options = [
+ optparse.make_option("--worker-model", action="store",
+ help=("controls worker model. Valid values are "
+ "'inline' and 'processes'.")),
+ ]
+ return options
+
+
+def get(port, options, client, worker_class):
+ """Return a connection to a manager/worker message_broker
+
+ Args:
+ port - handle to layout_tests/port object for port-specific stuff
+ options - optparse argument for command-line options
+ client - message_broker.BrokerClient implementation to dispatch
+ replies to.
+ worker_class - type of workers to create. This class must implement
+ the methods in AbstractWorker.
+ Returns:
+ A handle to an object that will talk to a message broker configured
+ for the normal manager/worker communication.
+ """
+ worker_model = options.worker_model
+ if worker_model == 'inline':
+ queue_class = Queue.Queue
+ manager_class = _InlineManager
+ elif worker_model == 'processes' and multiprocessing:
+ queue_class = multiprocessing.Queue
+ manager_class = _MultiProcessManager
+ else:
+ raise ValueError("unsupported value for --worker-model: %s" % worker_model)
+
+ broker = message_broker.Broker(options, queue_class)
+ return manager_class(broker, port, options, client, worker_class)
+
+
+class AbstractWorker(message_broker.BrokerClient):
+ def __init__(self, worker_connection, worker_number, results_directory, options):
+ """The constructor should be used to do any simple initialization
+ necessary, but should not do anything that creates data structures
+ that cannot be Pickled or sent across processes (like opening
+ files or sockets). Complex initialization should be done at the
+ start of the run() call.
+
+ Args:
+ worker_connection - handle to the BrokerConnection object creating
+ the worker and that can be used for messaging.
+            worker_number - identifier for this particular worker
+            results_directory - directory where test output is written
+            options - command-line argument object from optparse"""
+ message_broker.BrokerClient.__init__(self)
+ self._worker_connection = worker_connection
+ self._options = options
+ self._worker_number = worker_number
+ self._name = 'worker/%d' % worker_number
+ self._results_directory = results_directory
+
+ def run(self, port):
+ """Callback for the worker to start executing. Typically does any
+ remaining initialization and then calls broker_connection.run_message_loop()."""
+ raise NotImplementedError
+
+ def cancel(self):
+ """Called when possible to indicate to the worker to stop processing
+ messages and shut down. Note that workers may be stopped without this
+ method being called, so clients should not rely solely on this."""
+ raise NotImplementedError
+
+
+class _ManagerConnection(message_broker.BrokerConnection):
+ def __init__(self, broker, options, client, worker_class):
+ """Base initialization for all Manager objects.
+
+ Args:
+ broker: handle to the message_broker object
+ options: command line options object
+ client: callback object (the caller)
+ worker_class: class object to use to create workers.
+ """
+ message_broker.BrokerConnection.__init__(self, broker, client,
+ MANAGER_TOPIC, ANY_WORKER_TOPIC)
+ self._options = options
+ self._worker_class = worker_class
+
+ def start_worker(self, worker_number, results_directory):
+ raise NotImplementedError
+
+
+class _InlineManager(_ManagerConnection):
+ def __init__(self, broker, port, options, client, worker_class):
+ _ManagerConnection.__init__(self, broker, options, client, worker_class)
+ self._port = port
+ self._inline_worker = None
+
+ def start_worker(self, worker_number, results_directory):
+ self._inline_worker = _InlineWorkerConnection(self._broker, self._port,
+ self._client, self._worker_class, worker_number, results_directory, self._options)
+ return self._inline_worker
+
+ def run_message_loop(self, delay_secs=None):
+ # Note that delay_secs is ignored in this case since we can't easily
+ # implement it.
+ self._inline_worker.run()
+ self._broker.run_all_pending(MANAGER_TOPIC, self._client)
+
+
+class _MultiProcessManager(_ManagerConnection):
+ def __init__(self, broker, port, options, client, worker_class):
+ # Note that this class does not keep a handle to the actual port
+ # object, because it isn't Picklable. Instead it keeps the port
+ # name and recreates the port in the child process from the name
+ # and options.
+ _ManagerConnection.__init__(self, broker, options, client, worker_class)
+ self._platform_name = port.real_name()
+
+ def start_worker(self, worker_number, results_directory):
+ worker_connection = _MultiProcessWorkerConnection(self._broker, self._platform_name,
+ self._worker_class, worker_number, results_directory, self._options)
+ worker_connection.start()
+ return worker_connection
+
+
+class _WorkerConnection(message_broker.BrokerConnection):
+ def __init__(self, broker, worker_class, worker_number, results_directory, options):
+ self._client = worker_class(self, worker_number, results_directory, options)
+ self.name = self._client.name()
+ message_broker.BrokerConnection.__init__(self, broker, self._client,
+ ANY_WORKER_TOPIC, MANAGER_TOPIC)
+
+ def cancel(self):
+ raise NotImplementedError
+
+ def is_alive(self):
+ raise NotImplementedError
+
+ def join(self, timeout):
+ raise NotImplementedError
+
+ def yield_to_broker(self):
+ pass
+
+
+class _InlineWorkerConnection(_WorkerConnection):
+ def __init__(self, broker, port, manager_client, worker_class, worker_number, results_directory, options):
+ _WorkerConnection.__init__(self, broker, worker_class, worker_number, results_directory, options)
+ self._alive = False
+ self._port = port
+ self._manager_client = manager_client
+
+ def cancel(self):
+ self._client.cancel()
+
+ def is_alive(self):
+ return self._alive
+
+ def join(self, timeout):
+ assert not self._alive
+
+ def run(self):
+ self._alive = True
+ self._client.run(self._port)
+ self._alive = False
+
+ def yield_to_broker(self):
+ self._broker.run_all_pending(MANAGER_TOPIC, self._manager_client)
+
+ def raise_exception(self, exc_info):
+ # Since the worker is in the same process as the manager, we can
+ # raise the exception directly, rather than having to send it through
+ # the queue. This allows us to preserve the traceback.
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+
+if multiprocessing:
+
+ class _Process(multiprocessing.Process):
+ def __init__(self, worker_connection, platform_name, options, client):
+ multiprocessing.Process.__init__(self)
+ self._worker_connection = worker_connection
+ self._platform_name = platform_name
+ self._options = options
+ self._client = client
+
+ def run(self):
+ # We need to create a new Host object here because this is
+ # running in a new process and we can't require the parent's
+ # Host to be pickleable and passed to the child.
+ if self._platform_name.startswith('test'):
+ host = MockHost()
+ else:
+ host = Host()
+ host._initialize_scm()
+
+ options = self._options
+ port_obj = host.port_factory.get(self._platform_name, options)
+
+ # The unix multiprocessing implementation clones the
+ # log handler configuration into the child processes,
+ # but the win implementation doesn't.
+ configure_logging = (sys.platform == 'win32')
+
+ # FIXME: this won't work if the calling process is logging
+ # somewhere other than sys.stderr and sys.stdout, but I'm not sure
+ # if this will be an issue in practice.
+ printer = printing.Printer(port_obj, options, sys.stderr, sys.stdout, configure_logging)
+ self._client.run(port_obj)
+ printer.cleanup()
+
+
+class _MultiProcessWorkerConnection(_WorkerConnection):
+ def __init__(self, broker, platform_name, worker_class, worker_number, results_directory, options):
+ _WorkerConnection.__init__(self, broker, worker_class, worker_number, results_directory, options)
+ self._proc = _Process(self, platform_name, options, self._client)
+
+ def cancel(self):
+ return self._proc.terminate()
+
+ def is_alive(self):
+ return self._proc.is_alive()
+
+ def join(self, timeout):
+ return self._proc.join(timeout)
+
+ def start(self):
+ self._proc.start()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py
new file mode 100644
index 000000000..8e63f93ec
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py
@@ -0,0 +1,250 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import Queue
+import sys
+import unittest
+
+try:
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
+
+
+from webkitpy.common.system import outputcapture
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.controllers import manager_worker_broker
+from webkitpy.layout_tests.controllers import message_broker
+from webkitpy.layout_tests.views import printing
+
+
+# In order to reliably control when child workers are starting and stopping,
+# we use a pair of global variables to hold queues used for messaging. Ideally
+# we wouldn't need globals, but we can't pass these through a lexical closure
+# because those can't be Pickled and sent to a subprocess, and we'd prefer not
+# to have to pass extra arguments to the worker in the start_worker() call.
+starting_queue = None
+stopping_queue = None
+
+
+def make_broker(manager, worker_model, start_queue=None, stop_queue=None):
+ global starting_queue
+ global stopping_queue
+ starting_queue = start_queue
+ stopping_queue = stop_queue
+ options = get_options(worker_model)
+ host = MockHost()
+ test_port = host.port_factory.get("test")
+ return manager_worker_broker.get(test_port, options, manager, _TestWorker)
+
+
+class _TestWorker(manager_worker_broker.AbstractWorker):
+ def __init__(self, broker_connection, worker_number, results_directory, options):
+ self._broker_connection = broker_connection
+ self._options = options
+ self._worker_number = worker_number
+ self._name = 'TestWorker/%d' % worker_number
+ self._stopped = False
+ self._canceled = False
+ self._starting_queue = starting_queue
+ self._stopping_queue = stopping_queue
+
+ def handle_stop(self, src):
+ self._stopped = True
+
+ def handle_test(self, src, an_int, a_str):
+ assert an_int == 1
+ assert a_str == "hello, world"
+ self._broker_connection.post_message('test', 2, 'hi, everybody')
+
+ def is_done(self):
+ return self._stopped or self._canceled
+
+ def name(self):
+ return self._name
+
+ def cancel(self):
+ self._canceled = True
+
+ def run(self, port):
+ if self._starting_queue:
+ self._starting_queue.put('')
+
+ if self._stopping_queue:
+ self._stopping_queue.get()
+ try:
+ self._broker_connection.run_message_loop()
+ self._broker_connection.yield_to_broker()
+ self._broker_connection.post_message('done')
+ except Exception, e:
+ self._broker_connection.post_message('exception', (type(e), str(e), None))
+
+
+def get_options(worker_model):
+ option_list = (manager_worker_broker.runtime_options() +
+ printing.print_options() +
+ [optparse.make_option("--experimental-fully-parallel", default=False),
+ optparse.make_option("--child-processes", default='2')])
+ parser = optparse.OptionParser(option_list=option_list)
+ options, args = parser.parse_args(args=['--worker-model', worker_model])
+ return options
+
+
+class FunctionTests(unittest.TestCase):
+ def test_get__inline(self):
+ self.assertTrue(make_broker(self, 'inline') is not None)
+
+ def test_get__processes(self):
+ # This test sometimes fails on Windows. See <http://webkit.org/b/55087>.
+ if sys.platform in ('cygwin', 'win32'):
+ return
+
+ if multiprocessing:
+ self.assertTrue(make_broker(self, 'processes') is not None)
+ else:
+ self.assertRaises(ValueError, make_broker, self, 'processes')
+
+ def test_get__unknown(self):
+ self.assertRaises(ValueError, make_broker, self, 'unknown')
+
+
+class _TestsMixin(object):
+ """Mixin class that implements a series of tests to enforce the
+ contract all implementations must follow."""
+
+ def name(self):
+ return 'Tester'
+
+ def is_done(self):
+ return self._done
+
+ def handle_done(self, src):
+ self._done = True
+
+ def handle_test(self, src, an_int, a_str):
+ self._an_int = an_int
+ self._a_str = a_str
+
+ def handle_exception(self, src, exc_info):
+ self._exception = exc_info
+ self._done = True
+
+ def setUp(self):
+ self._an_int = None
+ self._a_str = None
+ self._broker = None
+ self._done = False
+ self._exception = None
+ self._worker_model = None
+
+ def make_broker(self, starting_queue=None, stopping_queue=None):
+ self._broker = make_broker(self, self._worker_model, starting_queue,
+ stopping_queue)
+
+ def test_cancel(self):
+ self.make_broker()
+ worker = self._broker.start_worker(0, None)
+ worker.cancel()
+ self._broker.post_message('test', 1, 'hello, world')
+ worker.join(0.5)
+ self.assertFalse(worker.is_alive())
+
+ def test_done(self):
+ self.make_broker()
+ worker = self._broker.start_worker(0, None)
+ self._broker.post_message('test', 1, 'hello, world')
+ self._broker.post_message('stop')
+ self._broker.run_message_loop()
+ worker.join(0.5)
+ self.assertFalse(worker.is_alive())
+ self.assertTrue(self.is_done())
+ self.assertEqual(self._an_int, 2)
+ self.assertEqual(self._a_str, 'hi, everybody')
+
+ def test_unknown_message(self):
+ self.make_broker()
+ worker = self._broker.start_worker(0, None)
+ self._broker.post_message('unknown')
+ self._broker.run_message_loop()
+ worker.join(0.5)
+
+ self.assertTrue(self.is_done())
+ self.assertFalse(worker.is_alive())
+ self.assertEquals(self._exception[0], ValueError)
+ self.assertEquals(self._exception[1],
+ "TestWorker/0: received message 'unknown' it couldn't handle")
+
+
+# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54520.
+if multiprocessing and sys.platform not in ('cygwin', 'win32'):
+
+ class MultiProcessBrokerTests(_TestsMixin, unittest.TestCase):
+ def setUp(self):
+ _TestsMixin.setUp(self)
+ self._worker_model = 'processes'
+
+ def queue(self):
+ return multiprocessing.Queue()
+
+
+class FunctionsTest(unittest.TestCase):
+ def test_runtime_options(self):
+ option_list = manager_worker_broker.runtime_options()
+ parser = optparse.OptionParser(option_list=option_list)
+ options, args = parser.parse_args([])
+ self.assertTrue(options)
+
+
+class InterfaceTest(unittest.TestCase):
+ # These tests mostly exist to pacify coverage.
+
+ # FIXME: There must be a better way to do this and also verify
+ # that classes do implement every abstract method in an interface.
+ def test_managerconnection_is_abstract(self):
+ # Test that all the base class methods are abstract and have the
+ # signature we expect.
+ broker = make_broker(self, 'inline')
+ obj = manager_worker_broker._ManagerConnection(broker._broker, None, self, None)
+ self.assertRaises(NotImplementedError, obj.start_worker, 0, None)
+
+ def test_workerconnection_is_abstract(self):
+ # Test that all the base class methods are abstract and have the
+ # signature we expect.
+ broker = make_broker(self, 'inline')
+ obj = manager_worker_broker._WorkerConnection(broker._broker, _TestWorker, 0, None, None)
+ self.assertRaises(NotImplementedError, obj.cancel)
+ self.assertRaises(NotImplementedError, obj.is_alive)
+ self.assertRaises(NotImplementedError, obj.join, None)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker.py
new file mode 100644
index 000000000..d58a6cd11
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messaging for run-webkit-tests.
+
+This module implements a simple message broker abstraction that will be
+used to coordinate messages between the main run-webkit-tests thread
+(aka TestRunner) and the individual worker threads (previously known as
+dump_render_tree_threads).
+
+The broker simply distributes messages onto topics (named queues); the actual
+queues themselves are provided by the caller, as the queue's implementation
+requirements vary depending on the desired concurrency model
+(none/threads/processes).
+
+In order for shared-nothing messaging between processes to be possible,
+messages must be picklable.
+
+The module defines one interface and two classes. Callers of this package
+must implement the BrokerClient interface, and most callers will create
+BrokerConnections as well as Brokers.
+
+The classes relate to each other as:
+
+ BrokerClient ------> BrokerConnection
+ ^ |
+ | v
+ \---------------- Broker
+
+(The BrokerClient never calls the Broker directly once it has been created;
+it talks only to its BrokerConnection. The BrokerConnection passes a
+reference to the BrokerClient to the Broker, and the Broker only invokes
+that reference, never talking directly to the BrokerConnection.)
+"""
+import sys
+import traceback
+
+import cPickle
+import logging
+import Queue
+
+from webkitpy.common.system import stack_utils
+
+_log = logging.getLogger(__name__)
+
+
+class BrokerClient(object):
+ """Abstract base class / interface that all message broker clients must
+ implement. In addition to the methods below, by convention clients
+ implement routines of the signature type
+    implement routines with signatures of the form
+ handle_MESSAGE_NAME(self, src, ...):
+
+ where MESSAGE_NAME matches the string passed to post_message(), and
+ src indicates the name of the sender. If the message contains values in
+    the message body, those will be provided as additional positional arguments."""
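+
+    # For example, applying this convention, a client that should receive
+    # post_message('started_test', test_info, hang_timeout) would implement
+    # (the argument names here are illustrative):
+    #
+    #     def handle_started_test(self, src, test_info, hang_timeout):
+    #         ...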
+
+ def is_done(self):
+ """Called from inside run_message_loop() to indicate whether to exit."""
+ raise NotImplementedError
+
+ def name(self):
+ """Return a name that identifies the client."""
+ raise NotImplementedError
+
+
+class Broker(object):
+ """Brokers provide the basic model of a set of topics. Clients can post a
+ message to any topic using post_message(), and can process messages on one
+ topic at a time using run_message_loop()."""
+
+ def __init__(self, options, queue_maker):
+ """Args:
+ options: a runtime option class from optparse
+ queue_maker: a factory method that returns objects implementing a
+ Queue interface (put()/get()).
+ """
+ self._options = options
+ self._queue_maker = queue_maker
+ self._topics = {}
+
+ def add_topic(self, topic_name):
+ if topic_name not in self._topics:
+ self._topics[topic_name] = self._queue_maker()
+
+ def _get_queue_for_topic(self, topic_name):
+ return self._topics[topic_name]
+
+ def post_message(self, client, topic_name, message_name, *message_args):
+ """Post a message to the appropriate topic name.
+
+ Messages have a name and a tuple of optional arguments. Both must be picklable."""
+ message = _Message(client.name(), topic_name, message_name, message_args)
+ queue = self._get_queue_for_topic(topic_name)
+ queue.put(_Message.dumps(message))
+
+ def run_message_loop(self, topic_name, client, delay_secs=None):
+ """Loop processing messages until client.is_done() or delay passes.
+
+ To run indefinitely, set delay_secs to None."""
+ assert delay_secs is None or delay_secs > 0
+ self._run_loop(topic_name, client, block=True, delay_secs=delay_secs)
+
+ def run_all_pending(self, topic_name, client):
+ """Process messages until client.is_done() or caller would block."""
+ self._run_loop(topic_name, client, block=False, delay_secs=None)
+
+ def _run_loop(self, topic_name, client, block, delay_secs):
+ queue = self._get_queue_for_topic(topic_name)
+ while not client.is_done():
+ try:
+ s = queue.get(block, delay_secs)
+ except Queue.Empty:
+ return
+ msg = _Message.loads(s)
+ self._dispatch_message(msg, client)
+
+ def _dispatch_message(self, message, client):
+ if not hasattr(client, 'handle_' + message.name):
+ raise ValueError(
+ "%s: received message '%s' it couldn't handle" %
+ (client.name(), message.name))
+ optargs = message.args
+ message_handler = getattr(client, 'handle_' + message.name)
+ message_handler(message.src, *optargs)
+
+
+class _Message(object):
+ @staticmethod
+ def loads(string_value):
+ obj = cPickle.loads(string_value)
+ assert(isinstance(obj, _Message))
+ return obj
+
+ def __init__(self, src, topic_name, message_name, message_args):
+ self.src = src
+ self.topic_name = topic_name
+ self.name = message_name
+ self.args = message_args
+
+ def dumps(self):
+ return cPickle.dumps(self)
+
+ def __repr__(self):
+ return ("_Message(from='%s', topic_name='%s', message_name='%s')" %
+ (self.src, self.topic_name, self.name))
+
+
+class BrokerConnection(object):
+ """BrokerConnection provides a connection-oriented facade on top of a
+ Broker, so that callers don't have to repeatedly pass the same topic
+ names over and over."""
+
+ def __init__(self, broker, client, run_topic, post_topic):
+ """Create a BrokerConnection on top of a Broker. Note that the Broker
+ is passed in rather than created so that a single Broker can be used
+ by multiple BrokerConnections."""
+ self._broker = broker
+ self._client = client
+ self._post_topic = post_topic
+ self._run_topic = run_topic
+ broker.add_topic(run_topic)
+ broker.add_topic(post_topic)
+
+ def run_message_loop(self, delay_secs=None):
+ self._broker.run_message_loop(self._run_topic, self._client, delay_secs)
+
+ def post_message(self, message_name, *message_args):
+ self._broker.post_message(self._client, self._post_topic,
+ message_name, *message_args)
+
+ def raise_exception(self, exc_info):
+ # Since tracebacks aren't picklable, send the extracted stack instead.
+ exception_type, exception_value, exception_traceback = sys.exc_info()
+ stack_utils.log_traceback(_log.debug, exception_traceback)
+ stack = traceback.extract_tb(exception_traceback)
+ self._broker.post_message(self._client, self._post_topic, 'exception', exception_type, exception_value, stack)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker_unittest.py
new file mode 100644
index 000000000..cb8d8e6f9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/message_broker_unittest.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.controllers import message_broker
+
+# This file exists to test routines that aren't necessarily covered elsewhere;
+# most of the testing of message_broker will be covered under the tests in
+# the manager_worker_broker module.
+
+
+class MessageTest(unittest.TestCase):
+ def test__no_body(self):
+ msg = message_broker._Message('src', 'topic_name', 'message_name', None)
+ self.assertTrue(repr(msg))
+ s = msg.dumps()
+ new_msg = message_broker._Message.loads(s)
+ self.assertEqual(new_msg.name, 'message_name')
+ self.assertEqual(new_msg.args, None)
+ self.assertEqual(new_msg.topic_name, 'topic_name')
+ self.assertEqual(new_msg.src, 'src')
+
+ def test__body(self):
+ msg = message_broker._Message('src', 'topic_name', 'message_name',
+ ('body', 0))
+ self.assertTrue(repr(msg))
+ s = msg.dumps()
+ new_msg = message_broker._Message.loads(s)
+ self.assertEqual(new_msg.name, 'message_name')
+ self.assertEqual(new_msg.args, ('body', 0))
+ self.assertEqual(new_msg.topic_name, 'topic_name')
+ self.assertEqual(new_msg.src, 'src')
+
+
+class InterfaceTest(unittest.TestCase):
+ # These tests mostly exist to pacify coverage.
+
+ # FIXME: There must be a better way to do this and also verify
+ # that classes do implement every abstract method in an interface.
+
+ def test_brokerclient_is_abstract(self):
+ # Test that all the base class methods are abstract and have the
+ # signature we expect.
+ obj = message_broker.BrokerClient()
+ self.assertRaises(NotImplementedError, obj.is_done)
+ self.assertRaises(NotImplementedError, obj.name)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
new file mode 100644
index 000000000..af4b2d92e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -0,0 +1,306 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import re
+import time
+
+from webkitpy.layout_tests.controllers import test_result_writer
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_results import TestResult
+
+
+_log = logging.getLogger(__name__)
+
+
+def run_single_test(port, options, test_input, driver, worker_name):
+ runner = SingleTestRunner(options, port, driver, test_input, worker_name)
+ return runner.run()
+
+
+class SingleTestRunner:
+
+ def __init__(self, options, port, driver, test_input, worker_name):
+ self._options = options
+ self._port = port
+ self._driver = driver
+ self._timeout = test_input.timeout
+ self._worker_name = worker_name
+ self._test_name = test_input.test_name
+
+ self._is_reftest = False
+ self._reference_files = port.reference_files(self._test_name)
+
+ if self._reference_files:
+ # Detect and report a test which has a wrong combination of expectation files.
+ # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
+ # 'foo-expected.txt', we should warn users. One test file must be used exclusively
+ # in either layout tests or reftests, but not in both.
+ for suffix in ('.txt', '.png', '.wav'):
+ expected_filename = self._port.expected_filename(self._test_name, suffix)
+ if port.host.filesystem.exists(expected_filename):
+ _log.error('%s is both a reftest and has an expected output file %s.',
+ self._test_name, expected_filename)
+
+ def _expected_driver_output(self):
+ return DriverOutput(self._port.expected_text(self._test_name),
+ self._port.expected_image(self._test_name),
+ self._port.expected_checksum(self._test_name),
+ self._port.expected_audio(self._test_name))
+
+ def _should_fetch_expected_checksum(self):
+ return (self._options.pixel_tests and
+ not (self._options.new_baseline or self._options.reset_results))
+
+ def _driver_input(self):
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+        # previous run will be copied into the baseline.)
+ image_hash = None
+ if self._should_fetch_expected_checksum():
+ image_hash = self._port.expected_checksum(self._test_name)
+ return DriverInput(self._test_name, self._timeout, image_hash, bool(self._reference_files))
+
+ def run(self):
+ if self._reference_files:
+ if self._port.get_option('no_ref_tests') or self._options.new_baseline or self._options.reset_results:
+ result = TestResult(self._test_name)
+ result.type = test_expectations.SKIP
+ return result
+ return self._run_reftest()
+ if self._options.new_baseline or self._options.reset_results:
+ return self._run_rebaseline()
+ return self._run_compare_test()
+
+ def _run_compare_test(self):
+ driver_output = self._driver.run_test(self._driver_input())
+ expected_driver_output = self._expected_driver_output()
+ test_result = self._compare_output(driver_output, expected_driver_output)
+ if self._options.new_test_results:
+ self._add_missing_baselines(test_result, driver_output)
+ test_result_writer.write_test_result(self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
+ return test_result
+
+ def _run_rebaseline(self):
+ driver_output = self._driver.run_test(self._driver_input())
+ failures = self._handle_error(driver_output)
+ test_result_writer.write_test_result(self._port, self._test_name, driver_output, None, failures)
+        # FIXME: If the test crashed or timed out, it might be better to avoid
+        # writing new baselines.
+ self._overwrite_baselines(driver_output)
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+ _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
+
+ def _add_missing_baselines(self, test_result, driver_output):
+ missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
+ if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
+ self._save_baseline_data(driver_output.text, ".txt", SingleTestRunner._render_tree_dump_pattern.match(driver_output.text))
+ if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
+ self._save_baseline_data(driver_output.audio, ".wav", generate_new_baseline=False)
+ if missingImage:
+ self._save_baseline_data(driver_output.image, ".png", generate_new_baseline=True)
+
+ def _overwrite_baselines(self, driver_output):
+ # Although all DumpRenderTree output should be utf-8,
+ # we do not ever decode it inside run-webkit-tests. For some tests
+ # DumpRenderTree may not output utf-8 text (e.g. webarchives).
+ self._save_baseline_data(driver_output.text, ".txt", generate_new_baseline=self._options.new_baseline)
+ self._save_baseline_data(driver_output.audio, ".wav", generate_new_baseline=self._options.new_baseline)
+ if self._options.pixel_tests:
+ self._save_baseline_data(driver_output.image, ".png", generate_new_baseline=self._options.new_baseline)
+
+ def _save_baseline_data(self, data, modifier, generate_new_baseline=True):
+ """Saves a new baseline file into the port's baseline directory.
+
+ The file will be named simply "<test>-expected<modifier>", suitable for
+ use as the expected results in a later run.
+
+ Args:
+ data: result to be saved as the new baseline
+ modifier: type of the result file, e.g. ".txt" or ".png"
+            generate_new_baseline: whether to generate a new, platform-specific
+ baseline, or update the existing one
+ """
+ if data is None:
+ return
+ port = self._port
+ fs = port._filesystem
+ if generate_new_baseline:
+ relative_dir = fs.dirname(self._test_name)
+ baseline_path = port.baseline_path()
+ output_dir = fs.join(baseline_path, relative_dir)
+ output_file = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + modifier)
+ fs.maybe_make_directory(output_dir)
+ output_path = fs.join(output_dir, output_file)
+ else:
+ output_path = port.expected_filename(self._test_name, modifier)
+
+ result_name = fs.relpath(output_path, port.layout_tests_dir())
+ _log.info('Writing new expected result "%s"' % result_name)
+ port.update_baseline(output_path, data)
+
+ def _handle_error(self, driver_output, reference_filename=None):
+ """Returns test failures if some unusual errors happen in driver's run.
+
+ Args:
+ driver_output: The output from the driver.
+ reference_filename: The full path to the reference file which produced the driver_output.
+ This arg is optional and should be used only in reftests until we have a better way to know
+ which html file is used for producing the driver_output.
+ """
+ failures = []
+ fs = self._port._filesystem
+ if driver_output.timeout:
+ failures.append(test_failures.FailureTimeout(bool(reference_filename)))
+
+ if reference_filename:
+ testname = self._port.relative_test_filename(reference_filename)
+ else:
+ testname = self._test_name
+
+ if driver_output.crash:
+ failures.append(test_failures.FailureCrash(bool(reference_filename)))
+ _log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, testname,
+ driver_output.error))
+ elif driver_output.error:
+ _log.debug("%s %s output stderr lines:\n%s" % (self._worker_name, testname,
+ driver_output.error))
+ return failures
+
+ def _compare_output(self, driver_output, expected_driver_output):
+ failures = []
+ failures.extend(self._handle_error(driver_output))
+
+ if driver_output.crash:
+ # Don't continue any more if we already have a crash.
+ # In case of timeouts, we continue since we still want to see the text and image output.
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+ failures.extend(self._compare_text(driver_output.text, expected_driver_output.text))
+ failures.extend(self._compare_audio(driver_output.audio, expected_driver_output.audio))
+ if self._options.pixel_tests:
+ failures.extend(self._compare_image(driver_output, expected_driver_output))
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+ def _compare_text(self, actual_text, expected_text):
+ failures = []
+ if (expected_text and actual_text and
+ # Assuming expected_text is already normalized.
+ self._port.compare_text(self._get_normalized_output_text(actual_text), expected_text)):
+ failures.append(test_failures.FailureTextMismatch())
+ elif actual_text and not expected_text:
+ failures.append(test_failures.FailureMissingResult())
+ return failures
+
+ def _compare_audio(self, actual_audio, expected_audio):
+ failures = []
+ if (expected_audio and actual_audio and
+ self._port.compare_audio(actual_audio, expected_audio)):
+ failures.append(test_failures.FailureAudioMismatch())
+ elif actual_audio and not expected_audio:
+ failures.append(test_failures.FailureMissingAudio())
+ return failures
+
+ def _get_normalized_output_text(self, output):
+ """Returns the normalized text output, i.e. the output in which
+ the end-of-line characters are normalized to "\n"."""
+ # Running tests on Windows produces "\r\n". The "\n" part is helpfully
+ # changed to "\r\n" by our system (Python/Cygwin), resulting in
+ # "\r\r\n", when, in fact, we wanted to compare the text output with
+ # the normalized text expectation files.
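+        # For example, both "foo\r\r\n" and "foo\r\n" normalize to "foo\n".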
+ return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
+
+ # FIXME: This function also creates the image diff. Maybe that work should
+ # be handled elsewhere?
+ def _compare_image(self, driver_output, expected_driver_output):
+ failures = []
+ # If we didn't produce a hash file, this test must be text-only.
+ if driver_output.image_hash is None:
+ return failures
+ if not expected_driver_output.image:
+ failures.append(test_failures.FailureMissingImage())
+ elif not expected_driver_output.image_hash:
+ failures.append(test_failures.FailureMissingImageHash())
+ elif driver_output.image_hash != expected_driver_output.image_hash:
+ diff_result = self._port.diff_image(driver_output.image, expected_driver_output.image)
+ driver_output.image_diff = diff_result[0]
+ if driver_output.image_diff:
+ failures.append(test_failures.FailureImageHashMismatch(diff_result[1]))
+ return failures
+
+ def _run_reftest(self):
+ test_output = self._driver.run_test(self._driver_input())
+ total_test_time = 0
+ reference_output = None
+ test_result = None
+
+        # A reftest can have multiple match references and multiple mismatch references;
+        # the test fails if any mismatch reference matches, or if none of the match references match.
+ # To minimize the number of references we have to check, we run all of the mismatches first,
+ # then the matches, and short-circuit out as soon as we can.
+ # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
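+        # Concretely, the loop below stops as soon as the outcome is decided:
+        # a '!=' reference whose comparison yields failures (the images matched
+        # when they should not have) decides the test as failed, and a '=='
+        # reference with no failures decides it as passed.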
+
+ putAllMismatchBeforeMatch = sorted
+ for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
+ reference_test_name = self._port.relative_test_filename(reference_filename)
+ reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, is_reftest=True))
+ test_result = self._compare_output_with_reference(test_output, reference_output, reference_filename, expectation == '!=')
+
+ if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
+ break
+ total_test_time += test_result.test_run_time
+
+ assert(reference_output)
+ test_result_writer.write_test_result(self._port, self._test_name, test_output, reference_output, test_result.failures)
+ return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr)
+
+ def _compare_output_with_reference(self, driver_output1, driver_output2, reference_filename, mismatch):
+ total_test_time = driver_output1.test_time + driver_output2.test_time
+ has_stderr = driver_output1.has_stderr() or driver_output2.has_stderr()
+ failures = []
+ failures.extend(self._handle_error(driver_output1))
+ if failures:
+ # Don't continue any more if we already have crash or timeout.
+ return TestResult(self._test_name, failures, total_test_time, has_stderr)
+ failures.extend(self._handle_error(driver_output2, reference_filename=reference_filename))
+ if failures:
+ return TestResult(self._test_name, failures, total_test_time, has_stderr)
+
+ if not driver_output1.image_hash and not driver_output2.image_hash:
+ failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
+ elif mismatch:
+ if driver_output1.image_hash == driver_output2.image_hash:
+ failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
+ elif driver_output1.image_hash != driver_output2.image_hash:
+ failures.append(test_failures.FailureReftestMismatch(reference_filename))
+ return TestResult(self._test_name, failures, total_test_time, has_stderr)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor.py
new file mode 100644
index 000000000..1b2188362
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A helper class for reading in and dealing with tests expectations
+for layout tests.
+"""
+
+import itertools
+import logging
+import re
+
+try:
+ import json
+except ImportError:
+ # python 2.5 compatibility
+ import webkitpy.thirdparty.simplejson as json
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration, TestConfigurationConverter
+from webkitpy.layout_tests.models import test_expectations
+
+_log = logging.getLogger(__name__)
+
+
+class BugManager(object):
+ """A simple interface for managing bugs from TestExpectationsEditor."""
+ def close_bug(self, bug_ids, reference_bug_ids=None):
+ raise NotImplementedError("BugManager.close_bug")
+
+ def create_bug(self):
+ """Should return a newly created bug id in the form of r"BUG[^\d].*"."""
+ raise NotImplementedError("BugManager.create_bug")
+
+
+class TestExpectationsEditor(object):
+ """
+ The editor assumes that the expectation data is error-free.
+ """
+
+ def __init__(self, expectation_lines, bug_manager):
+ self._bug_manager = bug_manager
+ self._expectation_lines = expectation_lines
+ self._tests_with_directory_paths = set()
+ # FIXME: Unify this with TestExpectationsModel.
+ self._test_to_expectation_lines = {}
+ for expectation_line in expectation_lines:
+ for test in expectation_line.matching_tests:
+ if test == expectation_line.path:
+ self._test_to_expectation_lines.setdefault(test, []).append(expectation_line)
+ else:
+ self._tests_with_directory_paths.add(test)
+
+ def remove_expectation(self, test, test_config_set, remove_flakes=False):
+ """Removes existing expectations for {test} in the of test configurations {test_config_set}.
+ If the test is flaky, the expectation is not removed, unless remove_flakes is True.
+
+ In this context, removing expectations does not imply that the test is passing -- we are merely removing
+ any information about this test from the expectations.
+
+ We do not remove the actual expectation lines here. Instead, we adjust TestExpectationLine.matching_configurations.
+ The serializer will figure out what to do:
+        * An empty matching_configurations set means that this line matches nothing and will serialize as None.
+ * A matching_configurations set that can't be expressed as one line will be serialized as multiple lines.
+
+        Also, we only adjust matching_configurations for lines that match tests exactly, because expectation lines with
+        more specific path matches are valid and always win.
+
+        For example, the expectation with the path "fast/events/shadow/" will
+        be ignored when removing expectations for the test "fast/events/shadow/awesome-crash.html", since we can just
+        add a new expectation line for "fast/events/shadow/awesome-crash.html" to influence expected results.
+ """
+ expectation_lines = self._test_to_expectation_lines.get(test, [])
+ for expectation_line in expectation_lines:
+ if (not expectation_line.is_flaky() or remove_flakes) and expectation_line.matching_configurations & test_config_set:
+ expectation_line.matching_configurations = expectation_line.matching_configurations - test_config_set
+ if not expectation_line.matching_configurations:
+ self._bug_manager.close_bug(expectation_line.parsed_bug_modifiers)
+ return
+
+ def update_expectation(self, test, test_config_set, expectation_set, parsed_bug_modifiers=None):
+ """Updates expectations for {test} in the set of test configuration {test_config_set} to the values of {expectation_set}.
+ If {parsed_bug_modifiers} is supplied, it is used for updated expectations. Otherwise, a new bug is created.
+
+ Here, we treat updating expectations to PASS as special: if possible, the corresponding lines are completely removed.
+ """
+ # FIXME: Allow specifying modifiers (SLOW, SKIP, WONTFIX).
+ updated_expectations = []
+ expectation_lines = self._test_to_expectation_lines.get(test, [])
+ remaining_configurations = test_config_set.copy()
+ bug_ids = self._get_valid_bug_ids(parsed_bug_modifiers)
+ new_expectation_line_insertion_point = len(self._expectation_lines)
+ remove_expectations = expectation_set == set([test_expectations.PASS]) and test not in self._tests_with_directory_paths
+
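+        # The loop below distinguishes four relationships between an existing
+        # line's matching_configurations and the remaining requested
+        # configurations: exactly equal (edit or drop that line in place),
+        # strict superset (split off the requested configurations and stop),
+        # subset (drop the line and keep going), and partial overlap (carve
+        # out the overlap and keep going).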
+ for expectation_line in expectation_lines:
+ if expectation_line.matching_configurations == remaining_configurations:
+ # Tweak expectations on existing line.
+ if expectation_line.parsed_expectations == expectation_set:
+ return updated_expectations
+ self._bug_manager.close_bug(expectation_line.parsed_bug_modifiers, bug_ids)
+ updated_expectations.append(expectation_line)
+ if remove_expectations:
+ expectation_line.matching_configurations = set()
+ else:
+ expectation_line.parsed_expectations = expectation_set
+ expectation_line.parsed_bug_modifiers = bug_ids
+ return updated_expectations
+ elif expectation_line.matching_configurations >= remaining_configurations:
+ # 1) Split up into two expectation lines:
+ # * one with old expectations (existing expectation_line)
+ # * one with new expectations (new expectation_line)
+ # 2) Finish looking, since there will be no more remaining configs to test for.
+ expectation_line.matching_configurations -= remaining_configurations
+ updated_expectations.append(expectation_line)
+ new_expectation_line_insertion_point = self._expectation_lines.index(expectation_line) + 1
+ break
+ elif expectation_line.matching_configurations <= remaining_configurations:
+ # Remove existing expectation line.
+ self._bug_manager.close_bug(expectation_line.parsed_bug_modifiers, bug_ids)
+ expectation_line.matching_configurations = set()
+ updated_expectations.append(expectation_line)
+ else:
+ intersection = expectation_line.matching_configurations & remaining_configurations
+ if intersection:
+ expectation_line.matching_configurations -= intersection
+ updated_expectations.append(expectation_line)
+ new_expectation_line_insertion_point = self._expectation_lines.index(expectation_line) + 1
+
+ if not remove_expectations:
+ new_expectation_line = self._create_new_line(test, bug_ids, remaining_configurations, expectation_set)
+ updated_expectations.append(new_expectation_line)
+ self._expectation_lines.insert(new_expectation_line_insertion_point, new_expectation_line)
+
+ return updated_expectations
+
+ def _get_valid_bug_ids(self, suggested_bug_ids):
+ # FIXME: Flesh out creating a bug properly (title, etc.)
+ return suggested_bug_ids or [self._bug_manager.create_bug()]
+
+ def _create_new_line(self, name, bug_ids, config_set, expectation_set):
+ new_line = test_expectations.TestExpectationLine()
+ new_line.name = name
+ new_line.parsed_bug_modifiers = bug_ids
+ new_line.matching_configurations = config_set
+ new_line.parsed_expectations = expectation_set
+ # Ensure index integrity for multiple operations.
+ self._test_to_expectation_lines.setdefault(name, []).append(new_line)
+ return new_line
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py
new file mode 100644
index 000000000..f365d77ac
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests.controllers.test_expectations_editor import *
+from webkitpy.layout_tests.models.test_configuration import *
+from webkitpy.layout_tests.models.test_expectations import *
+
+
+class MockBugManager(object):
+ def close_bug(self, bug_id, reference_bug_id=None):
+ pass
+
+ def create_bug(self):
+ return "BUG_NEWLY_CREATED"
+
+
+class TestExpectationEditorTests(unittest.TestCase):
+ WIN_RELEASE_CPU_CONFIGS = set([
+ TestConfiguration('vista', 'x86', 'release', 'cpu'),
+ TestConfiguration('win7', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release', 'cpu'),
+ ])
+
+ RELEASE_CONFIGS = set([
+ TestConfiguration('vista', 'x86', 'release', 'cpu'),
+ TestConfiguration('win7', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release', 'cpu'),
+ TestConfiguration('vista', 'x86', 'release', 'gpu'),
+ TestConfiguration('win7', 'x86', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release', 'gpu'),
+ TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
+ TestConfiguration('leopard', 'x86', 'release', 'cpu'),
+ TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
+ TestConfiguration('leopard', 'x86', 'release', 'gpu'),
+ TestConfiguration('lucid', 'x86', 'release', 'cpu'),
+ TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
+ TestConfiguration('lucid', 'x86', 'release', 'gpu'),
+ TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ ])
+
+ def __init__(self, testFunc):
+ host = MockHost()
+ self.test_port = host.port_factory.get('test-win-xp', None)
+ self.full_test_list = ['failures/expected/keyboard.html', 'failures/expected/audio.html']
+ unittest.TestCase.__init__(self, testFunc)
+
+ def make_parsed_expectation_lines(self, in_string):
+ expectation_lines = TestExpectationParser.tokenize_list(in_string)
+ parser = TestExpectationParser(self.test_port, self.full_test_list, allow_rebaseline_modifier=False)
+ for expectation_line in expectation_lines:
+ self.assertFalse(expectation_line.is_invalid())
+ parser.parse(expectation_line)
+ return expectation_lines
+
+ def assert_remove_roundtrip(self, in_string, test, expected_string, remove_flakes=False):
+ test_config_set = set([self.test_port.test_configuration()])
+ expectation_lines = self.make_parsed_expectation_lines(in_string)
+ editor = TestExpectationsEditor(expectation_lines, MockBugManager())
+ editor.remove_expectation(test, test_config_set, remove_flakes)
+ converter = TestConfigurationConverter(self.test_port.all_test_configurations(), self.test_port.configuration_specifier_macros())
+ result = TestExpectationSerializer.list_to_string(expectation_lines, converter)
+ self.assertEquals(result, expected_string)
+
+ def assert_update_roundtrip(self, in_string, test, expectation_set, expected_string, expected_update_count, remove_flakes=False, parsed_bug_modifiers=None, test_configs=None):
+ test_config_set = test_configs or set([self.test_port.test_configuration()])
+ expectation_lines = self.make_parsed_expectation_lines(in_string)
+ editor = TestExpectationsEditor(expectation_lines, MockBugManager())
+ updated_expectation_lines = editor.update_expectation(test, test_config_set, expectation_set, parsed_bug_modifiers=parsed_bug_modifiers)
+ for updated_expectation_line in updated_expectation_lines:
+ self.assertTrue(updated_expectation_line in expectation_lines)
+ self.assertEquals(len(updated_expectation_lines), expected_update_count)
+ converter = TestConfigurationConverter(self.test_port.all_test_configurations(), self.test_port.configuration_specifier_macros())
+ result = TestExpectationSerializer.list_to_string(expectation_lines, converter)
+ self.assertEquals(result, expected_string)
+
+ def test_remove_expectation(self):
+ self.assert_remove_roundtrip("""
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/hang.html', """
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 MAC : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 MAC : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 WIN : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 LINUX MAC VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 WIN : failures/expected = PASS
+BUGX2 XP RELEASE : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 WIN : failures/expected = PASS
+BUGX2 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 WIN : failures/expected = FAIL""", 'failures/expected/keyboard.html', """
+BUGX1 WIN : failures/expected = FAIL""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE PASS
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = PASS IMAGE
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""")
+
+ self.assert_remove_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE PASS
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", remove_flakes=True)
+
+ def test_remove_expectation_multiple(self):
+ in_string = """
+BUGX1 WIN : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE"""
+ expectation_lines = self.make_parsed_expectation_lines(in_string)
+ converter = TestConfigurationConverter(self.test_port.all_test_configurations(), self.test_port.configuration_specifier_macros())
+ editor = TestExpectationsEditor(expectation_lines, MockBugManager())
+ test = "failures/expected/keyboard.html"
+
+ editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'release', 'cpu')]))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'cpu')]))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug', 'gpu'), TestConfiguration('win7', 'x86', 'release', 'gpu')]))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 DEBUG GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA RELEASE : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'gpu'), TestConfiguration('xp', 'x86', 'release', 'gpu')]))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA RELEASE : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug', 'cpu'), TestConfiguration('vista', 'x86', 'debug', 'gpu'), TestConfiguration('vista', 'x86', 'release', 'gpu')]))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.remove_expectation(test, set(self.test_port.all_test_configurations()))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ test = "failures/expected/audio.html"
+
+ editor.remove_expectation(test, set(self.test_port.all_test_configurations()))
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), "")
+
+ def test_update_expectation(self):
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP RELEASE CPU : failures/expected = TEXT
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
+BUGX1 XP RELEASE CPU : failures/expected = TEXT
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = PASS""", 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([TEXT]), """
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 0)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1, parsed_bug_modifiers=['BUGAWESOME'])
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT""", 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
+BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, parsed_bug_modifiers=['BUGAWESOME'])
+
+ self.assert_update_roundtrip("""
+BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+
+ self.assert_update_roundtrip("""
+BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
+BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT""", 1)
+
+ self.assert_update_roundtrip("""
+BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+
+ self.assert_update_roundtrip("""
+BUGX1 RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 LINUX MAC RELEASE CPU : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+
+ self.assert_update_roundtrip("""
+BUGX1 MAC : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 MAC : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+
+ def test_update_expectation_relative(self):
+ self.assert_update_roundtrip("""
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT
+BUGX2 MAC : failures/expected/audio.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
+BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX2 MAC : failures/expected/audio.html = TEXT""", 2, parsed_bug_modifiers=['BUGAWESOME'])
+
+ def test_update_expectation_multiple(self):
+ in_string = """
+BUGX1 WIN : failures/expected/keyboard.html = IMAGE
+BUGX2 WIN : failures/expected/audio.html = IMAGE"""
+ expectation_lines = self.make_parsed_expectation_lines(in_string)
+ converter = TestConfigurationConverter(self.test_port.all_test_configurations(), self.test_port.configuration_specifier_macros())
+ editor = TestExpectationsEditor(expectation_lines, MockBugManager())
+ test = "failures/expected/keyboard.html"
+
+ editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'release', 'cpu')]), set([IMAGE_PLUS_TEXT]), ['BUG_UPDATE1'])
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUG_UPDATE1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE+TEXT
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'cpu')]), set([TEXT]), ['BUG_UPDATE2'])
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
+BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUG_UPDATE1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE+TEXT
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.update_expectation(test, self.WIN_RELEASE_CPU_CONFIGS, set([CRASH]), ['BUG_UPDATE3'])
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 RELEASE GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA XP GPU : failures/expected/keyboard.html = IMAGE
+BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUG_UPDATE3 WIN RELEASE CPU : failures/expected/keyboard.html = CRASH
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.update_expectation(test, self.RELEASE_CONFIGS, set([FAIL]), ['BUG_UPDATE4'])
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUGX1 XP DEBUG GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 VISTA WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
+BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUG_UPDATE4 RELEASE : failures/expected/keyboard.html = FAIL
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+ editor.update_expectation(test, set(self.test_port.all_test_configurations()), set([TIMEOUT]), ['BUG_UPDATE5'])
+ self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
+BUG_UPDATE5 : failures/expected/keyboard.html = TIMEOUT
+BUGX2 WIN : failures/expected/audio.html = IMAGE""")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
new file mode 100644
index 000000000..88e2ab691
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import os
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+
+def write_test_result(port, test_name, driver_output,
+ expected_driver_output, failures):
+ """Write the test result to the result output directory."""
+ root_output_dir = port.results_directory()
+ writer = TestResultWriter(port, root_output_dir, test_name)
+ if driver_output.error:
+ writer.write_stderr(driver_output.error)
+
+ for failure in failures:
+ # FIXME: Instead of this long 'if' block, each failure class might
+ # have a responsibility for writing a test result.
+ if isinstance(failure, (test_failures.FailureMissingResult,
+ test_failures.FailureTextMismatch)):
+ writer.write_text_files(driver_output.text, expected_driver_output.text)
+ writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
+ elif isinstance(failure, test_failures.FailureMissingImage):
+ writer.write_image_files(driver_output.image, expected_image=None)
+ elif isinstance(failure, test_failures.FailureMissingImageHash):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ elif isinstance(failure, test_failures.FailureImageHashMismatch):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ writer.write_image_diff_files(driver_output.image_diff)
+ elif isinstance(failure, (test_failures.FailureAudioMismatch,
+ test_failures.FailureMissingAudio)):
+ writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
+ elif isinstance(failure, test_failures.FailureCrash):
+ crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
+ writer.write_crash_report(crashed_driver_output.crashed_process_name, crashed_driver_output.error)
+ elif isinstance(failure, test_failures.FailureReftestMismatch):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ # FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
+ # FIXME: We should always have 2 images here.
+ if driver_output.image and expected_driver_output.image:
+ image_diff = port.diff_image(driver_output.image, expected_driver_output.image, tolerance=0)[0]
+ if image_diff:
+ writer.write_image_diff_files(image_diff)
+ else:
+ _log.warn('Can not get image diff. ImageDiff program might not work correctly.')
+ writer.copy_file(failure.reference_filename)
+ elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
+ writer.write_image_files(driver_output.image, expected_image=None)
+ writer.copy_file(failure.reference_filename)
+ else:
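+            # Timeouts and reftests that produced no images need no extra
+            # artifacts here; stderr, if any, was written above.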
+ assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
+
+
+class TestResultWriter(object):
+ """A class which handles all writing operations to the result directory."""
+
+ # Filename pieces when writing failures to the test results directory.
+ FILENAME_SUFFIX_ACTUAL = "-actual"
+ FILENAME_SUFFIX_EXPECTED = "-expected"
+ FILENAME_SUFFIX_DIFF = "-diff"
+ FILENAME_SUFFIX_STDERR = "-stderr"
+ FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
+ FILENAME_SUFFIX_WDIFF = "-wdiff.html"
+ FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
+ FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
+ FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
+
+ def __init__(self, port, root_output_dir, test_name):
+ self._port = port
+ self._root_output_dir = root_output_dir
+ self._test_name = test_name
+
+ def _make_output_directory(self):
+ """Creates the output directory (if needed) for a given test filename."""
+ fs = self._port._filesystem
+ output_filename = fs.join(self._root_output_dir, self._test_name)
+ self._port.maybe_make_directory(fs.dirname(output_filename))
+
+ def output_filename(self, modifier):
+ """Returns a filename inside the output dir that contains modifier.
+
+ For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
+ the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
+
+ Args:
+ modifier: a string to replace the extension of filename with
+
+ Return:
+ The absolute path to the output filename
+ """
+ fs = self._port._filesystem
+ output_filename = fs.join(self._root_output_dir, self._test_name)
+ return fs.splitext(output_filename)[0] + modifier
+
+ def _output_testname(self, modifier):
+ fs = self._port._filesystem
+ return fs.splitext(fs.basename(self._test_name))[0] + modifier
+
+ def write_output_files(self, file_type, output, expected):
+ """Writes the test output, the expected output in the results directory.
+
+ The full output filename of the actual, for example, will be
+ <filename>-actual<file_type>
+ For instance,
+ my_test-actual.txt
+
+ Args:
+ file_type: A string describing the test output file type, e.g. ".txt"
+ output: A string containing the test output
+ expected: A string containing the expected test output
+ """
+ self._make_output_directory()
+ actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+
+ fs = self._port._filesystem
+ if output is not None:
+ fs.write_binary_file(actual_filename, output)
+ if expected is not None:
+ fs.write_binary_file(expected_filename, expected)
+
+ def write_stderr(self, error):
+ fs = self._port._filesystem
+ filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
+ fs.maybe_make_directory(fs.dirname(filename))
+ fs.write_binary_file(filename, error)
+
+ def write_crash_report(self, crashed_process_name, error):
+ fs = self._port._filesystem
+ filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
+ fs.maybe_make_directory(fs.dirname(filename))
+ # FIXME: We shouldn't be grabbing private members of port.
+ crash_logs = CrashLogs(fs)
+ log = crash_logs.find_newest_log(crashed_process_name)
+ # CrashLogs doesn't support every platform, so we fall back to
+ # including the stderr output, which is admittedly somewhat redundant.
+ fs.write_text_file(filename, log if log else error)
+
+ def write_text_files(self, actual_text, expected_text):
+ self.write_output_files(".txt", actual_text, expected_text)
+
+ def create_text_diff_and_write_result(self, actual_text, expected_text):
+ # FIXME: This function is actually doing the diffs as well as writing results.
+ # It might be better to extract code which does 'diff' and make it a separate function.
+ if not actual_text or not expected_text:
+ return
+
+ self._make_output_directory()
+ file_type = '.txt'
+ actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+ fs = self._port._filesystem
+ # We treat diff output as binary. Diff output may contain multiple files
+ # in conflicting encodings.
+ diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
+ diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
+ fs.write_binary_file(diff_filename, diff)
+
+ # Shell out to wdiff to get colored inline diffs.
+ wdiff = self._port.wdiff_text(expected_filename, actual_filename)
+ wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
+ fs.write_binary_file(wdiff_filename, wdiff)
+
+ # Use WebKit's PrettyPatch.rb to get an HTML diff.
+ pretty_patch = self._port.pretty_patch_text(diff_filename)
+ pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
+ fs.write_binary_file(pretty_patch_filename, pretty_patch)
+
+ def write_audio_files(self, actual_audio, expected_audio):
+ self.write_output_files('.wav', actual_audio, expected_audio)
+
+ def write_image_files(self, actual_image, expected_image):
+ self.write_output_files('.png', actual_image, expected_image)
+
+ def write_image_diff_files(self, image_diff):
+ diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
+ fs = self._port._filesystem
+ fs.write_binary_file(diff_filename, image_diff)
+
+ diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
+ # FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
+        # FIXME: old-run-webkit-tests includes a link to the test file.
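+        # The page written below preloads the -expected.png and -actual.png
+        # images and, once both have loaded, flips between them every two
+        # seconds.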
+ html = """<!DOCTYPE HTML>
+<html>
+<head>
+<title>%(title)s</title>
+<style>.label{font-weight:bold}</style>
+</head>
+<body>
+Difference between images: <a href="%(diff_filename)s">diff</a><br>
+<div class=imageText></div>
+<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
+<script>
+(function() {
+ var preloadedImageCount = 0;
+ function preloadComplete() {
+ ++preloadedImageCount;
+ if (preloadedImageCount < 2)
+ return;
+ toggleImages();
+ setInterval(toggleImages, 2000)
+ }
+
+ function preloadImage(url) {
+        var image = new Image();
+ image.addEventListener('load', preloadComplete);
+ image.src = url;
+ return image;
+ }
+
+ function toggleImages() {
+ if (text.textContent == 'Expected Image') {
+ text.textContent = 'Actual Image';
+ container.replaceChild(actualImage, container.firstChild);
+ } else {
+ text.textContent = 'Expected Image';
+ container.replaceChild(expectedImage, container.firstChild);
+ }
+ }
+
+ var text = document.querySelector('.imageText');
+ var container = document.querySelector('.imageContainer');
+ var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
+ var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
+})();
+</script>
+</body>
+</html>
+""" % {
+ 'title': self._test_name,
+ 'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
+ 'prefix': self._output_testname(''),
+ }
+ # FIXME: This seems like a text file, not a binary file.
+ self._port._filesystem.write_binary_file(diffs_html_filename, html)
+
+ def copy_file(self, src_filepath):
+ fs = self._port._filesystem
+ assert fs.exists(src_filepath), 'src_filepath: %s' % src_filepath
+ dst_filepath = fs.join(self._root_output_dir, self._port.relative_test_filename(src_filepath))
+ self._make_output_directory()
+ fs.copyfile(src_filepath, dst_filepath)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
new file mode 100644
index 000000000..c79846a2d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.controllers import test_result_writer
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
+
+
+class TestResultWriterTest(unittest.TestCase):
+
+ def test_reftest_diff_image(self):
+ """A write_test_result should call port.diff_image with tolerance=0 in case of FailureReftestMismatch."""
+ used_tolerance_values = []
+
+ class ImageDiffTestPort(TestPort):
+ def diff_image(self, expected_contents, actual_contents, tolerance=None):
+ used_tolerance_values.append(tolerance)
+ return (True, 1)
+
+ host = MockHost()
+ port = ImageDiffTestPort(host)
+ test_name = 'failures/unexpected/reftest.html'
+ test_reference_file = host.filesystem.join(port.layout_tests_dir(), 'failures/unexpected/reftest-expected.html')
+ driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
+ driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
+ failures = [test_failures.FailureReftestMismatch(test_reference_file)]
+ test_result_writer.write_test_result(ImageDiffTestPort(host), test_name,
+ driver_output1, driver_output2, failures)
+ self.assertEqual([0], used_tolerance_values)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
new file mode 100644
index 000000000..b66ce9b79
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
@@ -0,0 +1,254 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Handle messages from the Manager and executes actual tests."""
+
+import logging
+import sys
+import threading
+import time
+
+from webkitpy.layout_tests.controllers import manager_worker_broker
+from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_results
+
+
+_log = logging.getLogger(__name__)
+
+
+class Worker(manager_worker_broker.AbstractWorker):
+ def __init__(self, worker_connection, worker_number, results_directory, options):
+ manager_worker_broker.AbstractWorker.__init__(self, worker_connection, worker_number, results_directory, options)
+ self._done = False
+ self._canceled = False
+ self._port = None
+ self._batch_size = None
+ self._batch_count = None
+ self._filesystem = None
+ self._driver = None
+ self._tests_run_file = None
+ self._tests_run_filename = None
+
+ def __del__(self):
+ self.cleanup()
+
+ def safe_init(self, port):
+ """This method should only be called when it is is safe for the mixin
+ to create state that can't be Pickled.
+
+ This routine exists so that the mixin can be created and then marshaled
+ across into a child process."""
+ self._port = port
+ self._filesystem = port.host.filesystem
+ self._batch_count = 0
+ self._batch_size = self._options.batch_size or 0
+ tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
+ self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
+
+ def cancel(self):
+ """Attempt to abort processing (best effort)."""
+ self._canceled = True
+
+ def is_done(self):
+ return self._done or self._canceled
+
+ def name(self):
+ return self._name
+
+ def run(self, port):
+ self.safe_init(port)
+
+ exception_msg = ""
+ _log.debug("%s starting" % self._name)
+
+ try:
+ self._worker_connection.run_message_loop()
+ if not self.is_done():
+ raise AssertionError("%s: ran out of messages in worker queue."
+ % self._name)
+ except KeyboardInterrupt:
+ exception_msg = ", interrupted"
+ self._worker_connection.raise_exception(sys.exc_info())
+ except:
+ exception_msg = ", exception raised"
+ self._worker_connection.raise_exception(sys.exc_info())
+ finally:
+ _log.debug("%s done with message loop%s" % (self._name, exception_msg))
+ self._worker_connection.post_message('done')
+ self.cleanup()
+ _log.debug("%s exiting" % self._name)
+
+ def handle_test_list(self, src, list_name, test_list):
+ start_time = time.time()
+ num_tests = 0
+ for test_input in test_list:
+ self._run_test(test_input)
+ num_tests += 1
+ self._worker_connection.yield_to_broker()
+
+ elapsed_time = time.time() - start_time
+ self._worker_connection.post_message('finished_list', list_name, num_tests, elapsed_time)
+
+ def handle_stop(self, src):
+ self._done = True
+
+ def _run_test(self, test_input):
+ test_timeout_sec = self.timeout(test_input)
+ start = time.time()
+ self._worker_connection.post_message('started_test', test_input, test_timeout_sec)
+
+ result = self.run_test_with_timeout(test_input, test_timeout_sec)
+
+ elapsed_time = time.time() - start
+ self._worker_connection.post_message('finished_test', result, elapsed_time)
+
+ self.clean_up_after_test(test_input, result)
+
+ def cleanup(self):
+ _log.debug("%s cleaning up" % self._name)
+ self.kill_driver()
+ if self._tests_run_file:
+ self._tests_run_file.close()
+ self._tests_run_file = None
+
+ def timeout(self, test_input):
+ """Compute the appropriate timeout value for a test."""
+ # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+ # larger than that. We also add a little more padding if we're
+ # running tests in a separate thread.
+ #
+ # Note that we need to convert the test timeout from a
+ # string value in milliseconds to a float for Python.
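+        # For instance, a hypothetical test_input.timeout of "6000" (ms)
+        # yields an 18.0-second driver timeout, or 19.0 seconds once the
+        # one-second thread padding below is added when running singly.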
+ driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+ if not self._options.run_singly:
+ return driver_timeout_sec
+
+ thread_padding_sec = 1.0
+ thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+ return thread_timeout_sec
+
+ def kill_driver(self):
+ if self._driver:
+ _log.debug("%s killing driver" % self._name)
+ self._driver.stop()
+ self._driver = None
+
+ def run_test_with_timeout(self, test_input, timeout):
+ if self._options.run_singly:
+ return self._run_test_in_another_thread(test_input, timeout)
+ return self._run_test_in_this_thread(test_input)
+
+ def clean_up_after_test(self, test_input, result):
+ self._batch_count += 1
+ test_name = test_input.test_name
+ self._tests_run_file.write(test_name + "\n")
+
+ if result.failures:
+ # Check and kill DumpRenderTree if we need to.
+            if any(f.driver_needs_restart() for f in result.failures):
+ self.kill_driver()
+ # Reset the batch count since the shell just bounced.
+ self._batch_count = 0
+
+ # Print the error message(s).
+ _log.debug("%s %s failed:" % (self._name, test_name))
+ for f in result.failures:
+ _log.debug("%s %s" % (self._name, f.message()))
+ elif result.type == test_expectations.SKIP:
+ _log.debug("%s %s skipped" % (self._name, test_name))
+ else:
+ _log.debug("%s %s passed" % (self._name, test_name))
+
+ if self._batch_size > 0 and self._batch_count >= self._batch_size:
+ self.kill_driver()
+ self._batch_count = 0
+
+ def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+ """Run a test in a separate thread, enforcing a hard time limit.
+
+ Since we can only detect the termination of a thread, not any internal
+ state or progress, we can only run per-test timeouts when running test
+ files singly.
+
+ Args:
+          test_input: Object containing the test filename and timeout.
+          thread_timeout_sec: Time to wait before killing the driver process.
+ Returns:
+ A TestResult
+ """
+ worker = self
+
+ driver = self._port.create_driver(self._worker_number)
+
+ class SingleTestThread(threading.Thread):
+ def __init__(self):
+ threading.Thread.__init__(self)
+ self.result = None
+
+ def run(self):
+ self.result = worker.run_single_test(driver, test_input)
+
+ thread = SingleTestThread()
+ thread.start()
+ thread.join(thread_timeout_sec)
+ result = thread.result
+ if thread.isAlive():
+ # If join() returned with the thread still running, the
+ # DumpRenderTree is completely hung and there's nothing
+ # more we can do with it. We have to kill all the
+ # DumpRenderTrees to free it up. If we're running more than
+ # one DumpRenderTree thread, we'll end up killing the other
+ # DumpRenderTrees too, introducing spurious crashes. We accept
+ # that tradeoff in order to avoid losing the rest of this
+ # thread's results.
+ _log.error('Test thread hung: killing all DumpRenderTrees')
+
+ driver.stop()
+
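+        # If the test thread never produced a result (e.g. it hung and was
+        # abandoned above), synthesize an empty TestResult so the caller
+        # still gets a result object to report for this test.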
+ if not result:
+ result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
+ return result
+
+ def _run_test_in_this_thread(self, test_input):
+ """Run a single test file using a shared DumpRenderTree process.
+
+ Args:
+          test_input: Object containing the test filename, URI, and timeout.
+
+ Returns: a TestResult object.
+ """
+ if self._driver and self._driver.has_crashed():
+ self.kill_driver()
+ if not self._driver:
+ self._driver = self._port.create_driver(self._worker_number)
+ return self.run_single_test(self._driver, test_input)
+
+ def run_single_test(self, driver, test_input):
+ return single_test_runner.run_single_test(self._port, self._options,
+ test_input, driver, self._name)