changeset 4239:cb58342692d7

RT-31450: Merge with WebKit master (svn revision r150795); add Python fixes
author Alexey Utkin <alexey.utkin@oracle.com>
date Fri, 05 Jul 2013 17:42:24 +0400
parents 7ffe66aa4523
children 02fad2dd67b1
files modules/web/src/main/native/Tools/Scripts/webkitpy/common/checksvnconfigfile.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/message_pool.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/file_lock_mock.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/outputtee.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/profiler.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/profiler_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/common/webkit_finder.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/__init__.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/apple.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/base.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/base_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/builders.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/builders_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/config.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/config_standalone.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/config_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/driver.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/driver_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/efl.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/efl_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/factory.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/factory_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/gtk.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/gtk_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/http_lock.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/http_lock_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/image_diff.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/image_diff_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/leakdetector.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/leakdetector_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/mac.py 
modules/web/src/main/native/Tools/Scripts/webkitpy/port/mac_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/mock_drt.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/mock_drt_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/port_testcase.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer_mock.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/qt.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/qt_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/server_process.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/server_process_mock.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/server_process_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/test.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/win.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/win_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/xvfbdriver.py modules/web/src/main/native/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/pylintrc modules/web/src/main/native/Tools/Scripts/webkitpy/style/checkers/cmake.py modules/web/src/main/native/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/test/finder.py modules/web/src/main/native/Tools/Scripts/webkitpy/test/finder_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/test/printer.py modules/web/src/main/native/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/bot/ircbot.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/commands/newcommitbot.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/commands/newcommitbot_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/commands/perfalizer.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/haslanded.py modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/__init__.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_converter.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_importer.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_parser.py modules/web/src/main/native/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
diffstat 87 files changed, 18510 insertions(+), 0 deletions(-)
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/checksvnconfigfile.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,65 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is used by:
+# webkitpy/tool/steps/addsvnmimetypeforpng.py
+# webkitpy/style/checkers/png.py
+
+import os
+import re
+
+
+def check(host, fs):
+    """
+    check the svn config file
+    return with three logical value:
+    is svn config file missing, is auto-props missing, is the svn:mime-type for png missing
+    """
+
+    cfg_file_path = config_file_path(host, fs)
+
+    try:
+        config_file = fs.read_text_file(cfg_file_path)
+    except IOError:
+        return (True, True, True)
+
+    errorcode_autoprop = not re.search(r"^\s*enable-auto-props\s*=\s*yes", config_file, re.MULTILINE)
+    errorcode_png = not re.search(r"^\s*\*\.png\s*=\s*svn:mime-type=image/png", config_file, re.MULTILINE)
+
+    return (False, errorcode_autoprop, errorcode_png)
+
+
+def config_file_path(host, fs):
+    if host.platform.is_win():
+        config_file_path = fs.join(os.environ['APPDATA'], "Subversion", "config")
+    else:
+        config_file_path = fs.join(fs.expanduser("~"), ".subversion", "config")
+    return config_file_path
+
+
+def errorstr_autoprop(config_file_path):
+    return 'Have to enable auto props in the subversion config file (%s "enable-auto-props = yes"). ' % config_file_path
+
+
+def errorstr_png(config_file_path):
+    return 'Have to set the svn:mime-type in the subversion config file (%s "*.png = svn:mime-type=image/png").' % config_file_path
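
For illustration, a minimal sketch of how a caller (such as the PNG style checker listed above) might use check(); the use of webkitpy's real Host object here is an assumption, not part of this changeset:

    from webkitpy.common.host import Host
    from webkitpy.common.checksvnconfigfile import check, config_file_path, errorstr_autoprop, errorstr_png

    host = Host()
    fs = host.filesystem
    config_missing, autoprop_missing, png_missing = check(host, fs)
    if autoprop_missing:
        print errorstr_autoprop(config_file_path(host, fs))
    if png_missing:
        print errorstr_png(config_file_path(host, fs))
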
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/message_pool.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,324 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages and concurrency for run-webkit-tests
+and test-webkitpy. This module follows the design for multiprocessing.Pool
+and concurrent.futures.ProcessPoolExecutor, with the following differences:
+
+* Tasks are executed in stateful subprocesses via objects that implement the
+  Worker interface - this allows the workers to share state across tasks.
+* The pool provides an asynchronous event-handling interface so the caller
+  may receive events as tasks are processed.
+
+If you don't need these features, use multiprocessing.Pool or concurrent.futures
+instead.
+
+"""
+
+import cPickle
+import logging
+import multiprocessing
+import Queue
+import sys
+import time
+import traceback
+
+
+from webkitpy.common.host import Host
+from webkitpy.common.system import stack_utils
+
+
+_log = logging.getLogger(__name__)
+
+
+def get(caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
+    """Returns an object that exposes a run() method that takes a list of test shards and runs them in parallel."""
+    return _MessagePool(caller, worker_factory, num_workers, worker_startup_delay_secs, host)
+
+
+class _MessagePool(object):
+    def __init__(self, caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
+        self._caller = caller
+        self._worker_factory = worker_factory
+        self._num_workers = num_workers
+        self._worker_startup_delay_secs = worker_startup_delay_secs
+        self._workers = []
+        self._workers_stopped = set()
+        self._host = host
+        self._name = 'manager'
+        self._running_inline = (self._num_workers == 1)
+        if self._running_inline:
+            self._messages_to_worker = Queue.Queue()
+            self._messages_to_manager = Queue.Queue()
+        else:
+            self._messages_to_worker = multiprocessing.Queue()
+            self._messages_to_manager = multiprocessing.Queue()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        self._close()
+        return False
+
+    def run(self, shards):
+        """Posts a list of messages to the pool and waits for them to complete."""
+        for message in shards:
+            self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
+
+        for _ in xrange(self._num_workers):
+            self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
+
+        self.wait()
+
+    def _start_workers(self):
+        assert not self._workers
+        self._workers_stopped = set()
+        host = None
+        if self._running_inline or self._can_pickle(self._host):
+            host = self._host
+
+        for worker_number in xrange(self._num_workers):
+            worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
+            self._workers.append(worker)
+            worker.start()
+            if self._worker_startup_delay_secs:
+                time.sleep(self._worker_startup_delay_secs)
+
+    def _worker_log_level(self):
+        log_level = logging.NOTSET
+        for handler in logging.root.handlers:
+            if handler.level != logging.NOTSET:
+                if log_level == logging.NOTSET:
+                    log_level = handler.level
+                else:
+                    log_level = min(log_level, handler.level)
+        return log_level
+
+    def wait(self):
+        try:
+            self._start_workers()
+            if self._running_inline:
+                self._workers[0].run()
+                self._loop(block=False)
+            else:
+                self._loop(block=True)
+        finally:
+            self._close()
+
+    def _close(self):
+        for worker in self._workers:
+            if worker.is_alive():
+                worker.terminate()
+                worker.join()
+        self._workers = []
+        if not self._running_inline:
+            # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
+            multiprocessing.util._exiting = True
+            if self._messages_to_worker:
+                self._messages_to_worker.close()
+                self._messages_to_worker = None
+            if self._messages_to_manager:
+                self._messages_to_manager.close()
+                self._messages_to_manager = None
+
+    def _log_messages(self, messages):
+        for message in messages:
+            logging.root.handle(message)
+
+    def _handle_done(self, source):
+        self._workers_stopped.add(source)
+
+    @staticmethod
+    def _handle_worker_exception(source, exception_type, exception_value, _):
+        if exception_type == KeyboardInterrupt:
+            raise exception_type(exception_value)
+        raise WorkerException(str(exception_value))
+
+    def _can_pickle(self, host):
+        try:
+            cPickle.dumps(host)
+            return True
+        except TypeError:
+            return False
+
+    def _loop(self, block):
+        try:
+            while True:
+                if len(self._workers_stopped) == len(self._workers):
+                    block = False
+                message = self._messages_to_manager.get(block)
+                self._log_messages(message.logs)
+                if message.from_user:
+                    self._caller.handle(message.name, message.src, *message.args)
+                    continue
+                method = getattr(self, '_handle_' + message.name)
+                assert method, 'bad message %s' % repr(message)
+                method(message.src, *message.args)
+        except Queue.Empty:
+            pass
+
+
+class WorkerException(BaseException):
+    """Raised when we receive an unexpected/unknown exception from a worker."""
+    pass
+
+
+class _Message(object):
+    def __init__(self, src, message_name, message_args, from_user, logs):
+        self.src = src
+        self.name = message_name
+        self.args = message_args
+        self.from_user = from_user
+        self.logs = logs
+
+    def __repr__(self):
+        return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)
+
+
+class _Worker(multiprocessing.Process):
+    def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
+        super(_Worker, self).__init__()
+        self.host = host
+        self.worker_number = worker_number
+        self.name = 'worker/%d' % worker_number
+        self.log_messages = []
+        self.log_level = log_level
+        self._running_inline = running_inline
+        self._manager = manager
+
+        self._messages_to_manager = messages_to_manager
+        self._messages_to_worker = messages_to_worker
+        self._worker = worker_factory(self)
+        self._logger = None
+        self._log_handler = None
+
+    def terminate(self):
+        if self._worker:
+            if hasattr(self._worker, 'stop'):
+                self._worker.stop()
+            self._worker = None
+        if self.is_alive():
+            super(_Worker, self).terminate()
+
+    def _close(self):
+        if self._log_handler and self._logger:
+            self._logger.removeHandler(self._log_handler)
+        self._log_handler = None
+        self._logger = None
+
+    def start(self):
+        if not self._running_inline:
+            super(_Worker, self).start()
+
+    def run(self):
+        if not self.host:
+            self.host = Host()
+        if not self._running_inline:
+            self._set_up_logging()
+
+        worker = self._worker
+        exception_msg = ""
+        _log.debug("%s starting" % self.name)
+
+        try:
+            if hasattr(worker, 'start'):
+                worker.start()
+            while True:
+                message = self._messages_to_worker.get()
+                if message.from_user:
+                    worker.handle(message.name, message.src, *message.args)
+                    self._yield_to_manager()
+                else:
+                    assert message.name == 'stop', 'bad message %s' % repr(message)
+                    break
+
+            _log.debug("%s exiting" % self.name)
+        except Queue.Empty:
+            assert False, '%s: ran out of messages in worker queue.' % self.name
+        except KeyboardInterrupt, e:
+            self._raise(sys.exc_info())
+        except Exception, e:
+            self._raise(sys.exc_info())
+        finally:
+            try:
+                if hasattr(worker, 'stop'):
+                    worker.stop()
+            finally:
+                self._post(name='done', args=(), from_user=False)
+            self._close()
+
+    def post(self, name, *args):
+        self._post(name, args, from_user=True)
+        self._yield_to_manager()
+
+    def _yield_to_manager(self):
+        if self._running_inline:
+            self._manager._loop(block=False)
+
+    def _post(self, name, args, from_user):
+        log_messages = self.log_messages
+        self.log_messages = []
+        self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
+
+    def _raise(self, exc_info):
+        exception_type, exception_value, exception_traceback = exc_info
+        if self._running_inline:
+            raise exception_type, exception_value, exception_traceback
+
+        if exception_type == KeyboardInterrupt:
+            _log.debug("%s: interrupted, exiting" % self.name)
+            stack_utils.log_traceback(_log.debug, exception_traceback)
+        else:
+            _log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
+            stack_utils.log_traceback(_log.error, exception_traceback)
+        # Since tracebacks aren't picklable, send the extracted stack instead.
+        stack = traceback.extract_tb(exception_traceback)
+        self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
+
+    def _set_up_logging(self):
+        self._logger = logging.getLogger()
+
+        # The unix multiprocessing implementation clones any log handlers into the child process,
+        # so we remove them to avoid duplicate logging.
+        for h in self._logger.handlers:
+            self._logger.removeHandler(h)
+
+        self._log_handler = _WorkerLogHandler(self)
+        self._logger.addHandler(self._log_handler)
+        self._logger.setLevel(self.log_level)
+
+
+class _WorkerLogHandler(logging.Handler):
+    def __init__(self, worker):
+        logging.Handler.__init__(self)
+        self._worker = worker
+        self.setLevel(worker.log_level)
+
+    def emit(self, record):
+        self._worker.log_messages.append(record)
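
For illustration, a hedged sketch of how a caller drives the pool above: the caller implements handle() to receive worker events, and the worker factory returns an object implementing the Worker interface. The EchoCaller/EchoWorker names are hypothetical; with num_workers=1 the pool runs inline in the current process.

    from webkitpy.common import message_pool

    class EchoCaller(object):
        def handle(self, name, source, *args):
            # Called on the manager side for every message a worker post()s.
            print '%s from %s: %s' % (name, source, args)

    class EchoWorker(object):
        def __init__(self, connection):
            # 'connection' is the _Worker object; it exposes post(), name and host.
            self._connection = connection

        def handle(self, name, source, *args):
            # Runs in the worker for each queued shard; echo the payload back.
            self._connection.post('echoed', name, args)

    caller = EchoCaller()
    with message_pool.get(caller, lambda connection: EchoWorker(connection), num_workers=1) as pool:
        pool.run([('shard', 'payload-%d' % i) for i in xrange(4)])
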
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,70 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""In order for the multiprocessing module to spawn children correctly on
+Windows, we need to be running a Python module that can be imported
+(which means a file in sys.path that ends in .py). In addition, we need to
+ensure that sys.path / PYTHONPATH is set and propagating correctly.
+
+This module enforces that."""
+
+import os
+import subprocess
+import sys
+
+from webkitpy.common import version_check   # 'unused import' pylint: disable=W0611
+
+
+def run(*parts):
+    up = os.path.dirname
+    script_dir = up(up(up(os.path.abspath(__file__))))
+    env = os.environ
+    if 'PYTHONPATH' in env:
+        if script_dir not in env['PYTHONPATH']:
+            env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + script_dir
+    else:
+        env['PYTHONPATH'] = script_dir
+    module_path = os.path.join(script_dir, *parts)
+    cmd = [sys.executable, module_path] + sys.argv[1:]
+
+    # Wrap the process in the jhbuild environment so DRT or WKTR doesn't need
+    # to do it itself, and so the process id reported by subprocess.Popen is
+    # the tool's rather than jhbuild's.
+    if '--gtk' in sys.argv[1:] and os.path.exists(os.path.join(script_dir, '..', '..', 'WebKitBuild', 'Dependencies')):
+        prefix = [os.path.join(script_dir, '..', 'jhbuild', 'jhbuild-wrapper'), '--gtk', 'run']
+        cmd = prefix + cmd
+
+    proc = subprocess.Popen(cmd, env=env)
+    try:
+        proc.wait()
+    except KeyboardInterrupt:
+        # We need a second wait in order to make sure the subprocess exits fully.
+        # FIXME: It would be nice if we could put a timeout on this.
+        proc.wait()
+    sys.exit(proc.returncode)
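
For illustration, this is the shape of the thin wrapper scripts in Tools/Scripts that call run(); the exact module path below is illustrative rather than taken from this changeset:

    #!/usr/bin/env python
    # Thin launcher: re-runs the real module under a fresh interpreter with
    # sys.path/PYTHONPATH set up so multiprocessing works on Windows too.
    from webkitpy.common import multiprocessing_bootstrap

    multiprocessing_bootstrap.run('webkitpy', 'layout_tests', 'run_webkit_tests.py')
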
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/file_lock_mock.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockFileLock(object):
+    def __init__(self, lock_file_path, max_wait_time_sec=20):
+        pass
+
+    def acquire_lock(self):
+        return True
+
+    def release_lock(self):
+        return True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/outputtee.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,79 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import sys
+
+
+# Simple class to split output between multiple destinations
+class Tee:
+    def __init__(self, *files):
+        self.files = files
+
+    # Callers should pass an already encoded string for writing.
+    def write(self, bytes):
+        for file in self.files:
+            file.write(bytes)
+
+
+class OutputTee:
+    def __init__(self):
+        self._original_stdout = None
+        self._original_stderr = None
+        self._files_for_output = []
+
+    def add_log(self, path):
+        log_file = self._open_log_file(path)
+        self._files_for_output.append(log_file)
+        self._tee_outputs_to_files(self._files_for_output)
+        return log_file
+
+    def remove_log(self, log_file):
+        self._files_for_output.remove(log_file)
+        self._tee_outputs_to_files(self._files_for_output)
+        log_file.close()
+
+    @staticmethod
+    def _open_log_file(log_path):
+        (log_directory, log_name) = os.path.split(log_path)
+        if log_directory and not os.path.exists(log_directory):
+            os.makedirs(log_directory)
+        return codecs.open(log_path, "a+", "utf-8")
+
+    def _tee_outputs_to_files(self, files):
+        if not self._original_stdout:
+            self._original_stdout = sys.stdout
+            self._original_stderr = sys.stderr
+        if files and len(files):
+            sys.stdout = Tee(self._original_stdout, *files)
+            sys.stderr = Tee(self._original_stderr, *files)
+        else:
+            sys.stdout = self._original_stdout
+            sys.stderr = self._original_stderr
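
For illustration, a small sketch of OutputTee in use, mirroring stdout and stderr into a log file for the duration of a block; the log path is hypothetical:

    from webkitpy.common.system.outputtee import OutputTee

    tee = OutputTee()
    log_file = tee.add_log('/tmp/build.log')   # stdout/stderr now also go to this file
    try:
        print "visible on the console and recorded in /tmp/build.log"
    finally:
        tee.remove_log(log_file)               # restores the original stdout/stderr
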
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,43 @@
+# Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest2 as unittest
+
+from webkitpy.common.system.outputtee import Tee, OutputTee
+
+
+class SimpleTeeTest(unittest.TestCase):
+    def test_simple_tee(self):
+        file1, file2 = StringIO.StringIO(), StringIO.StringIO()
+        tee = Tee(file1, file2)
+        tee.write("foo bar\n")
+        tee.write("baz\n")
+
+        self.assertEqual(file1.getvalue(), "foo bar\nbaz\n")
+        self.assertEqual(file2.getvalue(), file1.getvalue())
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/profiler.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,210 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import itertools
+
+_log = logging.getLogger(__name__)
+
+
+class ProfilerFactory(object):
+    @classmethod
+    def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
+        profilers = cls.profilers_for_platform(host.platform)
+        if not profilers:
+            return None
+        profiler_name = profiler_name or cls.default_profiler_name(host.platform)
+        profiler_class = next(itertools.ifilter(lambda profiler: profiler.name == profiler_name, profilers), None)
+        if not profiler_class:
+            return None
+        return profiler_class(host, executable_path, output_dir, identifier)
+
+    @classmethod
+    def default_profiler_name(cls, platform):
+        profilers = cls.profilers_for_platform(platform)
+        return profilers[0].name if profilers else None
+
+    @classmethod
+    def profilers_for_platform(cls, platform):
+        # GooglePProf requires TCMalloc/google-perftools, but is available everywhere.
+        profilers_by_os_name = {
+            'mac': [IProfiler, Sample, GooglePProf],
+            'linux': [Perf, GooglePProf],
+            # Note: freebsd, win32 have no profilers defined yet, thus --profile will be ignored
+            # by default, but a profiler can be selected with --profiler=PROFILER explicitly.
+        }
+        return profilers_by_os_name.get(platform.os_name, [])
+
+
+class Profiler(object):
+    # Used by ProfilerFactory to look up a profiler from the --profiler=NAME option.
+    name = None
+
+    def __init__(self, host, executable_path, output_dir, identifier=None):
+        self._host = host
+        self._executable_path = executable_path
+        self._output_dir = output_dir
+        self._identifier = "test"
+        self._host.filesystem.maybe_make_directory(self._output_dir)
+
+    def adjusted_environment(self, env):
+        return env
+
+    def attach_to_pid(self, pid):
+        pass
+
+    def profile_after_exit(self):
+        pass
+
+
+class SingleFileOutputProfiler(Profiler):
+    def __init__(self, host, executable_path, output_dir, output_suffix, identifier=None):
+        super(SingleFileOutputProfiler, self).__init__(host, executable_path, output_dir, identifier)
+        # FIXME: Currently all reports are kept as test.*, until we fix that, search up to 1000 names before giving up.
+        self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix, search_limit=1000)
+        assert(self._output_path)
+
+
+class GooglePProf(SingleFileOutputProfiler):
+    name = 'pprof'
+
+    def __init__(self, host, executable_path, output_dir, identifier=None):
+        super(GooglePProf, self).__init__(host, executable_path, output_dir, "pprof", identifier)
+
+    def adjusted_environment(self, env):
+        env['CPUPROFILE'] = self._output_path
+        return env
+
+    def _first_ten_lines_of_profile(self, pprof_output):
+        match = re.search("^Total:[^\n]*\n((?:[^\n]*\n){0,10})", pprof_output, re.MULTILINE)
+        return match.group(1) if match else None
+
+    def _pprof_path(self):
+        # FIXME: We should have code to find the right google-pprof executable, some Googlers have
+        # google-pprof installed as "pprof" on their machines for them.
+        return '/usr/bin/google-pprof'
+
+    def profile_after_exit(self):
+        # google-pprof doesn't check its arguments, so we have to.
+        if not (self._host.filesystem.exists(self._output_path)):
+            print "Failed to gather profile, %s does not exist." % self._output_path
+            return
+
+        pprof_args = [self._pprof_path(), '--text', self._executable_path, self._output_path]
+        profile_text = self._host.executive.run_command(pprof_args)
+        print "First 10 lines of pprof --text:"
+        print self._first_ten_lines_of_profile(profile_text)
+        print "http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output."
+        print
+        print "To interact with the the full profile, including produce graphs:"
+        print ' '.join([self._pprof_path(), self._executable_path, self._output_path])
+
+
+class Perf(SingleFileOutputProfiler):
+    name = 'perf'
+
+    def __init__(self, host, executable_path, output_dir, identifier=None):
+        super(Perf, self).__init__(host, executable_path, output_dir, "data", identifier)
+        self._perf_process = None
+        self._pid_being_profiled = None
+
+    def _perf_path(self):
+        # FIXME: We may need to support finding the perf binary in other locations.
+        return 'perf'
+
+    def attach_to_pid(self, pid):
+        assert(not self._perf_process and not self._pid_being_profiled)
+        self._pid_being_profiled = pid
+        cmd = [self._perf_path(), "record", "--call-graph", "--pid", pid, "--output", self._output_path]
+        self._perf_process = self._host.executive.popen(cmd)
+
+    def _first_ten_lines_of_profile(self, perf_output):
+        match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
+        return match.group(1) if match else None
+
+    def profile_after_exit(self):
+        # Perf doesn't automatically watch the attached pid for death notifications,
+        # so we have to do that ourselves and then tell perf it's time to stop sampling. :(
+        self._host.executive.wait_limited(self._pid_being_profiled, limit_in_seconds=10)
+        perf_exitcode = self._perf_process.poll()
+        if perf_exitcode is None:  # This should always be the case, unless perf errored out early.
+            self._host.executive.interrupt(self._perf_process.pid)
+
+        perf_exitcode = self._perf_process.wait()
+        if perf_exitcode not in (0, -2):  # The exit code should always be -2, as we're always interrupting perf.
+            print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode
+            return
+
+        perf_args = [self._perf_path(), 'report', '--call-graph', 'none', '--input', self._output_path]
+        print "First 10 lines of 'perf report --call-graph=none':"
+
+        print " ".join(perf_args)
+        perf_output = self._host.executive.run_command(perf_args)
+        print self._first_ten_lines_of_profile(perf_output)
+
+        print "To view the full profile, run:"
+        print ' '.join([self._perf_path(), 'report', '-i', self._output_path])
+        print  # An extra line between tests looks nicer.
+
+
+class Sample(SingleFileOutputProfiler):
+    name = 'sample'
+
+    def __init__(self, host, executable_path, output_dir, identifier=None):
+        super(Sample, self).__init__(host, executable_path, output_dir, "txt", identifier)
+        self._profiler_process = None
+
+    def attach_to_pid(self, pid):
+        cmd = ["sample", pid, "-mayDie", "-file", self._output_path]
+        self._profiler_process = self._host.executive.popen(cmd)
+
+    def profile_after_exit(self):
+        self._profiler_process.wait()
+
+
+class IProfiler(SingleFileOutputProfiler):
+    name = 'iprofiler'
+
+    def __init__(self, host, executable_path, output_dir, identifier=None):
+        super(IProfiler, self).__init__(host, executable_path, output_dir, "dtps", identifier)
+        self._profiler_process = None
+
+    def attach_to_pid(self, pid):
+        # FIXME: iprofiler requires us to pass the directory separately
+        # from the basename of the file, with no control over the extension.
+        fs = self._host.filesystem
+        cmd = ["iprofiler", "-timeprofiler", "-a", pid,
+                "-d", fs.dirname(self._output_path), "-o", fs.splitext(fs.basename(self._output_path))[0]]
+        # FIXME: Consider capturing instead of letting instruments spam to stderr directly.
+        self._profiler_process = self._host.executive.popen(cmd)
+
+    def profile_after_exit(self):
+        # It seems like a nicer user experience to wait for the profiler to exit, so it
+        # doesn't spew to stderr at odd times.
+        self._profiler_process.wait()
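
For illustration, the intended call sequence for a profiler created by the factory above; the binary and output paths are hypothetical, and on Linux this would pick Perf by default:

    import os

    from webkitpy.common.host import Host
    from webkitpy.common.system.profiler import ProfilerFactory

    host = Host()
    profiler = ProfilerFactory.create_profiler(host, '/usr/bin/some-binary', '/tmp/profile-output')
    if profiler:
        env = profiler.adjusted_environment(dict(os.environ))     # e.g. pprof sets CPUPROFILE here
        process = host.executive.popen(['/usr/bin/some-binary'], env=env)
        profiler.attach_to_pid(process.pid)                       # perf/sample/iprofiler attach here
        process.wait()
        profiler.profile_after_exit()                             # prints or collects the report
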
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/profiler_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,103 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from .profiler import ProfilerFactory, GooglePProf
+
+
+class ProfilerFactoryTest(unittest.TestCase):
+    def _assert_default_profiler_name(self, os_name, expected_profiler_name):
+        profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
+        self.assertEqual(profiler_name, expected_profiler_name)
+
+    def test_default_profilers(self):
+        self._assert_default_profiler_name('mac', 'iprofiler')
+        self._assert_default_profiler_name('linux', 'perf')
+        self._assert_default_profiler_name('win32', None)
+        self._assert_default_profiler_name('freebsd', None)
+
+    def test_default_profiler_output(self):
+        host = MockSystemHost()
+        self.assertFalse(host.filesystem.exists("/tmp/output"))
+
+        # Default mocks are Mac, so iprofiler should be the default.
+        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
+        self.assertTrue(host.filesystem.exists("/tmp/output"))
+        self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
+
+        # Linux defaults to perf.
+        host.platform.os_name = 'linux'
+        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
+        self.assertEqual(profiler._output_path, "/tmp/output/test.data")
+
+
+class GooglePProfTest(unittest.TestCase):
+    def test_pprof_output_regexp(self):
+        pprof_output = """
+sometimes
+there
+is
+junk before the total line
+
+
+Total: 3770 samples
+      76   2.0%   2.0%      104   2.8% lookup (inline)
+      60   1.6%   3.6%       60   1.6% FL_SetPrevious (inline)
+      56   1.5%   5.1%       56   1.5% MaskPtr (inline)
+      51   1.4%   6.4%      222   5.9% WebCore::HTMLTokenizer::nextToken
+      42   1.1%   7.6%       47   1.2% WTF::Vector::shrinkCapacity
+      35   0.9%   8.5%       35   0.9% WTF::RefPtr::get (inline)
+      33   0.9%   9.4%       43   1.1% append (inline)
+      29   0.8%  10.1%       67   1.8% WTF::StringImpl::deref (inline)
+      29   0.8%  10.9%      100   2.7% add (inline)
+      28   0.7%  11.6%       28   0.7% WebCore::QualifiedName::localName (inline)
+      25   0.7%  12.3%       27   0.7% WebCore::Private::addChildNodesToDeletionQueue
+      24   0.6%  12.9%       24   0.6% __memcpy_ssse3_back
+      23   0.6%  13.6%       23   0.6% intHash (inline)
+      23   0.6%  14.2%       76   2.0% tcmalloc::FL_Next
+      23   0.6%  14.8%       95   2.5% tcmalloc::FL_Push
+      22   0.6%  15.4%       22   0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
+"""
+        expected_first_ten_lines = """      76   2.0%   2.0%      104   2.8% lookup (inline)
+      60   1.6%   3.6%       60   1.6% FL_SetPrevious (inline)
+      56   1.5%   5.1%       56   1.5% MaskPtr (inline)
+      51   1.4%   6.4%      222   5.9% WebCore::HTMLTokenizer::nextToken
+      42   1.1%   7.6%       47   1.2% WTF::Vector::shrinkCapacity
+      35   0.9%   8.5%       35   0.9% WTF::RefPtr::get (inline)
+      33   0.9%   9.4%       43   1.1% append (inline)
+      29   0.8%  10.1%       67   1.8% WTF::StringImpl::deref (inline)
+      29   0.8%  10.9%      100   2.7% add (inline)
+      28   0.7%  11.6%       28   0.7% WebCore::QualifiedName::localName (inline)
+"""
+        host = MockSystemHost()
+        profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
+        self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/common/webkit_finder.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,65 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class WebKitFinder(object):
+    def __init__(self, filesystem):
+        self._filesystem = filesystem
+        self._webkit_base = None
+
+    def webkit_base(self):
+        """Returns the absolute path to the top of the WebKit tree.
+
+        Raises an AssertionError if the top dir can't be determined."""
+        # Note: This code somewhat duplicates the code in
+        # scm.find_checkout_root(). However, that code only works if the top
+        # of the SCM repository also matches the top of the WebKit tree. Some SVN users
+        # (the chromium test bots, for example), might only check out subdirectories like
+        # Tools/Scripts. This code will also work if there is no SCM system at all.
+        if not self._webkit_base:
+            module_path = self._filesystem.path_to_module(self.__module__)
+            tools_index = module_path.rfind('Tools')
+            assert tools_index != -1, "could not find location of this checkout from %s" % module_path
+            self._webkit_base = self._filesystem.normpath(module_path[0:tools_index - 1])
+        return self._webkit_base
+
+    def path_from_webkit_base(self, *comps):
+        return self._filesystem.join(self.webkit_base(), *comps)
+
+    def path_to_script(self, script_name):
+        """Returns the relative path to the script from the top of the WebKit tree."""
+        # This is intentionally relative in order to force callers to consider what
+        # their current working directory is (and change to the top of the tree if necessary).
+        return self._filesystem.join("Tools", "Scripts", script_name)
+
+    def layout_tests_dir(self):
+        return self.path_from_webkit_base('LayoutTests')
+
+    def perf_tests_dir(self):
+        return self.path_from_webkit_base('PerformanceTests')
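
For illustration, typical use of WebKitFinder with webkitpy's real FileSystem object (an assumption; any object with the same join/normpath/path_to_module methods would do):

    from webkitpy.common.system.filesystem import FileSystem
    from webkitpy.common.webkit_finder import WebKitFinder

    finder = WebKitFinder(FileSystem())
    print finder.webkit_base()                                  # absolute path to the checkout root
    print finder.path_from_webkit_base('LayoutTests', 'fast')   # <root>/LayoutTests/fast
    print finder.path_to_script('run-webkit-tests')             # relative: Tools/Scripts/run-webkit-tests
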
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,175 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import re
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutTestFinder(object):
+    def __init__(self, port, options):
+        self._port = port
+        self._options = options
+        self._filesystem = self._port.host.filesystem
+        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+    def find_tests(self, options, args):
+        paths = self._strip_test_dir_prefixes(args)
+        if options.test_list:
+            paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
+        test_files = self._port.tests(paths)
+        return (paths, test_files)
+
+    def _strip_test_dir_prefixes(self, paths):
+        return [self._strip_test_dir_prefix(path) for path in paths if path]
+
+    def _strip_test_dir_prefix(self, path):
+        # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
+        # the filesystem uses '\\' as a directory separator.
+        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
+            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
+        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
+            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
+        return path
+
+    def _read_test_names_from_file(self, filenames, test_path_separator):
+        fs = self._filesystem
+        tests = []
+        for filename in filenames:
+            try:
+                if test_path_separator != fs.sep:
+                    filename = filename.replace(test_path_separator, fs.sep)
+                file_contents = fs.read_text_file(filename).split('\n')
+                for line in file_contents:
+                    line = self._strip_comments(line)
+                    if line:
+                        tests.append(line)
+            except IOError, e:
+                if e.errno == errno.ENOENT:
+                    _log.critical('')
+                    _log.critical('--test-list file "%s" not found' % filename)
+                raise
+        return tests
+
+    @staticmethod
+    def _strip_comments(line):
+        commentIndex = line.find('//')
+        if commentIndex == -1:
+            commentIndex = len(line)
+
+        line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
+        if line == '':
+            return None
+        else:
+            return line
+
+    def skip_tests(self, paths, all_tests_list, expectations, http_tests):
+        all_tests = set(all_tests_list)
+
+        tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
+        if self._options.skip_failing_tests:
+            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
+            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
+
+        if self._options.skipped == 'only':
+            tests_to_skip = all_tests - tests_to_skip
+        elif self._options.skipped == 'ignore':
+            tests_to_skip = set()
+        elif self._options.skipped != 'always':
+            # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
+            tests_to_skip -= set(paths)
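+        # For example, with the default --skipped behaviour a Skip-marked test
+        # that is named explicitly on the command line is re-enabled by the
+        # branch above, while --skipped=always leaves it skipped.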
+
+        # unless of course we don't want to run the HTTP tests :)
+        if not self._options.http:
+            tests_to_skip.update(set(http_tests))
+
+        return tests_to_skip
+
+    def split_into_chunks(self, test_names):
+        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
+        if not self._options.run_chunk and not self._options.run_part:
+            return test_names, set()
+
+        # If the user specifies they just want to run a subset of the tests,
+        # just grab a subset of the non-skipped tests.
+        chunk_value = self._options.run_chunk or self._options.run_part
+        try:
+            (chunk_num, chunk_len) = chunk_value.split(":")
+            chunk_num = int(chunk_num)
+            assert(chunk_num >= 0)
+            test_size = int(chunk_len)
+            assert(test_size > 0)
+        except AssertionError:
+            _log.critical("invalid chunk '%s'" % chunk_value)
+            return (None, None)
+
+        # Get the number of tests
+        num_tests = len(test_names)
+
+        # Get the start offset of the slice.
+        if self._options.run_chunk:
+            chunk_len = test_size
+            # In this case chunk_num can be really large. We take it modulo
+            # the number of tests so the slice still starts within the
+            # current test list.
+            slice_start = (chunk_num * chunk_len) % num_tests
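+            # For example, --run-chunk=2:10 over 25 tests gives
+            # slice_start = (2 * 10) % 25 = 20; tests [20:25] run here and the
+            # remaining five wrap around from the start of the list below.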
+        else:
+            # Validate the data.
+            assert(test_size <= num_tests)
+            assert(chunk_num <= test_size)
+
+            # To compute chunk_len without skipping any tests, we round the
+            # number of tests up to the next multiple of test_size so it
+            # splits evenly into all the parts.
+            rounded_tests = num_tests
+            if rounded_tests % test_size != 0:
+                rounded_tests = (num_tests + test_size - (num_tests % test_size))
+
+            chunk_len = rounded_tests / test_size
+            slice_start = chunk_len * (chunk_num - 1)
+            # It does not matter if this slice runs past the end of the list.
+
+        # Get the end offset of the slice.
+        slice_end = min(num_tests, slice_start + chunk_len)
+
+        tests_to_run = test_names[slice_start:slice_end]
+
+        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
+
+        # If we reached the end and we don't have enough tests, we run some
+        # from the beginning.
+        if slice_end - slice_start < chunk_len:
+            extra = chunk_len - (slice_end - slice_start)
+            _log.debug('   last chunk is partial, appending [0:%d]' % extra)
+            tests_to_run.extend(test_names[0:extra])
+
+        return (tests_to_run, set(test_names) - set(tests_to_run))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,582 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import math
+import threading
+import time
+
+from webkitpy.common import message_pool
+from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.tool import grammar
+
+
+_log = logging.getLogger(__name__)
+
+
+TestExpectations = test_expectations.TestExpectations
+
+# Export this so callers don't need to know about message pools.
+WorkerException = message_pool.WorkerException
+
+
+class TestRunInterruptedException(Exception):
+    """Raised when a test run should be stopped immediately."""
+    def __init__(self, reason):
+        Exception.__init__(self)
+        self.reason = reason
+        self.msg = reason
+
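+    # Defining __reduce__ keeps the exception picklable (its constructor takes
+    # a custom argument), so workers can propagate it through the message pool.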
+    def __reduce__(self):
+        return self.__class__, (self.reason,)
+
+
+class LayoutTestRunner(object):
+    def __init__(self, options, port, printer, results_directory, test_is_slow_fn):
+        self._options = options
+        self._port = port
+        self._printer = printer
+        self._results_directory = results_directory
+        self._test_is_slow = test_is_slow_fn
+        self._sharder = Sharder(self._port.split_test, self._options.max_locked_shards)
+        self._filesystem = self._port.host.filesystem
+
+        self._expectations = None
+        self._test_inputs = []
+        self._needs_http = None
+        self._needs_websockets = None
+        self._retrying = False
+
+        self._current_run_results = None
+        self._remaining_locked_shards = []
+        self._has_http_lock = False
+
+    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
+        self._expectations = expectations
+        self._test_inputs = test_inputs
+        self._needs_http = needs_http
+        self._needs_websockets = needs_websockets
+        self._retrying = retrying
+
+        # FIXME: rename all variables to test_run_results or some such ...
+        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
+        self._current_run_results = run_results
+        self._remaining_locked_shards = []
+        self._has_http_lock = False
+        self._printer.num_tests = len(test_inputs)
+        self._printer.num_completed = 0
+
+        if not retrying:
+            self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)
+
+        for test_name in set(tests_to_skip):
+            result = test_results.TestResult(test_name)
+            result.type = test_expectations.SKIP
+            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))
+
+        self._printer.write_update('Sharding tests ...')
+        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)
+
+        # FIXME: We don't have a good way to coordinate the workers so that
+        # they don't try to run the shards that need a lock if we don't actually
+        # have the lock. The easiest solution at the moment is to grab the
+        # lock at the beginning of the run, and then run all of the locked
+        # shards first. This minimizes the time spent holding the lock, but
+        # means that we won't be running tests while we're waiting for the lock.
+        # If this becomes a problem in practice we'll need to change this.
+
+        all_shards = locked_shards + unlocked_shards
+        self._remaining_locked_shards = locked_shards
+        if self._port.requires_http_server() or (locked_shards and self._options.http):
+            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
+
+        num_workers = min(num_workers, len(all_shards))
+        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
+
+        if self._options.dry_run:
+            return run_results
+
+        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+
+        try:
+            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
+                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
+        except TestRunInterruptedException, e:
+            _log.warning(e.reason)
+            run_results.interrupted = True
+        except KeyboardInterrupt:
+            self._printer.flush()
+            self._printer.writeln('Interrupted, exiting ...')
+            raise
+        except Exception, e:
+            _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
+            raise
+        finally:
+            self.stop_servers_with_lock()
+
+        return run_results
+
+    def _worker_factory(self, worker_connection):
+        results_directory = self._results_directory
+        if self._retrying:
+            self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
+            results_directory = self._filesystem.join(self._results_directory, 'retries')
+        return Worker(worker_connection, results_directory, self._options)
+
+    def _mark_interrupted_tests_as_skipped(self, run_results):
+        for test_input in self._test_inputs:
+            if test_input.test_name not in run_results.results_by_name:
+                result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
+                # FIXME: We probably need to loop here if there are multiple iterations.
+                # FIXME: Also, these results are really neither expected nor unexpected. We probably
+                # need a third type of result.
+                run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name))
+
+    def _interrupt_if_at_failure_limits(self, run_results):
+        # Note: The messages in this method are constructed to match old-run-webkit-tests
+        # so that existing buildbot grep rules work.
+        def interrupt_if_at_failure_limit(limit, failure_count, run_results, message):
+            if limit and failure_count >= limit:
+                message += " %d tests run." % (run_results.expected + run_results.unexpected)
+                self._mark_interrupted_tests_as_skipped(run_results)
+                raise TestRunInterruptedException(message)
+
+        interrupt_if_at_failure_limit(
+            self._options.exit_after_n_failures,
+            run_results.unexpected_failures,
+            run_results,
+            "Exiting early after %d failures." % run_results.unexpected_failures)
+        interrupt_if_at_failure_limit(
+            self._options.exit_after_n_crashes_or_timeouts,
+            run_results.unexpected_crashes + run_results.unexpected_timeouts,
+            run_results,
+            # This differs from ORWT because it does not include WebProcess crashes.
+            "Exiting early after %d crashes and %d timeouts." % (run_results.unexpected_crashes, run_results.unexpected_timeouts))
+
+    def _update_summary_with_result(self, run_results, result):
+        if result.type == test_expectations.SKIP:
+            exp_str = got_str = 'SKIP'
+            expected = True
+        else:
+            expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type)
+            exp_str = self._expectations.get_expectations_string(result.test_name)
+            got_str = self._expectations.expectation_to_string(result.type)
+
+        run_results.add(result, expected, self._test_is_slow(result.test_name))
+
+        self._printer.print_finished_test(result, expected, exp_str, got_str)
+
+        self._interrupt_if_at_failure_limits(run_results)
+
+    def start_servers_with_lock(self, number_of_servers):
+        self._printer.write_update('Acquiring http lock ...')
+        self._port.acquire_http_lock()
+        if self._needs_http:
+            self._printer.write_update('Starting HTTP server ...')
+            self._port.start_http_server(number_of_servers=number_of_servers)
+        if self._needs_websockets:
+            self._printer.write_update('Starting WebSocket server ...')
+            self._port.start_websocket_server()
+        self._has_http_lock = True
+
+    def stop_servers_with_lock(self):
+        if self._has_http_lock:
+            if self._needs_http:
+                self._printer.write_update('Stopping HTTP server ...')
+                self._port.stop_http_server()
+            if self._needs_websockets:
+                self._printer.write_update('Stopping WebSocket server ...')
+                self._port.stop_websocket_server()
+            self._printer.write_update('Releasing server lock ...')
+            self._port.release_http_lock()
+            self._has_http_lock = False
+
+    def handle(self, name, source, *args):
+        method = getattr(self, '_handle_' + name, None)
+        if method:
+            return method(source, *args)
+        raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))
+
+    def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
+        self._printer.print_started_test(test_input.test_name)
+
+    def _handle_finished_test_list(self, worker_name, list_name):
+        def find(name, test_lists):
+            for i in range(len(test_lists)):
+                if test_lists[i].name == name:
+                    return i
+            return -1
+
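+        # When the last locked shard reports in, release the servers and the
+        # HTTP lock so the remaining unlocked shards don't keep holding them.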
+        index = find(list_name, self._remaining_locked_shards)
+        if index >= 0:
+            self._remaining_locked_shards.pop(index)
+            if not self._remaining_locked_shards and not self._port.requires_http_server():
+                self.stop_servers_with_lock()
+
+    def _handle_finished_test(self, worker_name, result, log_messages=[]):
+        self._update_summary_with_result(self._current_run_results, result)
+
+
+class Worker(object):
+    def __init__(self, caller, results_directory, options):
+        self._caller = caller
+        self._worker_number = caller.worker_number
+        self._name = caller.name
+        self._results_directory = results_directory
+        self._options = options
+
+        # The remaining fields are initialized in start()
+        self._host = None
+        self._port = None
+        self._batch_size = None
+        self._batch_count = None
+        self._filesystem = None
+        self._driver = None
+        self._num_tests = 0
+
+    def __del__(self):
+        self.stop()
+
+    def start(self):
+        """This method is called when the object is starting to be used and it is safe
+        for the object to create state that does not need to be pickled (usually this means
+        it is called in a child process)."""
+        self._host = self._caller.host
+        self._filesystem = self._host.filesystem
+        self._port = self._host.port_factory.get(self._options.platform, self._options)
+
+        self._batch_count = 0
+        self._batch_size = self._options.batch_size or 0
+
+    def handle(self, name, source, test_list_name, test_inputs):
+        assert name == 'test_list'
+        for test_input in test_inputs:
+            self._run_test(test_input, test_list_name)
+        self._caller.post('finished_test_list', test_list_name)
+
+    def _update_test_input(self, test_input):
+        if test_input.reference_files is None:
+            # Lazy initialization.
+            test_input.reference_files = self._port.reference_files(test_input.test_name)
+        if test_input.reference_files:
+            test_input.should_run_pixel_test = True
+        else:
+            test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
+
+    def _run_test(self, test_input, shard_name):
+        self._batch_count += 1
+
+        stop_when_done = False
+        if self._batch_size > 0 and self._batch_count >= self._batch_size:
+            self._batch_count = 0
+            stop_when_done = True
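+        # With --batch-size=N, stop_when_done makes the driver shut down after
+        # this test, so a fresh one is started for the next batch of N tests.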
+
+        self._update_test_input(test_input)
+        test_timeout_sec = self._timeout(test_input)
+        start = time.time()
+        self._caller.post('started_test', test_input, test_timeout_sec)
+
+        result = self._run_test_with_timeout(test_input, test_timeout_sec, stop_when_done)
+        result.shard_name = shard_name
+        result.worker_name = self._name
+        result.total_run_time = time.time() - start
+        result.test_number = self._num_tests
+        self._num_tests += 1
+
+        self._caller.post('finished_test', result)
+
+        self._clean_up_after_test(test_input, result)
+
+    def stop(self):
+        _log.debug("%s cleaning up" % self._name)
+        self._kill_driver()
+
+    def _timeout(self, test_input):
+        """Compute the appropriate timeout value for a test."""
+        # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+        # larger than that. We also add a little more padding if we're
+        # running tests in a separate thread.
+        #
+        # Note that we need to convert the test timeout from a
+        # string value in milliseconds to a float for Python.
+        driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
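+        # For example, a 6000 ms test timeout gives an 18.0 s driver timeout;
+        # --run-singly adds one more second of thread padding below.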
+        if not self._options.run_singly:
+            return driver_timeout_sec
+
+        thread_padding_sec = 1.0
+        thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+        return thread_timeout_sec
+
+    def _kill_driver(self):
+        # Be careful about how and when we kill the driver; if driver.stop()
+        # raises an exception, this routine may get re-entered via __del__.
+        driver = self._driver
+        self._driver = None
+        if driver:
+            _log.debug("%s killing driver" % self._name)
+            driver.stop()
+
+    def _run_test_with_timeout(self, test_input, timeout, stop_when_done):
+        if self._options.run_singly:
+            return self._run_test_in_another_thread(test_input, timeout, stop_when_done)
+        return self._run_test_in_this_thread(test_input, stop_when_done)
+
+    def _clean_up_after_test(self, test_input, result):
+        test_name = test_input.test_name
+
+        if result.failures:
+            # Check and kill DumpRenderTree if we need to.
+            if any([f.driver_needs_restart() for f in result.failures]):
+                self._kill_driver()
+                # Reset the batch count since the shell just bounced.
+                self._batch_count = 0
+
+            # Print the error message(s).
+            _log.debug("%s %s failed:" % (self._name, test_name))
+            for f in result.failures:
+                _log.debug("%s  %s" % (self._name, f.message()))
+        elif result.type == test_expectations.SKIP:
+            _log.debug("%s %s skipped" % (self._name, test_name))
+        else:
+            _log.debug("%s %s passed" % (self._name, test_name))
+
+    def _run_test_in_another_thread(self, test_input, thread_timeout_sec, stop_when_done):
+        """Run a test in a separate thread, enforcing a hard time limit.
+
+        Since we can only detect the termination of a thread, not any internal
+        state or progress, we can only run per-test timeouts when running test
+        files singly.
+
+        Args:
+          test_input: Object containing the test filename and timeout
+          thread_timeout_sec: time to wait before killing the driver process.
+        Returns:
+          A TestResult
+        """
+        worker = self
+
+        driver = self._port.create_driver(self._worker_number)
+
+        class SingleTestThread(threading.Thread):
+            def __init__(self):
+                threading.Thread.__init__(self)
+                self.result = None
+
+            def run(self):
+                self.result = worker._run_single_test(driver, test_input, stop_when_done)
+
+        thread = SingleTestThread()
+        thread.start()
+        thread.join(thread_timeout_sec)
+        result = thread.result
+        failures = []
+        if thread.isAlive():
+            # If join() returned with the thread still running, the
+            # DumpRenderTree is completely hung and there's nothing
+            # more we can do with it.  We have to kill all the
+            # DumpRenderTrees to free it up. If we're running more than
+            # one DumpRenderTree thread, we'll end up killing the other
+            # DumpRenderTrees too, introducing spurious crashes. We accept
+            # that tradeoff in order to avoid losing the rest of this
+            # thread's results.
+            _log.error('Test thread hung: killing all DumpRenderTrees')
+            failures = [test_failures.FailureTimeout()]
+
+        driver.stop()
+
+        if not result:
+            result = test_results.TestResult(test_input.test_name, failures=failures, test_run_time=0)
+        return result
+
+    def _run_test_in_this_thread(self, test_input, stop_when_done):
+        """Run a single test file using a shared DumpRenderTree process.
+
+        Args:
+          test_input: Object containing the test filename, uri and timeout
+
+        Returns: a TestResult object.
+        """
+        if self._driver and self._driver.has_crashed():
+            self._kill_driver()
+        if not self._driver:
+            self._driver = self._port.create_driver(self._worker_number)
+        return self._run_single_test(self._driver, test_input, stop_when_done)
+
+    def _run_single_test(self, driver, test_input, stop_when_done):
+        return single_test_runner.run_single_test(self._port, self._options, self._results_directory,
+            self._name, driver, test_input, stop_when_done)
+
+
+class TestShard(object):
+    """A test shard is a named list of TestInputs."""
+
+    def __init__(self, name, test_inputs):
+        self.name = name
+        self.test_inputs = test_inputs
+        self.requires_lock = test_inputs[0].requires_lock
+
+    def __repr__(self):
+        return "TestShard(name='%s', test_inputs=%s, requires_lock=%s'" % (self.name, self.test_inputs, self.requires_lock)
+
+    def __eq__(self, other):
+        return self.name == other.name and self.test_inputs == other.test_inputs
+
+
+class Sharder(object):
+    def __init__(self, test_split_fn, max_locked_shards):
+        self._split = test_split_fn
+        self._max_locked_shards = max_locked_shards
+
+    def shard_tests(self, test_inputs, num_workers, fully_parallel):
+        """Groups tests into batches.
+        This helps ensure that tests that depend on each other (aka bad tests!)
+        continue to run together as most cross-test dependencies tend to
+        occur within the same directory.
+        Returns:
+            Two lists of TestShards. The first contains tests that must only be
+            run under the server lock, the second can be run whenever.
+        """
+
+        # FIXME: Move all of the sharding logic out of manager into its
+        # own class or module. Consider grouping it with the chunking logic
+        # in prepare_lists as well.
+        if num_workers == 1:
+            return self._shard_in_two(test_inputs)
+        elif fully_parallel:
+            return self._shard_every_file(test_inputs)
+        return self._shard_by_directory(test_inputs, num_workers)
+
+    def _shard_in_two(self, test_inputs):
+        """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
+
+        This is used when there's only one worker, to minimize the per-shard overhead."""
+        locked_inputs = []
+        unlocked_inputs = []
+        for test_input in test_inputs:
+            if test_input.requires_lock:
+                locked_inputs.append(test_input)
+            else:
+                unlocked_inputs.append(test_input)
+
+        locked_shards = []
+        unlocked_shards = []
+        if locked_inputs:
+            locked_shards = [TestShard('locked_tests', locked_inputs)]
+        if unlocked_inputs:
+            unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+
+        return locked_shards, unlocked_shards
+
+    def _shard_every_file(self, test_inputs):
+        """Returns two lists of shards, each shard containing a single test file.
+
+        This mode gets maximal parallelism at the cost of much higher flakiness."""
+        locked_shards = []
+        unlocked_shards = []
+        for test_input in test_inputs:
+            # Note that we use a '.' for the shard name; the name doesn't really
+            # matter, and the only other meaningful value would be the filename,
+            # which would be really redundant.
+            if test_input.requires_lock:
+                locked_shards.append(TestShard('.', [test_input]))
+            else:
+                unlocked_shards.append(TestShard('.', [test_input]))
+
+        return locked_shards, unlocked_shards
+
+    def _shard_by_directory(self, test_inputs, num_workers):
+        """Returns two lists of shards, each shard containing all the files in a directory.
+
+        This is the default mode, and gets as much parallelism as we can while
+        minimizing flakiness caused by inter-test dependencies."""
+        locked_shards = []
+        unlocked_shards = []
+        tests_by_dir = {}
+        # FIXME: Given that the tests are already sorted by directory,
+        # we can probably rewrite this to be clearer and faster.
+        for test_input in test_inputs:
+            directory = self._split(test_input.test_name)[0]
+            tests_by_dir.setdefault(directory, [])
+            tests_by_dir[directory].append(test_input)
+
+        for directory, test_inputs in tests_by_dir.iteritems():
+            shard = TestShard(directory, test_inputs)
+            if test_inputs[0].requires_lock:
+                locked_shards.append(shard)
+            else:
+                unlocked_shards.append(shard)
+
+        # Sort the shards by directory name.
+        locked_shards.sort(key=lambda shard: shard.name)
+        unlocked_shards.sort(key=lambda shard: shard.name)
+
+        # Put a ceiling on the number of locked shards, so that we
+        # don't hammer the servers too badly.
+
+        # FIXME: For now, limit to one shard or set it
+        # with the --max-locked-shards. After testing to make sure we
+        # can handle multiple shards, we should probably do something like
+        # limit this to no more than a quarter of all workers, e.g.:
+        # return max(math.ceil(num_workers / 4.0), 1)
+        return (self._resize_shards(locked_shards, self._max_locked_shards, 'locked_shard'),
+                unlocked_shards)
+
+    def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
+        """Takes a list of shards and redistributes the tests into no more
+        than |max_new_shards| new shards."""
+
+        # This implementation assumes that each input shard only contains tests from a
+        # single directory, and that tests in each shard must remain together; as a
+        # result, a given input shard is never split between output shards.
+        #
+        # Each output shard contains the tests from one or more input shards and
+        # hence may contain tests from multiple directories.
+
+        def divide_and_round_up(numerator, divisor):
+            return int(math.ceil(float(numerator) / divisor))
+
+        def extract_and_flatten(shards):
+            test_inputs = []
+            for shard in shards:
+                test_inputs.extend(shard.test_inputs)
+            return test_inputs
+
+        def split_at(seq, index):
+            return (seq[:index], seq[index:])
+
+        num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
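+        # For example, with the 'locked_shard' prefix, 4 input shards and
+        # max_new_shards=2 give num_old_per_new = 2, so the loop below emits
+        # locked_shard_1 and locked_shard_2, each flattening two input shards.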
+        new_shards = []
+        remaining_shards = old_shards
+        while remaining_shards:
+            some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
+            new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
+        return new_shards
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,334 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.test_results import TestResult
+from webkitpy.port.test import TestPort
+
+
+TestExpectations = test_expectations.TestExpectations
+
+
+class FakePrinter(object):
+    num_completed = 0
+    num_tests = 0
+
+    def print_expected(self, run_results, get_tests_with_result_type):
+        pass
+
+    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+        pass
+
+    def print_started_test(self, test_name):
+        pass
+
+    def print_finished_test(self, result, expected, exp_str, got_str):
+        pass
+
+    def write(self, msg):
+        pass
+
+    def write_update(self, msg):
+        pass
+
+    def flush(self):
+        pass
+
+
+class LockCheckingRunner(LayoutTestRunner):
+    def __init__(self, port, options, printer, tester, http_lock):
+        super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
+        self._finished_list_called = False
+        self._tester = tester
+        self._should_have_http_lock = http_lock
+
+    def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+        if not self._finished_list_called:
+            self._tester.assertEqual(list_name, 'locked_tests')
+            self._tester.assertTrue(self._remaining_locked_shards)
+            self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
+
+        super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
+
+        if not self._finished_list_called:
+            self._tester.assertEqual(self._remaining_locked_shards, [])
+            self._tester.assertFalse(self._has_http_lock)
+            self._finished_list_called = True
+
+
+class LayoutTestRunnerTests(unittest.TestCase):
+    def _runner(self, port=None):
+        # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
+        options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
+        options.child_processes = '1'
+
+        host = MockHost()
+        port = port or host.port_factory.get(options.platform, options=options)
+        return LockCheckingRunner(port, options, FakePrinter(), self, True)
+
+    def _run_tests(self, runner, tests):
+        test_inputs = [TestInput(test, 6000) for test in tests]
+        expectations = TestExpectations(runner._port, tests)
+        runner.run_tests(expectations, test_inputs, set(),
+            num_workers=1, needs_http=any('http' in test for test in tests), needs_websockets=any(['websocket' in test for test in tests]), retrying=False)
+
+    def test_http_locking(self):
+        runner = self._runner()
+        self._run_tests(runner, ['http/tests/passes/text.html', 'passes/text.html'])
+
+    def test_perf_locking(self):
+        runner = self._runner()
+        self._run_tests(runner, ['http/tests/passes/text.html', 'perf/foo/test.html'])
+
+    def test_interrupt_if_at_failure_limits(self):
+        runner = self._runner()
+        runner._options.exit_after_n_failures = None
+        runner._options.exit_after_n_crashes_or_timeouts = None
+        test_names = ['passes/text.html', 'passes/image.html']
+        runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]
+
+        run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
+        run_results.unexpected_failures = 100
+        run_results.unexpected_crashes = 50
+        run_results.unexpected_timeouts = 50
+        # No exception when the exit_after* options are None.
+        runner._interrupt_if_at_failure_limits(run_results)
+
+        # No exception when we haven't hit the limit yet.
+        runner._options.exit_after_n_failures = 101
+        runner._options.exit_after_n_crashes_or_timeouts = 101
+        runner._interrupt_if_at_failure_limits(run_results)
+
+        # Interrupt if we've exceeded either limit:
+        runner._options.exit_after_n_crashes_or_timeouts = 10
+        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
+        self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
+        self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
+
+        runner._options.exit_after_n_crashes_or_timeouts = None
+        runner._options.exit_after_n_failures = 10
+        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
+
+    def test_update_summary_with_result(self):
+        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
+        runner = self._runner()
+        runner._options.pixel_tests = False
+        test = 'failures/expected/reftest.html'
+        expectations = TestExpectations(runner._port, tests=[test])
+        runner._expectations = expectations
+
+        run_results = TestRunResults(expectations, 1)
+        result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
+        runner._update_summary_with_result(run_results, result)
+        self.assertEqual(1, run_results.expected)
+        self.assertEqual(0, run_results.unexpected)
+
+        run_results = TestRunResults(expectations, 1)
+        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
+        runner._update_summary_with_result(run_results, result)
+        self.assertEqual(0, run_results.expected)
+        self.assertEqual(1, run_results.unexpected)
+
+    def test_servers_started(self):
+
+        def start_http_server(number_of_servers=None):
+            self.http_started = True
+
+        def start_websocket_server():
+            self.websocket_started = True
+
+        def stop_http_server():
+            self.http_stopped = True
+
+        def stop_websocket_server():
+            self.websocket_stopped = True
+
+        host = MockHost()
+        port = host.port_factory.get('test-mac-leopard')
+        port.start_http_server = start_http_server
+        port.start_websocket_server = start_websocket_server
+        port.stop_http_server = stop_http_server
+        port.stop_websocket_server = stop_websocket_server
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner = self._runner(port=port)
+        runner._needs_http = True
+        runner._needs_websockets = False
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEqual(self.http_started, True)
+        self.assertEqual(self.websocket_started, False)
+        runner.stop_servers_with_lock()
+        self.assertEqual(self.http_stopped, True)
+        self.assertEqual(self.websocket_stopped, False)
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner._needs_http = True
+        runner._needs_websockets = True
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEqual(self.http_started, True)
+        self.assertEqual(self.websocket_started, True)
+        runner.stop_servers_with_lock()
+        self.assertEqual(self.http_stopped, True)
+        self.assertEqual(self.websocket_stopped, True)
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner._needs_http = False
+        runner._needs_websockets = False
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEqual(self.http_started, False)
+        self.assertEqual(self.websocket_started, False)
+        runner.stop_servers_with_lock()
+        self.assertEqual(self.http_stopped, False)
+        self.assertEqual(self.websocket_stopped, False)
+
+
+class SharderTests(unittest.TestCase):
+
+    test_list = [
+        "http/tests/websocket/tests/unicode.htm",
+        "animations/keyframes.html",
+        "http/tests/security/view-source-no-refresh.html",
+        "http/tests/websocket/tests/websocket-protocol-ignored.html",
+        "fast/css/display-none-inline-style-change-crash.html",
+        "http/tests/xmlhttprequest/supported-xml-content-types.html",
+        "dom/html/level2/html/HTMLAnchorElement03.html",
+        "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+        "dom/html/level2/html/HTMLAnchorElement06.html",
+        "perf/object-keys.html",
+    ]
+
+    def get_test_input(self, test_file):
+        return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
+
+    def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=1):
+        port = TestPort(MockSystemHost())
+        self.sharder = Sharder(port.split_test, max_locked_shards)
+        test_list = test_list or self.test_list
+        return self.sharder.shard_tests([self.get_test_input(test) for test in test_list], num_workers, fully_parallel)
+
+    def assert_shards(self, actual_shards, expected_shard_names):
+        self.assertEqual(len(actual_shards), len(expected_shard_names))
+        for i, shard in enumerate(actual_shards):
+            expected_shard_name, expected_test_names = expected_shard_names[i]
+            self.assertEqual(shard.name, expected_shard_name)
+            self.assertEqual([test_input.test_name for test_input in shard.test_inputs],
+                              expected_test_names)
+
+    def test_shard_by_dir(self):
+        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
+
+        # Note that although there are tests in multiple dirs that need locks,
+        # they are crammed into a single shard in order to reduce the # of
+        # workers hitting the server at once.
+        self.assert_shards(locked,
+             [('locked_shard_1',
+               ['http/tests/security/view-source-no-refresh.html',
+                'http/tests/websocket/tests/unicode.htm',
+                'http/tests/websocket/tests/websocket-protocol-ignored.html',
+                'http/tests/xmlhttprequest/supported-xml-content-types.html',
+                'perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('animations', ['animations/keyframes.html']),
+             ('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
+                                      'dom/html/level2/html/HTMLAnchorElement06.html']),
+             ('fast/css', ['fast/css/display-none-inline-style-change-crash.html']),
+             ('ietestcenter/Javascript', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
+    def test_shard_every_file(self):
+        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
+        self.assert_shards(locked,
+            [('.', ['http/tests/websocket/tests/unicode.htm']),
+             ('.', ['http/tests/security/view-source-no-refresh.html']),
+             ('.', ['http/tests/websocket/tests/websocket-protocol-ignored.html']),
+             ('.', ['http/tests/xmlhttprequest/supported-xml-content-types.html']),
+             ('.', ['perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('.', ['animations/keyframes.html']),
+             ('.', ['fast/css/display-none-inline-style-change-crash.html']),
+             ('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
+             ('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
+             ('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+    def test_shard_in_two(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
+        self.assert_shards(locked,
+            [('locked_tests',
+              ['http/tests/websocket/tests/unicode.htm',
+               'http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html',
+               'http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('unlocked_tests',
+              ['animations/keyframes.html',
+               'fast/css/display-none-inline-style-change-crash.html',
+               'dom/html/level2/html/HTMLAnchorElement03.html',
+               'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+               'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+    def test_shard_in_two_has_no_locked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+             test_list=['animations/keyframe.html'])
+        self.assertEqual(len(locked), 0)
+        self.assertEqual(len(unlocked), 1)
+
+    def test_shard_in_two_has_no_unlocked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+             test_list=['http/tests/websocket/tests/unicode.htm'])
+        self.assertEqual(len(locked), 1)
+        self.assertEqual(len(unlocked), 0)
+
+    def test_multiple_locked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2)
+        self.assert_shards(locked,
+            [('locked_shard_1',
+              ['http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/unicode.htm',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html']),
+             ('locked_shard_2',
+              ['http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
+
+        locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False)
+        self.assert_shards(locked,
+            [('locked_shard_1',
+              ['http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/unicode.htm',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html',
+               'http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,111 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import signal
+import traceback
+
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.port import platform_options
+
+
+# This mirrors what the shell normally does.
+INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
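+# (SIGINT is 2 on POSIX, so this evaluates to 130.)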
+
+# This is a randomly chosen exit code that can be tested against to
+# indicate that an unexpected exception occurred.
+EXCEPTIONAL_EXIT_STATUS = 254
+
+_log = logging.getLogger(__name__)
+
+
+def lint(host, options, logging_stream):
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    handler = logging.StreamHandler(logging_stream)
+    logger.addHandler(handler)
+
+    try:
+        ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
+        files_linted = set()
+        lint_failed = False
+
+        for port_to_lint in ports_to_lint:
+            expectations_dict = port_to_lint.expectations_dict()
+
+            # FIXME: This won't work if multiple ports share a TestExpectations file but support different modifiers in the file.
+            for expectations_file in expectations_dict.keys():
+                if expectations_file in files_linted:
+                    continue
+
+                try:
+                    test_expectations.TestExpectations(port_to_lint,
+                        expectations_to_lint={expectations_file: expectations_dict[expectations_file]})
+                except test_expectations.ParseError as e:
+                    lint_failed = True
+                    _log.error('')
+                    for warning in e.warnings:
+                        _log.error(warning)
+                    _log.error('')
+                files_linted.add(expectations_file)
+
+        if lint_failed:
+            _log.error('Lint failed.')
+            return -1
+
+        _log.info('Lint succeeded.')
+        return 0
+    finally:
+        logger.removeHandler(handler)
+
+
+def main(argv, _, stderr):
+    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
+    options, _ = parser.parse_args(argv)
+
+    if options.platform and 'test' in options.platform:
+        # It's a bit lame to import mocks into real code, but this allows the user
+        # to run tests against the test platform interactively, which is useful for
+        # debugging test failures.
+        from webkitpy.common.host_mock import MockHost
+        host = MockHost()
+    else:
+        host = Host()
+
+    try:
+        exit_status = lint(host, options, stderr)
+    except KeyboardInterrupt:
+        exit_status = INTERRUPTED_EXIT_STATUS
+    except Exception as e:
+        print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+        traceback.print_exc(file=stderr)
+        exit_status = EXCEPTIONAL_EXIT_STATUS
+
+    return exit_status
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,157 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import StringIO
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests import lint_test_expectations
+
+
+class FakePort(object):
+    def __init__(self, host, name, path):
+        self.host = host
+        self.name = name
+        self.path = path
+
+    def test_configuration(self):
+        return None
+
+    def expectations_dict(self):
+        self.host.ports_parsed.append(self.name)
+        return {self.path: ''}
+
+    def skipped_layout_tests(self, _):
+        return set([])
+
+    def all_test_configurations(self):
+        return []
+
+    def configuration_specifier_macros(self):
+        return []
+
+    def get_option(self, _, val):
+        return val
+
+    def path_to_generic_test_expectations_file(self):
+        return ''
+
+class FakeFactory(object):
+    def __init__(self, host, ports):
+        self.host = host
+        self.ports = {}
+        for port in ports:
+            self.ports[port.name] = port
+
+    def get(self, port_name, *args, **kwargs):  # pylint: disable=W0613,E0202
+        return self.ports[port_name]
+
+    def all_port_names(self, platform=None):  # pylint: disable=W0613,E0202
+        return sorted(self.ports.keys())
+
+
+class LintTest(unittest.TestCase):
+    def test_all_configurations(self):
+        host = MockHost()
+        host.ports_parsed = []
+        host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
+                                               FakePort(host, 'b', 'path-to-b'),
+                                               FakePort(host, 'b-win', 'path-to-b')))
+
+        logging_stream = StringIO.StringIO()
+        options = optparse.Values({'platform': None})
+        res = lint_test_expectations.lint(host, options, logging_stream)
+        self.assertEqual(res, 0)
+        self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
+
+    def test_lint_test_files(self):
+        logging_stream = StringIO.StringIO()
+        options = optparse.Values({'platform': 'test-mac-leopard'})
+        host = MockHost()
+
+        # pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
+        # FIXME: incorrect complaints about spacing pylint: disable=C0322
+        host.port_factory.all_port_names = lambda platform=None: [platform]
+
+        res = lint_test_expectations.lint(host, options, logging_stream)
+
+        self.assertEqual(res, 0)
+        self.assertIn('Lint succeeded', logging_stream.getvalue())
+
+    def test_lint_test_files__errors(self):
+        options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
+        host = MockHost()
+
+        # FIXME: incorrect complaints about spacing pylint: disable=C0322
+        port = host.port_factory.get(options.platform, options=options)
+        port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
+
+        host.port_factory.get = lambda platform, options=None: port
+        host.port_factory.all_port_names = lambda platform=None: [port.name()]
+
+        logging_stream = StringIO.StringIO()
+
+        res = lint_test_expectations.lint(host, options, logging_stream)
+
+        self.assertEqual(res, -1)
+        self.assertIn('Lint failed', logging_stream.getvalue())
+        self.assertIn('foo:1', logging_stream.getvalue())
+        self.assertIn('bar:1', logging_stream.getvalue())
+
+
+class MainTest(unittest.TestCase):
+    def test_success(self):
+        orig_lint_fn = lint_test_expectations.lint
+
+        # unused args pylint: disable=W0613
+        def interrupting_lint(host, options, logging_stream):
+            raise KeyboardInterrupt
+
+        def successful_lint(host, options, logging_stream):
+            return 0
+
+        def exception_raising_lint(host, options, logging_stream):
+            assert False
+
+        stdout = StringIO.StringIO()
+        stderr = StringIO.StringIO()
+        try:
+            lint_test_expectations.lint = interrupting_lint
+            res = lint_test_expectations.main([], stdout, stderr)
+            self.assertEqual(res, lint_test_expectations.INTERRUPTED_EXIT_STATUS)
+
+            lint_test_expectations.lint = successful_lint
+            res = lint_test_expectations.main(['--platform', 'test'], stdout, stderr)
+            self.assertEqual(res, 0)
+
+            lint_test_expectations.lint = exception_raising_lint
+            res = lint_test_expectations.main([], stdout, stderr)
+            self.assertEqual(res, lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
+        finally:
+            lint_test_expectations.lint = orig_lint_fn
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,260 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestRunResults(object):
+    def __init__(self, expectations, num_tests):
+        self.total = num_tests
+        self.remaining = self.total
+        self.expectations = expectations
+        self.expected = 0
+        self.unexpected = 0
+        self.unexpected_failures = 0
+        self.unexpected_crashes = 0
+        self.unexpected_timeouts = 0
+        self.tests_by_expectation = {}
+        self.tests_by_timeline = {}
+        self.results_by_name = {}  # Map of test name to the last result for the test.
+        self.all_results = []  # All results from a run, including every iteration of every test.
+        self.unexpected_results_by_name = {}
+        self.failures_by_name = {}
+        self.total_failures = 0
+        self.expected_skips = 0
+        for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
+            self.tests_by_expectation[expectation] = set()
+        for timeline in test_expectations.TestExpectations.TIMELINES.values():
+            self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
+        self.slow_tests = set()
+        self.interrupted = False
+
+    def add(self, test_result, expected, test_is_slow):
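+        # Record one result: update the per-expectation sets, the per-name maps,
+        # and the expected/unexpected counters.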
+        self.tests_by_expectation[test_result.type].add(test_result.test_name)
+        self.results_by_name[test_result.test_name] = test_result
+        if test_result.type != test_expectations.SKIP:
+            self.all_results.append(test_result)
+        self.remaining -= 1
+        if len(test_result.failures):
+            self.total_failures += 1
+            self.failures_by_name[test_result.test_name] = test_result.failures
+        if expected:
+            self.expected += 1
+            if test_result.type == test_expectations.SKIP:
+                self.expected_skips += 1
+        else:
+            self.unexpected_results_by_name[test_result.test_name] = test_result
+            self.unexpected += 1
+            if len(test_result.failures):
+                self.unexpected_failures += 1
+            if test_result.type == test_expectations.CRASH:
+                self.unexpected_crashes += 1
+            elif test_result.type == test_expectations.TIMEOUT:
+                self.unexpected_timeouts += 1
+        if test_is_slow:
+            self.slow_tests.add(test_result.test_name)
+
+
+class RunDetails(object):
+    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
+        self.exit_code = exit_code
+        self.summarized_results = summarized_results
+        self.initial_results = initial_results
+        self.retry_results = retry_results
+        self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
+
+
+def _interpret_test_failures(failures):
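+    # Translate a list of TestFailure objects into the extra keys stored in the
+    # per-test dict (the is_missing_* flags and image_diff_percent).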
+    test_dict = {}
+    failure_types = [type(failure) for failure in failures]
+    # FIXME: get rid of all these is_* values once there is a 1:1 map between
+    # TestFailure type and test_expectations.EXPECTATION.
+    if test_failures.FailureMissingAudio in failure_types:
+        test_dict['is_missing_audio'] = True
+
+    if test_failures.FailureMissingResult in failure_types:
+        test_dict['is_missing_text'] = True
+
+    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+        test_dict['is_missing_image'] = True
+
+    if 'image_diff_percent' not in test_dict:
+        for failure in failures:
+            if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
+                test_dict['image_diff_percent'] = failure.diff_percent
+
+    return test_dict
+
+
+def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
+    """Returns a dictionary containing a summary of the test runs, with the following fields:
+        'version': a version indicator
+        'fixable': The number of fixable tests (NOW - PASS)
+        'skipped': The number of skipped tests (NOW & SKIPPED)
+        'num_regressions': The number of non-flaky failures
+        'num_flaky': The number of flaky failures
+        'num_missing': The number of tests with missing results
+        'num_passes': The number of unexpected passes
+        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+    """
+    results = {}
+    results['version'] = 3
+
+    tbe = initial_results.tests_by_expectation
+    tbt = initial_results.tests_by_timeline
+    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
+
+    num_passes = 0
+    num_flaky = 0
+    num_missing = 0
+    num_regressions = 0
+    keywords = {}
+    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
+        keywords[expectation_enum] = expectation_string.upper()
+
+    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
+        keywords[modifier_enum] = modifier_string.upper()
+
+    tests = {}
+
+    for test_name, result in initial_results.results_by_name.iteritems():
+        # Note that if a test crashed in the original run, we ignore
+        # whether or not it crashed when we retried it (if we retried it),
+        # and always consider the result not flaky.
+        expected = expectations.get_expectations_string(test_name)
+        result_type = result.type
+        actual = [keywords[result_type]]
+
+        if result_type == test_expectations.SKIP:
+            continue
+
+        test_dict = {}
+        if result.has_stderr:
+            test_dict['has_stderr'] = True
+
+        if result.reftest_type:
+            test_dict.update(reftest_type=list(result.reftest_type))
+
+        if expectations.has_modifier(test_name, test_expectations.WONTFIX):
+            test_dict['wontfix'] = True
+
+        if result_type == test_expectations.PASS:
+            num_passes += 1
+            # FIXME: include passing tests that have stderr output.
+            if expected == 'PASS':
+                continue
+        elif result_type == test_expectations.CRASH:
+            if test_name in initial_results.unexpected_results_by_name:
+                num_regressions += 1
+        elif result_type == test_expectations.MISSING:
+            if test_name in initial_results.unexpected_results_by_name:
+                num_missing += 1
+        elif test_name in initial_results.unexpected_results_by_name:
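+            # An unexpected result that clears up or changes type on retry is
+            # generally counted as flaky (the pixel-tests-in-retry case below is
+            # the exception); failing the same way twice, or failing with no
+            # retry data, counts as a regression.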
+            if retry_results and test_name not in retry_results.unexpected_results_by_name:
+                actual.extend(expectations.get_expectations_string(test_name).split(" "))
+                num_flaky += 1
+            elif retry_results:
+                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
+                if result_type != retry_result_type:
+                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
+                        num_regressions += 1
+                    else:
+                        num_flaky += 1
+                    actual.append(keywords[retry_result_type])
+                else:
+                    num_regressions += 1
+            else:
+                num_regressions += 1
+
+        test_dict['expected'] = expected
+        test_dict['actual'] = " ".join(actual)
+
+        test_dict.update(_interpret_test_failures(result.failures))
+
+        if retry_results:
+            retry_result = retry_results.unexpected_results_by_name.get(test_name)
+            if retry_result:
+                test_dict.update(_interpret_test_failures(retry_result.failures))
+
+        # Store tests hierarchically by directory, e.g.
+        # foo/bar/baz.html: test_dict
+        # foo/bar/baz1.html: test_dict
+        #
+        # becomes
+        # foo: {
+        #     bar: {
+        #         baz.html: test_dict,
+        #         baz1.html: test_dict
+        #     }
+        # }
+        parts = test_name.split('/')
+        current_map = tests
+        for i, part in enumerate(parts):
+            if i == (len(parts) - 1):
+                current_map[part] = test_dict
+                break
+            if part not in current_map:
+                current_map[part] = {}
+            current_map = current_map[part]
+
+    results['tests'] = tests
+    results['num_passes'] = num_passes
+    results['num_flaky'] = num_flaky
+    results['num_missing'] = num_missing
+    results['num_regressions'] = num_regressions
+    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
+    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+    results['layout_tests_dir'] = port_obj.layout_tests_dir()
+    results['has_wdiff'] = port_obj.wdiff_available()
+    results['has_pretty_patch'] = port_obj.pretty_patch_available()
+    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
+
+    try:
+        # We only use the svn revision for using trac links in the results.html file,
+        # Don't do this by default since it takes >100ms.
+        # FIXME: Do we really need to populate this both here and in the json_results_generator?
+        if port_obj.get_option("builder_name"):
+            port_obj.host.initialize_scm()
+            results['revision'] = port_obj.host.scm().head_svn_revision()
+    except Exception, e:
+        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
+        # Handle cases where we're running outside of version control.
+        import traceback
+        _log.debug('Failed to learn head svn revision:')
+        _log.debug(traceback.format_exc())
+        results['revision'] = ""
+
+    return results
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,135 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_run_results
+
+
+def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
+    failures = []
+    if result_type == test_expectations.TIMEOUT:
+        failures = [test_failures.FailureTimeout()]
+    elif result_type == test_expectations.AUDIO:
+        failures = [test_failures.FailureAudioMismatch()]
+    elif result_type == test_expectations.CRASH:
+        failures = [test_failures.FailureCrash()]
+    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+
+def run_results(port):
+    tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/hang.html',
+             'failures/expected/audio.html']
+    expectations = test_expectations.TestExpectations(port, tests)
+    return test_run_results.TestRunResults(expectations, len(tests))
+
+
+def summarized_results(port, expected, passing, flaky):
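+    # Helper used by the unit tests below: builds an initial run in one of three
+    # modes (everything as expected, everything unexpectedly passing, or a mix of
+    # unexpected failures), optionally adds a retry run to simulate flakiness, and
+    # returns the summarized dictionary.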
+    test_is_slow = False
+
+    initial_results = run_results(port)
+    if expected:
+        initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
+    elif passing:
+        initial_results.add(get_result('passes/text.html'), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
+    else:
+        initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+        initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+        # We only list hang.html here, since normally this test is WontFix.
+        initial_results.add(get_result('failures/expected/hang.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+    if flaky:
+        retry_results = run_results(port)
+        retry_results.add(get_result('passes/text.html'), True, test_is_slow)
+        retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
+        retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
+    else:
+        retry_results = None
+
+    return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False)
+
+
+class InterpretTestFailuresTest(unittest.TestCase):
+    def setUp(self):
+        host = MockHost()
+        self.port = host.port_factory.get(port_name='test')
+
+    def test_interpret_test_failures(self):
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureImageHashMismatch(diff_percent=0.42)])
+        self.assertEqual(test_dict['image_diff_percent'], 0.42)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/reftest-expected.html'))])
+        self.assertIn('image_diff_percent', test_dict)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
+        self.assertEqual(len(test_dict), 0)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingAudio()])
+        self.assertIn('is_missing_audio', test_dict)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingResult()])
+        self.assertIn('is_missing_text', test_dict)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImage()])
+        self.assertIn('is_missing_image', test_dict)
+
+        test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImageHash()])
+        self.assertIn('is_missing_image', test_dict)
+
+
+class SummarizedResultsTest(unittest.TestCase):
+    def setUp(self):
+        host = MockHost(initialize_scm_by_default=False)
+        self.port = host.port_factory.get(port_name='test')
+
+    def test_no_svn_revision(self):
+        summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+        self.assertNotIn('revision', summary)
+
+    def test_svn_revision(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+        self.assertNotEquals(summary['revision'], '')
+
+    def test_summarized_results_wontfix(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+        self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,58 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.port import test
+from webkitpy.layout_tests.servers.http_server_base import HttpServerBase
+
+
+class TestHttpServerBase(unittest.TestCase):
+    def test_corrupt_pid_file(self):
+        # This tests that if the pid file is corrupt or invalid,
+        # both start() and stop() deal with it correctly and delete the file.
+        host = MockHost()
+        test_port = test.TestPort(host)
+
+        server = HttpServerBase(test_port)
+        server._pid_file = '/tmp/pidfile'
+        server._spawn_process = lambda: 4
+        server._is_server_running_on_all_ports = lambda: True
+
+        host.filesystem.write_text_file(server._pid_file, 'foo')
+        server.stop()
+        self.assertEqual(host.filesystem.files[server._pid_file], None)
+
+        host.filesystem.write_text_file(server._pid_file, 'foo')
+        server.start()
+        self.assertEqual(server._pid, 4)
+
+        # Note that the pid file would not be None if _spawn_process()
+        # was actually a real implementation.
+        self.assertEqual(host.filesystem.files[server._pid_file], None)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from webkitpy.layout_tests.models import test_expectations
+
+from webkitpy.common.net import resultsjsonparser
+
+
+TestExpectations = test_expectations.TestExpectations
+TestExpectationParser = test_expectations.TestExpectationParser
+
+
+class BuildBotPrinter(object):
+    # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
+    # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
+    # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
+
+    def __init__(self, stream, debug_logging):
+        self.stream = stream
+        self.debug_logging = debug_logging
+
+    def print_results(self, run_details):
+        if self.debug_logging:
+            self.print_run_results(run_details.initial_results)
+        self.print_unexpected_results(run_details.summarized_results, run_details.enabled_pixel_tests_in_retry)
+
+    def _print(self, msg):
+        self.stream.write(msg + '\n')
+
+    def print_run_results(self, run_results):
+        failed = run_results.total_failures
+        total = run_results.total
+        passed = total - failed - run_results.remaining
+        percent_passed = 0.0
+        if total > 0:
+            percent_passed = float(passed) * 100 / total
+
+        self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
+        self._print("")
+        self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
+
+        self._print("")
+        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
+        self._print_run_results_entry(run_results, test_expectations.WONTFIX,
+            "Tests that will only be fixed if they crash (WONTFIX)")
+        self._print("")
+
+    def _print_run_results_entry(self, run_results, timeline, heading):
+        total = len(run_results.tests_by_timeline[timeline])
+        not_passing = (total -
+            len(run_results.tests_by_expectation[test_expectations.PASS] &
+                run_results.tests_by_timeline[timeline]))
+        self._print("=> %s (%d):" % (heading, not_passing))
+
+        for result in TestExpectations.EXPECTATION_ORDER:
+            if result in (test_expectations.PASS, test_expectations.SKIP):
+                continue
+            results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
+            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+            if not_passing and len(results):
+                pct = len(results) * 100.0 / not_passing
+                self._print("  %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
+
+    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
+        passes = {}
+        flaky = {}
+        regressions = {}
+
+        def add_to_dict_of_lists(result_dict, key, value):
+            result_dict.setdefault(key, []).append(value)
+
+        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
+            actual = results['actual'].split(" ")
+            expected = results['expected'].split(" ")
+
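+            # AUDIO, TEXT, and IMAGE+TEXT results are also treated as matching a
+            # generic FAIL expectation.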
+            def is_expected(result):
+                return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)
+
+            if all(is_expected(actual_result) for actual_result in actual):
+                # Don't print anything for tests that ran as expected.
+                return
+
+            if actual == ['PASS']:
+                if 'CRASH' in expected:
+                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
+                elif 'TIMEOUT' in expected:
+                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
+                else:
+                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
+            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
+                add_to_dict_of_lists(regressions, actual[0], test)
+            elif len(actual) > 1:
+                # We group flaky tests by the first actual result we got.
+                add_to_dict_of_lists(flaky, actual[0], test)
+            else:
+                add_to_dict_of_lists(regressions, results['actual'], test)
+
+        resultsjsonparser.for_each_test(summarized_results['tests'], add_result)
+
+        if len(passes) or len(flaky) or len(regressions):
+            self._print("")
+        if len(passes):
+            for key, tests in passes.iteritems():
+                self._print("%s: (%d)" % (key, len(tests)))
+                tests.sort()
+                for test in tests:
+                    self._print("  %s" % test)
+                self._print("")
+            self._print("")
+
+        if len(flaky):
+            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+            for key, tests in flaky.iteritems():
+                result = TestExpectations.EXPECTATIONS[key.lower()]
+                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
+                tests.sort()
+
+                for test in tests:
+                    result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
+                    actual = result['actual'].split(" ")
+                    expected = result['expected'].split(" ")
+                    result = TestExpectations.EXPECTATIONS[key.lower()]
+                    # FIXME: clean this up once the old syntax is gone
+                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
+                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
+                self._print("")
+            self._print("")
+
+        if len(regressions):
+            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+            for key, tests in regressions.iteritems():
+                result = TestExpectations.EXPECTATIONS[key.lower()]
+                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
+                tests.sort()
+                for test in tests:
+                    self._print("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
+                self._print("")
+
+        if len(summarized_results['tests']) and self.debug_logging:
+            self._print("%s" % ("-" * 78))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.models import test_run_results_unittest
+from webkitpy.layout_tests.views import buildbot_results
+
+
+class BuildBotPrinterTests(unittest.TestCase):
+    def assertEmpty(self, stream):
+        self.assertFalse(stream.getvalue())
+
+    def assertNotEmpty(self, stream):
+        self.assertTrue(stream.getvalue())
+
+    def get_printer(self):
+        stream = StringIO.StringIO()
+        printer = buildbot_results.BuildBotPrinter(stream, debug_logging=True)
+        return printer, stream
+
+    def test_print_unexpected_results(self):
+        port = MockHost().port_factory.get('test')
+        printer, out = self.get_printer()
+
+        # test everything running as expected
+        DASHED_LINE = "-" * 78 + "\n"
+        summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False)
+        printer.print_unexpected_results(summary)
+        self.assertEqual(out.getvalue(), DASHED_LINE)
+
+        # test failures
+        printer, out = self.get_printer()
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+        printer.print_unexpected_results(summary)
+        self.assertNotEmpty(out)
+
+        # test unexpected flaky
+        printer, out = self.get_printer()
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=True)
+        printer.print_unexpected_results(summary)
+        self.assertNotEmpty(out)
+
+        printer, out = self.get_printer()
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+        printer.print_unexpected_results(summary)
+        self.assertNotEmpty(out)
+
+        printer, out = self.get_printer()
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+        printer.print_unexpected_results(summary)
+        self.assertNotEmpty(out)
+
+        printer, out = self.get_printer()
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+        printer.print_unexpected_results(summary)
+        self.assertNotEmpty(out)
+
+    def test_print_results(self):
+        port = MockHost().port_factory.get('test')
+        printer, out = self.get_printer()
+        initial_results = test_run_results_unittest.run_results(port)
+        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+        details = test_run_results.RunDetails(summary['num_regressions'], summary, initial_results, None)
+        printer.print_results(details)
+        self.assertNotEmpty(out)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,582 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Integration tests for run_perf_tests."""
+
+import StringIO
+import datetime
+import json
+import re
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.driver import DriverOutput
+from webkitpy.port.test import TestPort
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+
+class InspectorPassTestData:
+    text = 'RESULT group_name: test_name= 42 ms'
+    output = """Running inspector/pass.html (2 of 2)
+RESULT group_name: test_name= 42 ms
+Finished: 0.1 s
+
+"""
+
+
+class EventTargetWrapperTestData:
+    text = """Running 20 times
+Ignoring warm-up run (1502)
+1504
+1505
+1510
+1504
+1507
+1509
+1510
+1487
+1488
+1472
+1472
+1488
+1473
+1472
+1475
+1487
+1486
+1486
+1475
+1471
+
+Time:
+values 1486, 1471, 1510, 1505, 1478, 1490 ms
+avg 1490 ms
+median 1488 ms
+stdev 15.13935 ms
+min 1471 ms
+max 1510 ms
+"""
+
+    output = """Running Bindings/event-target-wrapper.html (1 of 2)
+RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
+median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
+Finished: 0.1 s
+
+"""
+
+    results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+        'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
+
+
+class SomeParserTestData:
+    text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+"""
+
+    output = """Running Parser/some-parser.html (2 of 2)
+RESULT Parser: some-parser: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+Finished: 0.1 s
+
+"""
+
+
+class MemoryTestData:
+    text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+
+JS Heap:
+values 825000, 811000, 848000, 837000, 829000 bytes
+avg 830000 bytes
+median 829000 bytes
+stdev 13784.04875 bytes
+min 811000 bytes
+max 848000 bytes
+
+Malloc:
+values 529000, 511000, 548000, 536000, 521000 bytes
+avg 529000 bytes
+median 529000 bytes
+stdev 14124.44689 bytes
+min 511000 bytes
+max 548000 bytes
+"""
+
+    output = """Running 1 tests
+Running Parser/memory-test.html (1 of 1)
+RESULT Parser: memory-test: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+RESULT Parser: memory-test: JSHeap= 830000.0 bytes
+median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
+RESULT Parser: memory-test: Malloc= 529000.0 bytes
+median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
+Finished: 0.1 s
+"""
+
+    results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
+    js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
+    malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
+
+
+class TestDriver:
+    def run_test(self, driver_input, stop_when_done):
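+        # Fake driver: returns canned DriverOutput based on the test name suffix
+        # so the runner can be exercised without spawning a real driver process.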
+        text = ''
+        timeout = False
+        crash = False
+        if driver_input.test_name.endswith('pass.html'):
+            text = InspectorPassTestData.text
+        elif driver_input.test_name.endswith('timeout.html'):
+            timeout = True
+        elif driver_input.test_name.endswith('failed.html'):
+            text = None
+        elif driver_input.test_name.endswith('tonguey.html'):
+            text = 'we are not expecting an output from perf tests but RESULT blablabla'
+        elif driver_input.test_name.endswith('crash.html'):
+            crash = True
+        elif driver_input.test_name.endswith('event-target-wrapper.html'):
+            text = EventTargetWrapperTestData.text
+        elif driver_input.test_name.endswith('some-parser.html'):
+            text = SomeParserTestData.text
+        elif driver_input.test_name.endswith('memory-test.html'):
+            text = MemoryTestData.text
+        return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
+
+    def start(self):
+        """do nothing"""
+
+    def stop(self):
+        """do nothing"""
+
+
+class MainTest(unittest.TestCase):
+    def _normalize_output(self, log):
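+        # Replace the measured run time and truncate extra stdev digits so log
+        # output can be compared against the canned expectations above.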
+        return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
+
+    def _load_output_json(self, runner):
+        json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
+        return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
+
+    def create_runner(self, args=[], driver_class=TestDriver):
+        options, parsed_args = PerfTestsRunner._parse_args(args)
+        test_port = TestPort(host=MockHost(), options=options)
+        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
+
+        runner = PerfTestsRunner(args=args, port=test_port)
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+
+        return runner, test_port
+
+    def run_test(self, test_name):
+        runner, port = self.create_runner()
+        tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
+        return runner._run_tests_set(tests) == 0
+
+    def test_run_passing_test(self):
+        self.assertTrue(self.run_test('pass.html'))
+
+    def test_run_silent_test(self):
+        self.assertFalse(self.run_test('silent.html'))
+
+    def test_run_failed_test(self):
+        self.assertFalse(self.run_test('failed.html'))
+
+    def test_run_tonguey_test(self):
+        self.assertFalse(self.run_test('tonguey.html'))
+
+    def test_run_timeout_test(self):
+        self.assertFalse(self.run_test('timeout.html'))
+
+    def test_run_crash_test(self):
+        self.assertFalse(self.run_test('crash.html'))
+
+    def _tests_for_runner(self, runner, test_names):
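+        # Tests under inspector/ use the Chromium-style RESULT parser; everything
+        # else is treated as a generic PerfTest.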
+        filesystem = runner._host.filesystem
+        tests = []
+        for test in test_names:
+            path = filesystem.join(runner._base_path, test)
+            if test.startswith('inspector/'):
+                tests.append(ChromiumStylePerfTest(runner._port, test, path))
+            else:
+                tests.append(PerfTest(runner._port, test, path))
+        return tests
+
+    def test_run_test_set(self):
+        runner, port = self.create_runner()
+        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, len(tests) - 1)
+        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
+
+    def test_run_test_set_kills_drt_per_run(self):
+
+        class TestDriverWithStopCount(TestDriver):
+            stop_count = 0
+            def stop(self):
+                TestDriverWithStopCount.stop_count += 1
+
+        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
+
+        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+        unexpected_result_count = runner._run_tests_set(tests)
+
+        self.assertEqual(TestDriverWithStopCount.stop_count, 6)
+
+    def test_run_test_set_for_parser_tests(self):
+        runner, port = self.create_runner()
+        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
+
+    def test_run_memory_test(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        runner._timestamp = 123456789
+        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
+
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner.run()
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
+        parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
+        self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
+        self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
+        self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
+
+    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
+        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
+        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
+
+        uploaded = [False]
+
+        def mock_upload_json(hostname, json_path, host_path=None):
+            # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
+            self.assertIn(hostname, ['some.host'])
+            self.assertIn(json_path, ['/mock-checkout/output.json'])
+            self.assertIn(host_path, [None, '/api/report'])
+            uploaded[0] = upload_succeeds
+            return upload_succeeds
+
+        runner._upload_json = mock_upload_json
+        runner._timestamp = 123456789
+        runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), expected_exit_code)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        if not expected_exit_code and compare_logs:
+            expected_logs = ''
+            for i in xrange(repeat):
+                runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
+                expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
+            if results_shown:
+                expected_logs += 'MOCK: user.open_url: file://...\n'
+            self.assertEqual(self._normalize_output(logs), expected_logs)
+
+        self.assertEqual(uploaded[0], upload_succeeds)
+
+        return logs
+
+    _event_target_wrapper_and_inspector_results = {
+        "Bindings":
+            {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
+            "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}
+
+    def test_run_with_json_output(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+        filesystem = port.host.filesystem
+        self.assertTrue(filesystem.isfile(runner._output_json_path()))
+        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
+
+    def test_run_with_description(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--description', 'some description'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+    def create_runner_and_setup_results_template(self, args=[]):
+        runner, port = self.create_runner(args)
+        filesystem = port.host.filesystem
+        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
+            'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
+            '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
+        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+        return runner, port
+
+    def test_run_respects_no_results(self):
+        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--no-results'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
+        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
+
+    def test_run_generates_json_by_default(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
+
+        self.assertFalse(filesystem.isfile(output_json_path))
+        self.assertFalse(filesystem.isfile(results_page_path))
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+        self.assertTrue(filesystem.isfile(output_json_path))
+        self.assertTrue(filesystem.isfile(results_page_path))
+
+    def test_run_merges_output_by_default(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+    def test_run_respects_reset_results(self):
+        runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+    def test_run_generates_and_show_results_page(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        page_shown = []
+        port.show_results_html_file = lambda path: page_shown.append(path)
+        filesystem = port.host.filesystem
+        self._test_run_with_json_output(runner, filesystem, results_shown=False)
+
+        expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
+
+        self.maxDiff = None
+        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+        self.assertEqual(self._load_output_json(runner), [expected_entry])
+        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+        self._test_run_with_json_output(runner, filesystem, results_shown=False)
+        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+        self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
+        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+
+    def test_run_respects_no_show_results(self):
+        show_results_html_file = lambda path: page_shown.append(path)
+
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        page_shown = []
+        port.show_results_html_file = show_results_html_file
+        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--no-show-results'])
+        page_shown = []
+        port.show_results_html_file = show_results_html_file
+        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+        self.assertEqual(page_shown, [])
+
+    def test_run_with_bad_output_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+
+    def test_run_with_slave_config_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
+
+    def test_run_with_bad_slave_config_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+
+    def test_run_with_multiple_repositories(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host'])
+        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        self.assertEqual(self._load_output_json(runner), [{
+            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
+            "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+    def test_run_with_upload_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
+
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+        self.assertEqual(generated_json[0]['platform'], 'platform1')
+        self.assertEqual(generated_json[0]['builderName'], 'builder1')
+        self.assertEqual(generated_json[0]['buildNumber'], 123)
+
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
+
+    def test_run_with_upload_json_should_generate_perf_webkit_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
+            '--slave-config-json-path=/mock-checkout/slave-config.json'])
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
+
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+        self.assertTrue(isinstance(generated_json, list))
+        self.assertEqual(len(generated_json), 1)
+
+        output = generated_json[0]
+        self.maxDiff = None
+        self.assertEqual(output['platform'], 'platform1')
+        self.assertEqual(output['buildNumber'], 123)
+        self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
+        self.assertEqual(output['builderName'], 'builder1')
+        self.assertEqual(output['builderKey'], 'value1')
+        self.assertEqual(output['revisions'], {'WebKit': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
+        self.assertEqual(output['tests'].keys(), ['Bindings'])
+        self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
+        self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
+        self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
+        self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
+            'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+            'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
+
+    def test_run_with_repeat(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--repeat', '5'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
+        self.assertEqual(self._load_output_json(runner), [
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+    def test_run_with_test_runner_count(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-runner-count=3'])
+        self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
+        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+        self.assertTrue(isinstance(generated_json, list))
+        self.assertEqual(len(generated_json), 1)
+
+        output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
+        self.assertEqual(len(output), 3)
+        expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
+        for metrics in output:
+            self.assertEqual(metrics, expectedMetrics)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/__init__.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,35 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Port-specific entrypoints for the layout tests test infrastructure."""
+
+import builders  # Why is this in port?
+
+from base import Port  # It's possible we don't need to export this virtual baseclass outside the module.
+from driver import Driver, DriverInput, DriverOutput
+from factory import platform_options, configuration_options
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/apple.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,108 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.port.base import Port
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+
+
+_log = logging.getLogger(__name__)
+
+
+class ApplePort(Port):
+    """Shared logic between all of Apple's ports."""
+
+    # This is used to represent the version of an operating system
+    # corresponding to the "mac" or "win" base LayoutTests/platform
+    # directory.  I'm not sure this concept is very useful,
+    # but it gives us a way to refer to fallback paths *only* including
+    # the base directory.
+    # This is mostly done because TestConfiguration assumes that self.version()
+    # will never return None. (None would be another way to represent this concept.)
+    # Apple supposedly has explicit "future" results which are kept in an internal repository.
+    # It's possible that Apple would want to fix this code to work better with those results.
+    FUTURE_VERSION = 'future'  # FIXME: This whole 'future' thing feels like a hack.
+
+    # overridden in subclasses
+    VERSION_FALLBACK_ORDER = []
+    ARCHITECTURES = []
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        options = options or {}
+        if port_name in (cls.port_name, cls.port_name + '-wk2'):
+            # If the port_name matches the (badly named) cls.port_name, that
+            # means that they passed 'mac' or 'win' and didn't specify a version.
+            # That convention means that we're supposed to use the version currently
+            # being run, so this won't work if you're not on mac or win (respectively).
+            # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
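+            # Illustrative example (assuming a Mountain Lion host): 'mac' becomes 'mac-mountainlion',
+            # and 'mac-wk2' (or 'mac' with --webkit-test-runner) becomes 'mac-mountainlion-wk2'.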
+            assert host.platform.os_name in port_name, "%s is not in %s!" % (host.platform.os_name, port_name)
+            if port_name == cls.port_name and not getattr(options, 'webkit_test_runner', False):
+                port_name = cls.port_name + '-' + host.platform.os_version
+            else:
+                port_name = cls.port_name + '-' + host.platform.os_version + '-wk2'
+        elif getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
+            port_name += '-wk2'
+
+        return port_name
+
+    def _strip_port_name_prefix(self, port_name):
+        # Callers treat this return value as the "version", which only works
+        # because Apple ports use a simple name-version port_name scheme.
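+        # For example (illustrative): 'mac-mountainlion' -> 'mountainlion'.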
+        # FIXME: This parsing wouldn't be needed if port_name handling was moved to factory.py
+        # instead of the individual port constructors.
+        return port_name[len(self.port_name + '-'):]
+
+    def __init__(self, host, port_name, **kwargs):
+        super(ApplePort, self).__init__(host, port_name, **kwargs)
+
+        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
+        port_name = port_name.replace('-wk2', '')
+        self._version = self._strip_port_name_prefix(port_name)
+        assert port_name in allowed_port_names, "%s is not in %s" % (port_name, allowed_port_names)
+
+    def _skipped_file_search_paths(self):
+        # We don't have a dedicated Skipped file for the most recent version of the port;
+        # we just use the one in platform/{mac,win}
+        most_recent_name = self.VERSION_FALLBACK_ORDER[-1]
+        return set(filter(lambda name: name != most_recent_name, super(ApplePort, self)._skipped_file_search_paths()))
+
+    # FIXME: A more sophisticated version of this function should move to WebKitPort and replace all calls to name().
+    # This is also a misleading name, since 'mac-future' gets remapped to 'mac'.
+    def _port_name_with_version(self):
+        return self.name().replace('-future', '').replace('-wk2', '')
+
+    def _generate_all_test_configurations(self):
+        configurations = []
+        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
+        for port_name in allowed_port_names:
+            for build_type in self.ALL_BUILD_TYPES:
+                for architecture in self.ARCHITECTURES:
+                    configurations.append(TestConfiguration(version=self._strip_port_name_prefix(port_name), architecture=architecture, build_type=build_type))
+        return configurations
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/base.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,1543 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Abstract base class of Port-specific entry points for the layout tests
+test infrastructure (the Port and Driver classes)."""
+
+import cgi
+import difflib
+import errno
+import itertools
+import logging
+import os
+import operator
+import optparse
+import re
+import sys
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    # Needed for Python < 2.7
+    from webkitpy.thirdparty.ordered_dict import OrderedDict
+
+
+from webkitpy.common import find_files
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system import path
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.port import config as port_config
+from webkitpy.port import driver
+from webkitpy.port import http_lock
+from webkitpy.port import image_diff
+from webkitpy.port import server_process
+from webkitpy.port.factory import PortFactory
+from webkitpy.layout_tests.servers import apache_http_server
+from webkitpy.layout_tests.servers import http_server
+from webkitpy.layout_tests.servers import websocket_server
+
+_log = logging.getLogger(__name__)
+
+
+class Port(object):
+    """Abstract class for Port-specific hooks for the layout_test package."""
+
+    # Subclasses override this. This should indicate the basic implementation
+    # part of the port name, e.g., 'win', 'gtk'; there is probably (?) one unique value per class.
+
+    # FIXME: We should probably rename this to something like 'implementation_name'.
+    port_name = None
+
+    # Test names resemble unix relative paths, and use '/' as a directory separator.
+    TEST_PATH_SEPARATOR = '/'
+
+    ALL_BUILD_TYPES = ('debug', 'release')
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        """Return a fully-specified port name that can be used to construct objects."""
+        # Subclasses will usually override this.
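+        # For example (illustrative): with --webkit-test-runner set, 'gtk' becomes 'gtk-wk2'.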
+        options = options or {}
+        assert port_name.startswith(cls.port_name)
+        if getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
+            return port_name + '-wk2'
+        return port_name
+
+    def __init__(self, host, port_name, options=None, **kwargs):
+
+        # This value may be different from cls.port_name by having version modifiers
+        # and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
+        self._name = port_name
+
+        # These are default values that should be overridden in subclasses.
+        self._version = ''
+        self._architecture = 'x86'
+
+        # FIXME: Ideally we'd have a package-wide way to get a
+        # well-formed options object that had all of the necessary
+        # options defined on it.
+        self._options = options or optparse.Values()
+
+        if self._name and '-wk2' in self._name:
+            self._options.webkit_test_runner = True
+
+        self.host = host
+        self._executive = host.executive
+        self._filesystem = host.filesystem
+        self._webkit_finder = WebKitFinder(host.filesystem)
+        self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
+
+        self._helper = None
+        self._http_server = None
+        self._websocket_server = None
+        self._image_differ = None
+        self._server_process_constructor = server_process.ServerProcess  # overridable for testing
+        self._http_lock = None  # FIXME: Why does this live on the port object?
+
+        # Python's Popen has a bug that causes any pipes opened to a
+        # process that can't be executed to be leaked.  Since this
+        # code is specifically designed to tolerate exec failures
+        # to gracefully handle cases where wdiff is not installed,
+        # the bug results in a massive file descriptor leak. As a
+        # workaround, if an exec failure is ever experienced for
+        # wdiff, assume it's not available.  This will leak one
+        # file descriptor but that's better than leaking each time
+        # wdiff would be run.
+        #
+        # http://mail.python.org/pipermail/python-list/
+        #    2008-August/505753.html
+        # http://bugs.python.org/issue3210
+        self._wdiff_available = None
+
+        # FIXME: prettypatch.py knows this path, why is it copied here?
+        self._pretty_patch_path = self.path_from_webkit_base("Websites", "bugs.webkit.org", "PrettyPatch", "prettify.rb")
+        self._pretty_patch_available = None
+
+        if not hasattr(options, 'configuration') or not options.configuration:
+            self.set_option_default('configuration', self.default_configuration())
+        self._test_configuration = None
+        self._reftest_list = {}
+        self._results_directory = None
+        self._root_was_set = hasattr(options, 'root') and options.root
+
+    def additional_drt_flag(self):
+        return []
+
+    def supports_per_test_timeout(self):
+        return False
+
+    def default_pixel_tests(self):
+        # FIXME: Disable until they are run by default on build.webkit.org.
+        return False
+
+    def default_timeout_ms(self):
+        if self.get_option('webkit_test_runner'):
+            # Add some more time to WebKitTestRunner because it needs to synchronise the state
+            # with the web process, and we want to detect if there is a problem with that in the driver.
+            return 80 * 1000
+        return 35 * 1000
+
+    def driver_stop_timeout(self):
+        """ Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
+        # We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
+        # well (for things like ASAN, Valgrind, etc.)
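+        # Worked example (illustrative): with --time-out-ms=70000 and the 35000ms default, this waits 6.0 seconds.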
+        return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
+
+    def wdiff_available(self):
+        if self._wdiff_available is None:
+            self._wdiff_available = self.check_wdiff(logging=False)
+        return self._wdiff_available
+
+    def pretty_patch_available(self):
+        if self._pretty_patch_available is None:
+            self._pretty_patch_available = self.check_pretty_patch(logging=False)
+        return self._pretty_patch_available
+
+    def should_retry_crashes(self):
+        return False
+
+    def default_child_processes(self):
+        """Return the number of DumpRenderTree instances to use for this port."""
+        return self._executive.cpu_count()
+
+    def default_max_locked_shards(self):
+        """Return the number of "locked" shards to run in parallel (like the http tests)."""
+        return 1
+
+    def worker_startup_delay_secs(self):
+        # FIXME: If we start workers up too quickly, DumpRenderTree appears
+        # to thrash on something and time out its first few tests. Until
+        # we can figure out what's going on, sleep a bit in between
+        # workers. See https://bugs.webkit.org/show_bug.cgi?id=79147 .
+        return 0.1
+
+    def baseline_path(self):
+        """Return the absolute path to the directory to store new baselines in for this port."""
+        # FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
+        return self.baseline_version_dir()
+
+    def baseline_platform_dir(self):
+        """Return the absolute path to the default (version-independent) platform-specific results."""
+        return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
+
+    def baseline_version_dir(self):
+        """Return the absolute path to the platform-and-version-specific results."""
+        baseline_search_paths = self.baseline_search_path()
+        return baseline_search_paths[0]
+
+    def baseline_search_path(self):
+        return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
+
+    def default_baseline_search_path(self):
+        """Return a list of absolute paths to directories to search under for
+        baselines. The directories are searched in order."""
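+        # Illustrative example (hypothetical 'mac-mountainlion' port): the result is roughly
+        # [.../platform/mac-mountainlion, .../platform/mac]; when running WebKitTestRunner
+        # the '-wk2' directory is searched first.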
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.append(self._wk2_port_name())
+        search_paths.append(self.name())
+        if self.name() != self.port_name:
+            search_paths.append(self.port_name)
+        return map(self._webkit_baseline_path, search_paths)
+
+    @memoized
+    def _compare_baseline(self):
+        factory = PortFactory(self.host)
+        target_port = self.get_option('compare_port')
+        if target_port:
+            return factory.get(target_port).default_baseline_search_path()
+        return []
+
+    def check_build(self, needs_http):
+        """This routine is used to ensure that the build is up to date
+        and all the needed binaries are present."""
+        # If we're using a pre-built copy of WebKit (--root), we assume it also includes a build of DRT.
+        if not self._root_was_set and self.get_option('build') and not self._build_driver():
+            return False
+        if not self._check_driver():
+            return False
+        if self.get_option('pixel_tests'):
+            if not self.check_image_diff():
+                return False
+        if not self._check_port_build():
+            return False
+        return True
+
+    def _check_driver(self):
+        driver_path = self._path_to_driver()
+        if not self._filesystem.exists(driver_path):
+            _log.error("%s was not found at %s" % (self.driver_name(), driver_path))
+            return False
+        return True
+
+    def _check_port_build(self):
+        # Ports can override this method to do additional checks.
+        return True
+
+    def check_sys_deps(self, needs_http):
+        """If the port needs to do some runtime checks to ensure that the
+        tests can be run successfully, it should override this routine.
+        This step can be skipped with --nocheck-sys-deps.
+
+        Returns whether the system is properly configured."""
+        if needs_http:
+            return self.check_httpd()
+        return True
+
+    def check_image_diff(self, override_step=None, logging=True):
+        """This routine is used to check whether image_diff binary exists."""
+        image_diff_path = self._path_to_image_diff()
+        if not self._filesystem.exists(image_diff_path):
+            _log.error("ImageDiff was not found at %s" % image_diff_path)
+            return False
+        return True
+
+    def check_pretty_patch(self, logging=True):
+        """Checks whether we can use the PrettyPatch ruby script."""
+        try:
+            _ = self._executive.run_command(['ruby', '--version'])
+        except OSError, e:
+            if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+                if logging:
+                    _log.warning("Ruby is not installed; can't generate pretty patches.")
+                    _log.warning('')
+                return False
+
+        if not self._filesystem.exists(self._pretty_patch_path):
+            if logging:
+                _log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
+                _log.warning('')
+            return False
+
+        return True
+
+    def check_wdiff(self, logging=True):
+        if not self._path_to_wdiff():
+            # Don't need to log here since this is the port choosing not to use wdiff.
+            return False
+
+        try:
+            _ = self._executive.run_command([self._path_to_wdiff(), '--help'])
+        except OSError:
+            if logging:
+                message = self._wdiff_missing_message()
+                if message:
+                    for line in message.splitlines():
+                        _log.warning('    ' + line)
+                    _log.warning('')
+            return False
+
+        return True
+
+    def _wdiff_missing_message(self):
+        return 'wdiff is not installed; please install it to generate word-by-word diffs.'
+
+    def check_httpd(self):
+        if self._uses_apache():
+            httpd_path = self._path_to_apache()
+        else:
+            httpd_path = self._path_to_lighttpd()
+
+        try:
+            server_name = self._filesystem.basename(httpd_path)
+            env = self.setup_environ_for_server(server_name)
+            if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
+                _log.error("httpd seems broken. Cannot run http tests.")
+                return False
+            return True
+        except OSError:
+            _log.error("No httpd found. Cannot run http tests.")
+            return False
+
+    def do_text_results_differ(self, expected_text, actual_text):
+        return expected_text != actual_text
+
+    def do_audio_results_differ(self, expected_audio, actual_audio):
+        return expected_audio != actual_audio
+
+    def diff_image(self, expected_contents, actual_contents, tolerance=None):
+        """Compare two images and return a tuple of an image diff, a percentage difference (0-100), and an error string.
+
+        |tolerance| should be a percentage value (0.0 - 100.0).
+        If it is omitted, the port default tolerance value is used.
+
+        If an error occurs (like ImageDiff isn't found, or crashes), we log an error and return True (for a diff).
+        """
+        if not actual_contents and not expected_contents:
+            return (None, 0, None)
+        if not actual_contents or not expected_contents:
+            return (True, 0, None)
+        if not self._image_differ:
+            self._image_differ = image_diff.ImageDiffer(self)
+        self.set_option_default('tolerance', 0.1)
+        if tolerance is None:
+            tolerance = self.get_option('tolerance')
+        return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
+
+    def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
+        """Returns a string containing the diff of the two text strings
+        in 'unified diff' format."""
+
+        # The filenames show up in the diff output; make sure they're
+        # raw bytes and not unicode, so that they don't trigger join()
+        # trying to decode the input.
+        def to_raw_bytes(string_value):
+            if isinstance(string_value, unicode):
+                return string_value.encode('utf-8')
+            return string_value
+        expected_filename = to_raw_bytes(expected_filename)
+        actual_filename = to_raw_bytes(actual_filename)
+        diff = difflib.unified_diff(expected_text.splitlines(True),
+                                    actual_text.splitlines(True),
+                                    expected_filename,
+                                    actual_filename)
+        return ''.join(diff)
+
+    def check_for_leaks(self, process_name, process_pid):
+        # Subclasses should check for leaks in the running process
+        # and print any necessary warnings if leaks are found.
+        # FIXME: We should consider moving much of this logic into
+        # Executive and make it platform-specific instead of port-specific.
+        pass
+
+    def print_leaks_summary(self):
+        # Subclasses can override this to print a summary of leaks found
+        # while running the layout tests.
+        pass
+
+    def driver_name(self):
+        if self.get_option('driver_name'):
+            return self.get_option('driver_name')
+        if self.get_option('webkit_test_runner'):
+            return 'WebKitTestRunner'
+        return 'DumpRenderTree'
+
+    def expected_baselines_by_extension(self, test_name):
+        """Returns a dict mapping baseline suffix to relative path for each baseline in
+        a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
+        # FIXME: The name similarity between this and expected_baselines() below is unfortunate.
+        # We should probably rename them both.
+        baseline_dict = {}
+        reference_files = self.reference_files(test_name)
+        if reference_files:
+            # FIXME: How should this handle more than one type of reftest?
+            baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
+
+        for extension in self.baseline_extensions():
+            path = self.expected_filename(test_name, extension, return_default=False)
+            baseline_dict[extension] = self.relative_test_filename(path) if path else path
+
+        return baseline_dict
+
+    def baseline_extensions(self):
+        """Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
+        return ('.wav', '.webarchive', '.txt', '.png')
+
+    def expected_baselines(self, test_name, suffix, all_baselines=False):
+        """Given a test name, finds where the baseline results are located.
+
+        Args:
+        test_name: name of test file (usually a relative path under LayoutTests/)
+        suffix: file suffix of the expected results, including dot; e.g.
+            '.txt' or '.png'.  This should not be None, but may be an empty
+            string.
+        all_baselines: If True, return an ordered list of all baseline paths
+            for the given platform. If False, return only the first one.
+        Returns
+        a list of ( platform_dir, results_filename ), where
+            platform_dir - abs path to the top of the results tree (or test
+                tree)
+            results_filename - relative path from top of tree to the results
+                file
+            (port.join() of the two gives you the full path to the file,
+                unless None was returned.)
+        Return values will be in the format appropriate for the current
+        platform (e.g., "\\" for path separators on Windows). If the results
+        file is not found, then None will be returned for the directory,
+        but the expected relative pathname will still be returned.
+
+        This routine is generic but lives here since it is used in
+        conjunction with the other baseline and filename routines that are
+        platform specific.
+        """
+        baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
+        baseline_search_path = self.baseline_search_path()
+
+        baselines = []
+        for platform_dir in baseline_search_path:
+            if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+                baselines.append((platform_dir, baseline_filename))
+
+            if not all_baselines and baselines:
+                return baselines
+
+        # If it wasn't found in a platform directory, return the expected
+        # result in the test directory, even if no such file actually exists.
+        platform_dir = self.layout_tests_dir()
+        if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+            baselines.append((platform_dir, baseline_filename))
+
+        if baselines:
+            return baselines
+
+        return [(None, baseline_filename)]
+
+    def expected_filename(self, test_name, suffix, return_default=True):
+        """Given a test name, returns an absolute path to its expected results.
+
+        If no expected results are found in any of the searched directories,
+        the directory in which the test itself is located will be returned.
+        The return value is in the format appropriate for the platform
+        (e.g., "\\" for path separators on windows).
+
+        Args:
+        test_name: name of test file (usually a relative path under LayoutTests/)
+        suffix: file suffix of the expected results, including dot; e.g. '.txt'
+            or '.png'.  This should not be None, but may be an empty string.
+        return_default: if True, returns the path to the generic expectation if nothing
+            else is found; if False, returns None.
+
+        This routine is generic but is implemented here to live alongside
+        the other baseline and filename manipulation routines.
+        """
+        # FIXME: The [0] here is very mysterious, as is the destructured return.
+        platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
+        if platform_dir:
+            return self._filesystem.join(platform_dir, baseline_filename)
+
+        actual_test_name = self.lookup_virtual_test_base(test_name)
+        if actual_test_name:
+            return self.expected_filename(actual_test_name, suffix)
+
+        if return_default:
+            return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
+        return None
+
+    def expected_checksum(self, test_name):
+        """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
+        png_path = self.expected_filename(test_name, '.png')
+
+        if self._filesystem.exists(png_path):
+            with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
+                return read_checksum_from_png.read_checksum(filehandle)
+
+        return None
+
+    def expected_image(self, test_name):
+        """Returns the image we expect the test to produce."""
+        baseline_path = self.expected_filename(test_name, '.png')
+        if not self._filesystem.exists(baseline_path):
+            return None
+        return self._filesystem.read_binary_file(baseline_path)
+
+    def expected_audio(self, test_name):
+        baseline_path = self.expected_filename(test_name, '.wav')
+        if not self._filesystem.exists(baseline_path):
+            return None
+        return self._filesystem.read_binary_file(baseline_path)
+
+    def expected_text(self, test_name):
+        """Returns the text output we expect the test to produce, or None
+        if we don't expect there to be any text output.
+        End-of-line characters are normalized to '\n'."""
+        # FIXME: DRT output is actually utf-8, but since we don't decode the
+        # output from DRT (instead treating it as a binary string), we read the
+        # baselines as a binary string, too.
+        baseline_path = self.expected_filename(test_name, '.txt')
+        if not self._filesystem.exists(baseline_path):
+            baseline_path = self.expected_filename(test_name, '.webarchive')
+            if not self._filesystem.exists(baseline_path):
+                return None
+        text = self._filesystem.read_binary_file(baseline_path)
+        return text.replace("\r\n", "\n")
+
+    def _get_reftest_list(self, test_name):
+        dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
+        if dirname not in self._reftest_list:
+            self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
+        return self._reftest_list[dirname]
+
+    @staticmethod
+    def _parse_reftest_list(filesystem, test_dirpath):
+        reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
+        if not filesystem.isfile(reftest_list_path):
+            return None
+        reftest_list_file = filesystem.read_text_file(reftest_list_path)
+
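+        # Each (non-comment) line has three fields, e.g. (illustrative):
+        #     == test.html test-expected.html
+        #     != test.html test-mismatch.html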
+        parsed_list = {}
+        for line in reftest_list_file.split('\n'):
+            line = re.sub('#.+$', '', line)
+            split_line = line.split()
+            if len(split_line) < 3:
+                continue
+            expectation_type, test_file, ref_file = split_line
+            parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
+        return parsed_list
+
+    def reference_files(self, test_name):
+        """Return a list of expectation (== or !=) and filename pairs"""
+
+        reftest_list = self._get_reftest_list(test_name)
+        if not reftest_list:
+            reftest_list = []
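+            # No reftest.list: fall back to filename conventions, e.g. (illustrative)
+            # 'foo-expected.html' for a match and 'foo-expected-mismatch.html' for a mismatch.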
+            for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
+                for extension in Port._supported_file_extensions:
+                    path = self.expected_filename(test_name, prefix + extension)
+                    if self._filesystem.exists(path):
+                        reftest_list.append((expectation, path))
+            return reftest_list
+
+        return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), [])  # pylint: disable=E1103
+
+    def tests(self, paths):
+        """Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
+        expanded_paths = self._expanded_paths(paths)
+        tests = self._real_tests(expanded_paths)
+        tests.extend(self._virtual_tests(expanded_paths, self.populated_virtual_test_suites()))
+        return tests
+
+    def _expanded_paths(self, paths):
+        expanded_paths = []
+        fs = self._filesystem
+        all_platform_dirs = [path for path in fs.glob(fs.join(self.layout_tests_dir(), 'platform', '*')) if fs.isdir(path)]
+        for path in paths:
+            expanded_paths.append(path)
+            if self.test_isdir(path) and not path.startswith('platform'):
+                for platform_dir in all_platform_dirs:
+                    if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
+                        expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
+
+        return expanded_paths
+
+    def _real_tests(self, paths):
+        # When collecting test cases, skip these directories
+        skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests', 'reference', 'reftest'])
+        files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port._is_test_file, self.test_key)
+        return [self.relative_test_filename(f) for f in files]
+
+    # When collecting test cases, we include any file with these extensions.
+    _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
+                                      '.htm', '.php', '.svg', '.mht'])
+
+    @staticmethod
+    # If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
+    def is_reference_html_file(filesystem, dirname, filename):
+        if filename.startswith('ref-') or filename.startswith('notref-'):
+            return True
+        filename_without_ext, unused = filesystem.splitext(filename)
+        for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
+            if filename_without_ext.endswith(suffix):
+                return True
+        return False
+
+    @staticmethod
+    def _has_supported_extension(filesystem, filename):
+        """Return true if filename is one of the file extensions we want to run a test on."""
+        extension = filesystem.splitext(filename)[1]
+        return extension in Port._supported_file_extensions
+
+    @staticmethod
+    def _is_test_file(filesystem, dirname, filename):
+        return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
+
+    def test_key(self, test_name):
+        """Turns a test name into a list with two sublists, the natural key of the
+        dirname, and the natural key of the basename.
+
+        This can be used when sorting paths so that files in a directory
+        are kept together rather than being mixed in with files in
+        subdirectories."""
+        dirname, basename = self.split_test(test_name)
+        return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
+
+    def _natural_sort_key(self, string_to_split):
+        """ Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
+
+        This can be used to implement "natural sort" order. See:
+        http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
+        http://nedbatchelder.com/blog/200712.html#e20071211T054956
+        """
+        def tryint(val):
+            try:
+                return int(val)
+            except ValueError:
+                return val
+
+        return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
+
+    def test_dirs(self):
+        """Returns the list of top-level test directories."""
+        layout_tests_dir = self.layout_tests_dir()
+        return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
+                      self._filesystem.listdir(layout_tests_dir))
+
+    @memoized
+    def test_isfile(self, test_name):
+        """Return True if the test name refers to a directory of tests."""
+        # Used by test_expectations.py to apply rules to whole directories.
+        if self._filesystem.isfile(self.abspath_for_test(test_name)):
+            return True
+        base = self.lookup_virtual_test_base(test_name)
+        return base and self._filesystem.isfile(self.abspath_for_test(base))
+
+    @memoized
+    def test_isdir(self, test_name):
+        """Return True if the test name refers to a directory of tests."""
+        # Used by test_expectations.py to apply rules to whole directories.
+        if self._filesystem.isdir(self.abspath_for_test(test_name)):
+            return True
+        base = self.lookup_virtual_test_base(test_name)
+        return base and self._filesystem.isdir(self.abspath_for_test(base))
+
+    @memoized
+    def test_exists(self, test_name):
+        """Return True if the test name refers to an existing test or baseline."""
+        # Used by test_expectations.py to determine if an entry refers to a
+        # valid test and by printing.py to determine if baselines exist.
+        return self.test_isfile(test_name) or self.test_isdir(test_name)
+
+    def split_test(self, test_name):
+        """Splits a test name into the 'directory' part and the 'basename' part."""
+        index = test_name.rfind(self.TEST_PATH_SEPARATOR)
+        if index < 1:
+            return ('', test_name)
+        return (test_name[0:index], test_name[index:])
+
+    def normalize_test_name(self, test_name):
+        """Returns a normalized version of the test name or test directory."""
+        if test_name.endswith('/'):
+            return test_name
+        if self.test_isdir(test_name):
+            return test_name + '/'
+        return test_name
+
+    def driver_cmd_line(self):
+        """Prints the DRT command line that will be used."""
+        driver = self.create_driver(0)
+        return driver.cmd_line(self.get_option('pixel_tests'), [])
+
+    def update_baseline(self, baseline_path, data):
+        """Updates the baseline for a test.
+
+        Args:
+            baseline_path: the actual path to use for baseline, not the path to
+              the test. This function is used to update either generic or
+              platform-specific baselines, but we can't infer which here.
+            data: contents of the baseline.
+        """
+        self._filesystem.write_binary_file(baseline_path, data)
+
+    # FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
+    def webkit_base(self):
+        return self._webkit_finder.webkit_base()
+
+    def path_from_webkit_base(self, *comps):
+        return self._webkit_finder.path_from_webkit_base(*comps)
+
+    def path_to_script(self, script_name):
+        return self._webkit_finder.path_to_script(script_name)
+
+    def layout_tests_dir(self):
+        return self._webkit_finder.layout_tests_dir()
+
+    def perf_tests_dir(self):
+        return self._webkit_finder.perf_tests_dir()
+
+    def skipped_layout_tests(self, test_list):
+        """Returns tests skipped outside of the TestExpectations files."""
+        return set(self._tests_for_other_platforms()).union(self._skipped_tests_for_unsupported_features(test_list))
+
+    def _tests_from_skipped_file_contents(self, skipped_file_contents):
+        tests_to_skip = []
+        for line in skipped_file_contents.split('\n'):
+            line = line.strip()
+            line = line.rstrip('/')  # Best to normalize directory names to not include the trailing slash.
+            if line.startswith('#') or not len(line):
+                continue
+            tests_to_skip.append(line)
+        return tests_to_skip
+
+    def _expectations_from_skipped_files(self, skipped_file_paths):
+        tests_to_skip = []
+        for search_path in skipped_file_paths:
+            filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
+            if not self._filesystem.exists(filename):
+                _log.debug("Skipped does not exist: %s" % filename)
+                continue
+            _log.debug("Using Skipped file: %s" % filename)
+            skipped_file_contents = self._filesystem.read_text_file(filename)
+            tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
+        return tests_to_skip
+
+    @memoized
+    def skipped_perf_tests(self):
+        return self._expectations_from_skipped_files([self.perf_tests_dir()])
+
+    def skips_perf_test(self, test_name):
+        for test_or_category in self.skipped_perf_tests():
+            if test_or_category == test_name:
+                return True
+            category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
+            if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
+                return True
+        return False
+
+    def name(self):
+        """Returns a name that uniquely identifies this particular type of port
+        (e.g., "mac-snowleopard" or "chromium-linux-x86_x64" and can be passed
+        to factory.get() to instantiate the port."""
+        return self._name
+
+    def operating_system(self):
+        # Subclasses should override this default implementation.
+        return 'mac'
+
+    def version(self):
+        """Returns a string indicating the version of a given platform, e.g.
+        'leopard' or 'xp'.
+
+        This is used to help identify the exact port when parsing test
+        expectations, determining search paths, and logging information."""
+        return self._version
+
+    def architecture(self):
+        return self._architecture
+
+    def get_option(self, name, default_value=None):
+        return getattr(self._options, name, default_value)
+
+    def set_option_default(self, name, default_value):
+        return self._options.ensure_value(name, default_value)
+
+    @memoized
+    def path_to_generic_test_expectations_file(self):
+        return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
+
+    @memoized
+    def path_to_test_expectations_file(self):
+        """Update the test expectations to the passed-in string.
+
+        This is used by the rebaselining tool. Raises NotImplementedError
+        if the port does not use expectations files."""
+
+        # FIXME: We need to remove this when we make rebaselining work with multiple files and just generalize expectations_files().
+
+        # test_expectations are always in mac/ not mac-leopard/ by convention, hence we use port_name instead of name().
+        return self._filesystem.join(self._webkit_baseline_path(self.port_name), 'TestExpectations')
+
+    def relative_test_filename(self, filename):
+        """Returns a test_name a relative unix-style path for a filename under the LayoutTests
+        directory. Ports may legitimately return abspaths here if no relpath makes sense."""
+        # Ports that run on windows need to override this method to deal with
+        # filenames with backslashes in them.
+        if filename.startswith(self.layout_tests_dir()):
+            return self.host.filesystem.relpath(filename, self.layout_tests_dir())
+        else:
+            return self.host.filesystem.abspath(filename)
+
+    @memoized
+    def abspath_for_test(self, test_name):
+        """Returns the full path to the file for a given test name. This is the
+        inverse of relative_test_filename()."""
+        return self._filesystem.join(self.layout_tests_dir(), test_name)
+
+    def results_directory(self):
+        """Absolute path to the place to store the test results (uses --results-directory)."""
+        if not self._results_directory:
+            option_val = self.get_option('results_directory') or self.default_results_directory()
+            self._results_directory = self._filesystem.abspath(option_val)
+        return self._results_directory
+
+    def perf_results_directory(self):
+        return self._build_path()
+
+    def default_results_directory(self):
+        """Absolute path to the default place to store the test results."""
+        # Results are stored relative to the built products to make it easy
+        # to have multiple copies of webkit checked out and built.
+        return self._build_path('layout-test-results')
+
+    def setup_test_run(self):
+        """Perform port-specific work at the beginning of a test run."""
+        pass
+
+    def clean_up_test_run(self):
+        """Perform port-specific work at the end of a test run."""
+        if self._image_differ:
+            self._image_differ.stop()
+            self._image_differ = None
+
+    # FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
+    def _value_or_default_from_environ(self, name, default=None):
+        if name in os.environ:
+            return os.environ[name]
+        return default
+
+    def _copy_value_from_environ_if_set(self, clean_env, name):
+        if name in os.environ:
+            clean_env[name] = os.environ[name]
+
+    def setup_environ_for_server(self, server_name=None):
+        # We intentionally copy only a subset of os.environ when
+        # launching subprocesses to ensure consistent test results.
+        clean_env = {}
+        variables_to_copy = [
+            # For Linux:
+            'XAUTHORITY',
+            'HOME',
+            'LANG',
+            'LD_LIBRARY_PATH',
+            'DBUS_SESSION_BUS_ADDRESS',
+            'XDG_DATA_DIRS',
+
+            # Darwin:
+            'DYLD_LIBRARY_PATH',
+            'HOME',
+
+            # CYGWIN:
+            'HOMEDRIVE',
+            'HOMEPATH',
+            '_NT_SYMBOL_PATH',
+
+            # Windows:
+            'PATH',
+
+            # Most ports (?):
+            'WEBKIT_TESTFONTS',
+            'WEBKITOUTPUTDIR',
+
+            # Chromium:
+            'CHROME_DEVEL_SANDBOX',
+        ]
+        for variable in variables_to_copy:
+            self._copy_value_from_environ_if_set(clean_env, variable)
+
+        # For Linux:
+        clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
+
+        for string_variable in self.get_option('additional_env_var', []):
+            [name, value] = string_variable.split('=', 1)
+            clean_env[name] = value
+
+        return clean_env
+
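+    # Illustrative sketch of --additional-env-var handling (mirrors
+    # test_additional_env_var in base_unittest.py): each 'NAME=VALUE' string is
+    # split on the first '=' and folded into the clean environment.
+    #
+    #   port._options.additional_env_var = ['FOO=BAR', 'BAR=FOO']
+    #   env = port.setup_environ_for_server()
+    #   env['FOO']  # -> 'BAR'
+    #   env['BAR']  # -> 'FOO'
+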
+    def show_results_html_file(self, results_filename):
+        """This routine should display the HTML file pointed at by
+        results_filename in the user's browser."""
+        return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
+
+    def create_driver(self, worker_number, no_timeout=False):
+        """Return a newly created Driver subclass for starting/stopping the test driver."""
+        return driver.DriverProxy(self, worker_number, self._driver_class(), pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+
+    def start_helper(self):
+        """If a port needs to reconfigure graphics settings or do other
+        things to ensure a known test configuration, it should override this
+        method."""
+        pass
+
+    def requires_http_server(self):
+        """Does the port require an HTTP server for running tests? This could
+        be the case when the tests aren't run on the host platform."""
+        return False
+
+    def start_http_server(self, additional_dirs=None, number_of_servers=None):
+        """Start a web server. Raise an error if it can't start or is already running.
+
+        Ports can stub this out if they don't need a web server to be running."""
+        assert not self._http_server, 'Already running an http server.'
+
+        if self._uses_apache():
+            server = apache_http_server.LayoutTestApacheHttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
+        else:
+            server = http_server.Lighttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
+
+        server.start()
+        self._http_server = server
+
+    def start_websocket_server(self):
+        """Start a web server. Raise an error if it can't start or is already running.
+
+        Ports can stub this out if they don't need a websocket server to be running."""
+        assert not self._websocket_server, 'Already running a websocket server.'
+
+        server = websocket_server.PyWebSocket(self, self.results_directory())
+        server.start()
+        self._websocket_server = server
+
+    def http_server_supports_ipv6(self):
+        # Cygwin is the only platform to still use Apache 1.3, which only supports IPV4.
+        # Once it moves to Apache 2, we can drop this method altogether.
+        if self.host.platform.is_cygwin():
+            return False
+        return True
+
+    def acquire_http_lock(self):
+        self._http_lock = http_lock.HttpLock(None, filesystem=self._filesystem, executive=self._executive)
+        self._http_lock.wait_for_httpd_lock()
+
+    def stop_helper(self):
+        """Shut down the test helper if it is running. Do nothing if
+        it isn't, or it isn't available. If a port overrides start_helper()
+        it must override this routine as well."""
+        pass
+
+    def stop_http_server(self):
+        """Shut down the http server if it is running. Do nothing if it isn't."""
+        if self._http_server:
+            self._http_server.stop()
+            self._http_server = None
+
+    def stop_websocket_server(self):
+        """Shut down the websocket server if it is running. Do nothing if it isn't."""
+        if self._websocket_server:
+            self._websocket_server.stop()
+            self._websocket_server = None
+
+    def release_http_lock(self):
+        if self._http_lock:
+            self._http_lock.cleanup_http_lock()
+
+    def exit_code_from_summarized_results(self, unexpected_results):
+        """Given summarized results, compute the exit code to be returned by new-run-webkit-tests.
+        Bots turn red when this function returns a non-zero value. By default, return the number of regressions
+        so that flaky failures, unexpected passes, and missing results do not turn bots red."""
+        # Don't turn bots red for flaky failures, unexpected passes, and missing results.
+        return unexpected_results['num_regressions']
+
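+    # Illustrative sketch: only the regression count feeds the exit code, so a
+    # summary such as {'num_regressions': 0} exits 0 and {'num_regressions': 2}
+    # exits 2, regardless of flaky failures or unexpected passes.
+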
+    #
+    # TEST EXPECTATION-RELATED METHODS
+    #
+
+    def test_configuration(self):
+        """Returns the current TestConfiguration for the port."""
+        if not self._test_configuration:
+            self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
+        return self._test_configuration
+
+    # FIXME: Belongs on a Platform object.
+    @memoized
+    def all_test_configurations(self):
+        """Returns a list of TestConfiguration instances, representing all available
+        test configurations for this port."""
+        return self._generate_all_test_configurations()
+
+    # FIXME: Belongs on a Platform object.
+    def configuration_specifier_macros(self):
+        """Ports may provide a way to abbreviate configuration specifiers to conveniently
+        refer to them as one term or alias specific values to more generic ones. For example:
+
+        (xp, vista, win7) -> win # Abbreviate all Windows versions into one term.
+        (lucid) -> linux  # Change specific name of the Linux distro to a more generic term.
+
+        Returns a dictionary, each key representing a macro term ('win', for example),
+        and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
+        return {}
+
+    def all_baseline_variants(self):
+        """Returns a list of platform names sufficient to cover all the baselines.
+
+        The list should be sorted so that a later platform will reuse
+        an earlier platform's baselines if they are the same (e.g.,
+        'snowleopard' should precede 'leopard')."""
+        raise NotImplementedError
+
+    def uses_test_expectations_file(self):
+        # This is different from checking test_expectations() is None, because
+        # some ports have Skipped files which are returned as part of test_expectations().
+        return self._filesystem.exists(self.path_to_test_expectations_file())
+
+    def warn_if_bug_missing_in_test_expectations(self):
+        return False
+
+    def expectations_dict(self):
+        """Returns an OrderedDict of name -> expectations strings.
+        The names are expected to be (but not required to be) paths in the filesystem.
+        If the name is a path, the file can be considered updatable for things like rebaselining,
+        so don't use names that look like paths unless they really are paths.
+        Generally speaking the ordering should be files in the filesystem in cascade order
+        (TestExpectations followed by Skipped, if the port honors both formats),
+        then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
+        # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
+        expectations = OrderedDict()
+
+        for path in self.expectations_files():
+            if self._filesystem.exists(path):
+                expectations[path] = self._filesystem.read_text_file(path)
+
+        for path in self.get_option('additional_expectations', []):
+            expanded_path = self._filesystem.expanduser(path)
+            if self._filesystem.exists(expanded_path):
+                _log.debug("reading additional_expectations from path '%s'" % path)
+                expectations[path] = self._filesystem.read_text_file(expanded_path)
+            else:
+                _log.warning("additional_expectations path '%s' does not exist" % path)
+        return expectations
+
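+    # Illustrative ordering (a sketch; the concrete paths are hypothetical): the
+    # generic file comes first, then the port-specific files, then any
+    # --additional-expectations files.
+    #
+    #   port.expectations_dict().keys()
+    #       -> ['/checkout/LayoutTests/TestExpectations',
+    #           '/checkout/LayoutTests/platform/mac/TestExpectations',
+    #           '/tmp/additional-expectations.txt']
+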
+    def _port_specific_expectations_files(self):
+        # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
+        # included via --additional-platform-directory, not the full cascade.
+        search_paths = [self.port_name]
+
+        non_wk2_name = self.name().replace('-wk2', '')
+        if non_wk2_name != self.port_name:
+            search_paths.append(non_wk2_name)
+
+        if self.get_option('webkit_test_runner'):
+            # Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
+            # issues, all wk2 ports share a skipped list under platform/wk2.
+            search_paths.extend(["wk2", self._wk2_port_name()])
+
+        search_paths.extend(self.get_option("additional_platform_directory", []))
+
+        return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in search_paths]
+
+    def expectations_files(self):
+        return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
+
+    def repository_paths(self):
+        """Returns a list of (repository_name, repository_path) tuples of its depending code base.
+        By default it returns a list that only contains a ('WebKit', <webkitRepositoryPath>) tuple."""
+
+        # We use the LayoutTests directory here because webkit_base isn't part of the WebKit repository
+        # in the Chromium port, where trunk isn't checked out as a whole.
+        return [('WebKit', self.layout_tests_dir())]
+
+    _WDIFF_DEL = '##WDIFF_DEL##'
+    _WDIFF_ADD = '##WDIFF_ADD##'
+    _WDIFF_END = '##WDIFF_END##'
+
+    def _format_wdiff_output_as_html(self, wdiff):
+        wdiff = cgi.escape(wdiff)
+        wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
+        wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
+        wdiff = wdiff.replace(self._WDIFF_END, "</span>")
+        html = "<head><style>.del { background: #faa; } "
+        html += ".add { background: #afa; }</style></head>"
+        html += "<pre>%s</pre>" % wdiff
+        return html
+
+    def _wdiff_command(self, actual_filename, expected_filename):
+        executable = self._path_to_wdiff()
+        return [executable,
+                "--start-delete=%s" % self._WDIFF_DEL,
+                "--end-delete=%s" % self._WDIFF_END,
+                "--start-insert=%s" % self._WDIFF_ADD,
+                "--end-insert=%s" % self._WDIFF_END,
+                actual_filename,
+                expected_filename]
+
+    @staticmethod
+    def _handle_wdiff_error(script_error):
+        # Exit 1 means the files differed, any other exit code is an error.
+        if script_error.exit_code != 1:
+            raise script_error
+
+    def _run_wdiff(self, actual_filename, expected_filename):
+        """Runs wdiff and may throw exceptions.
+        This is mostly a hook for unit testing."""
+        # Diffs are treated as binary as they may include multiple files
+        # with conflicting encodings.  Thus we do not decode the output.
+        command = self._wdiff_command(actual_filename, expected_filename)
+        wdiff = self._executive.run_command(command, decode_output=False,
+            error_handler=self._handle_wdiff_error)
+        return self._format_wdiff_output_as_html(wdiff)
+
+    def wdiff_text(self, actual_filename, expected_filename):
+        """Returns a string of HTML indicating the word-level diff of the
+        contents of the two filenames. Returns an empty string if word-level
+        diffing isn't available."""
+        if not self.wdiff_available():
+            return ""
+        try:
+            # It's possible for this to raise a ScriptError if we pass wdiff invalid paths.
+            return self._run_wdiff(actual_filename, expected_filename)
+        except OSError, e:
+            if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+                # Silently ignore cases where wdiff is missing.
+                self._wdiff_available = False
+                return ""
+            raise
+
+    # This is a class variable so we can test error output easily.
+    _pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
+
+    def pretty_patch_text(self, diff_path):
+        if self._pretty_patch_available is None:
+            self._pretty_patch_available = self.check_pretty_patch(logging=False)
+        if not self._pretty_patch_available:
+            return self._pretty_patch_error_html
+        command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
+                   self._pretty_patch_path, diff_path)
+        try:
+            # Diffs are treated as binary (we pass decode_output=False) as they
+            # may contain multiple files of conflicting encodings.
+            return self._executive.run_command(command, decode_output=False)
+        except OSError, e:
+            # If the system is missing ruby, log the error and stop trying.
+            self._pretty_patch_available = False
+            _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
+            return self._pretty_patch_error_html
+        except ScriptError, e:
+            # If ruby failed to run for some reason, log the command
+            # output and stop trying.
+            self._pretty_patch_available = False
+            _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
+            return self._pretty_patch_error_html
+
+    def default_configuration(self):
+        return self._config.default_configuration()
+
+    #
+    # PROTECTED ROUTINES
+    #
+    # The routines below should only be called by routines in this class
+    # or any of its subclasses.
+    #
+
+    def _uses_apache(self):
+        return True
+
+    # FIXME: This does not belong on the port object.
+    @memoized
+    def _path_to_apache(self):
+        """Returns the full path to the apache binary.
+
+        This is needed only by ports that use the apache_http_server module."""
+        # The Apache binary path can vary depending on OS and distribution
+        # See http://wiki.apache.org/httpd/DistrosDefaultLayout
+        for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
+            if self._filesystem.exists(path):
+                return path
+        _log.error("Could not find apache. Not installed or unknown path.")
+        return None
+
+    # FIXME: This belongs on some platform abstraction instead of Port.
+    def _is_redhat_based(self):
+        return self._filesystem.exists('/etc/redhat-release')
+
+    def _is_debian_based(self):
+        return self._filesystem.exists('/etc/debian_version')
+
+    def _is_arch_based(self):
+        return self._filesystem.exists('/etc/arch-release')
+
+    def _apache_version(self):
+        config = self._executive.run_command([self._path_to_apache(), '-v'])
+        return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
+
+    # We pass sys_platform into this method to make it easy to unit test.
+    def _apache_config_file_name_for_platform(self, sys_platform):
+        if sys_platform == 'cygwin':
+            return 'cygwin-httpd.conf'  # CYGWIN is the only platform to still use Apache 1.3.
+        if sys_platform.startswith('linux'):
+            if self._is_redhat_based():
+                return 'fedora-httpd-' + self._apache_version() + '.conf'
+            if self._is_debian_based():
+                return 'apache2-debian-httpd.conf'
+            if self._is_arch_based():
+                return 'archlinux-httpd.conf'
+        # All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
+        return "apache2-httpd.conf"
+
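+    # Illustrative mapping (a sketch, following the branches above):
+    #
+    #   port._apache_config_file_name_for_platform('cygwin')  # -> 'cygwin-httpd.conf'
+    #   port._apache_config_file_name_for_platform('darwin')  # -> 'apache2-httpd.conf'
+    #   # On a Debian-based Linux host:
+    #   port._apache_config_file_name_for_platform('linux2')  # -> 'apache2-debian-httpd.conf'
+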
+    def _path_to_apache_config_file(self):
+        """Returns the full path to the apache configuration file.
+
+        If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
+        contents will be used instead.
+
+        This is needed only by ports that use the apache_http_server module."""
+        config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
+        if config_file_from_env:
+            if not self._filesystem.exists(config_file_from_env):
+                raise IOError('%s was not found on the system' % config_file_from_env)
+            return config_file_from_env
+
+        config_file_name = self._apache_config_file_name_for_platform(sys.platform)
+        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
+
+    def _build_path(self, *comps):
+        root_directory = self.get_option('root')
+        if not root_directory:
+            build_directory = self.get_option('build_directory')
+            if build_directory:
+                root_directory = self._filesystem.join(build_directory, self.get_option('configuration'))
+            else:
+                root_directory = self._config.build_directory(self.get_option('configuration'))
+            # Set --root so that we can pass this to subprocesses and avoid making the
+            # slow call to config.build_directory() N times in each worker.
+            # FIXME: This is like @memoized, but more annoying and fragile; there should be another
+            # way to propagate values without mutating the options list.
+            self.set_option_default('root', root_directory)
+        return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
+
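+    # Illustrative resolution (a sketch, assuming the default Release
+    # configuration; mirrors test_build_path in base_unittest.py): --root wins,
+    # then --build-directory joined with the configuration, then the config
+    # module's build directory.
+    #
+    #   port._options.build_directory = '/my-build-directory/'
+    #   port._build_path()                       # -> '/my-build-directory/Release'
+    #   port._build_path('layout-test-results')  # -> '/my-build-directory/Release/layout-test-results'
+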
+    def _path_to_driver(self, configuration=None):
+        """Returns the full path to the test driver (DumpRenderTree)."""
+        return self._build_path(self.driver_name())
+
+    def _driver_tempdir(self):
+        return self._filesystem.mkdtemp(prefix='%s-' % self.driver_name())
+
+    def _driver_tempdir_for_environment(self):
+        return self._driver_tempdir()
+
+    def _path_to_webcore_library(self):
+        """Returns the full path to a built copy of WebCore."""
+        return None
+
+    def _path_to_helper(self):
+        """Returns the full path to the layout_test_helper binary, which
+        is used to help configure the system for the test run, or None
+        if no helper is needed.
+
+        This is likely only used by start/stop_helper()."""
+        return None
+
+    def _path_to_image_diff(self):
+        """Returns the full path to the image_diff binary, or None if it is not available.
+
+        This is likely used only by diff_image()"""
+        return self._build_path('ImageDiff')
+
+    def _path_to_lighttpd(self):
+        """Returns the path to the LigHTTPd binary.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd')
+
+    def _path_to_lighttpd_modules(self):
+        """Returns the path to the LigHTTPd modules directory.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd_modules')
+
+    def _path_to_lighttpd_php(self):
+        """Returns the path to the LigHTTPd PHP executable.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd_php')
+
+    @memoized
+    def _path_to_wdiff(self):
+        """Returns the full path to the wdiff binary, or None if it is not available.
+
+        This is likely used only by wdiff_text()"""
+        for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
+            if self._filesystem.exists(path):
+                return path
+        return None
+
+    def _webkit_baseline_path(self, platform):
+        """Return the  full path to the top of the baseline tree for a
+        given platform."""
+        return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
+
+    # FIXME: Belongs on a Platform object.
+    def _generate_all_test_configurations(self):
+        """Generates a list of TestConfiguration instances, representing configurations
+        for a platform across all OSes, architectures, build and graphics types."""
+        raise NotImplementedError('Port._generate_test_configurations')
+
+    def _driver_class(self):
+        """Returns the port's driver implementation."""
+        return driver.Driver
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+        name_str = name or '<unknown process name>'
+        pid_str = str(pid or '<unknown>')
+        stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
+        stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
+        return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
+            '\n'.join(('STDOUT: ' + l) for l in stdout_lines),
+            '\n'.join(('STDERR: ' + l) for l in stderr_lines)))
+
+    def look_for_new_crash_logs(self, crashed_processes, start_time):
+        pass
+
+    def look_for_new_samples(self, unresponsive_processes, start_time):
+        pass
+
+    def sample_process(self, name, pid):
+        pass
+
+    def virtual_test_suites(self):
+        return []
+
+    @memoized
+    def populated_virtual_test_suites(self):
+        suites = self.virtual_test_suites()
+
+        # Sanity-check the suites to make sure they don't point to other suites.
+        suite_dirs = [suite.name for suite in suites]
+        for suite in suites:
+            assert suite.base not in suite_dirs
+
+        for suite in suites:
+            base_tests = self._real_tests([suite.base])
+            suite.tests = {}
+            for test in base_tests:
+                suite.tests[test.replace(suite.base, suite.name, 1)] = test
+        return suites
+
+    def _virtual_tests(self, paths, suites):
+        virtual_tests = list()
+        for suite in suites:
+            if paths:
+                for test in suite.tests:
+                    if any(test.startswith(p) for p in paths):
+                        virtual_tests.append(test)
+            else:
+                virtual_tests.extend(suite.tests.keys())
+        return virtual_tests
+
+    def lookup_virtual_test_base(self, test_name):
+        for suite in self.populated_virtual_test_suites():
+            if test_name.startswith(suite.name):
+                return test_name.replace(suite.name, suite.base, 1)
+        return None
+
+    def lookup_virtual_test_args(self, test_name):
+        for suite in self.populated_virtual_test_suites():
+            if test_name.startswith(suite.name):
+                return suite.args
+        return []
+
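+    # Illustrative mapping (a sketch, assuming a hypothetical suite
+    # VirtualTestSuite('virtual/softwarecompositing', 'compositing',
+    # ['--enable-software-compositing'])):
+    #
+    #   port.lookup_virtual_test_base('virtual/softwarecompositing/iframe.html')
+    #       -> 'compositing/iframe.html'
+    #   port.lookup_virtual_test_args('virtual/softwarecompositing/iframe.html')
+    #       -> ['--enable-software-compositing']
+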
+    def should_run_as_pixel_test(self, test_input):
+        if not self._options.pixel_tests:
+            return False
+        if self._options.pixel_test_directories:
+            return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
+        return self._should_run_as_pixel_test(test_input)
+
+    def _should_run_as_pixel_test(self, test_input):
+        # Default behavior is to allow all tests to run as pixel tests if --pixel-tests is on and
+        # --pixel-test-directory is not specified.
+        return True
+
+    # FIXME: Eventually we should standardize port naming, and make this method smart enough
+    # to use for all port configurations (including architectures, graphics types, etc).
+    def _port_flag_for_scripts(self):
+        # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
+        # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
+        return None
+
+    # This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
+    def _arguments_for_configuration(self):
+        config_args = []
+        config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
+        # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
+        port_flag = self._port_flag_for_scripts()
+        if port_flag:
+            config_args.append(port_flag)
+        return config_args
+
+    def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
+        run_script_command = [self.path_to_script(script_name)]
+        if include_configuration_arguments:
+            run_script_command.extend(self._arguments_for_configuration())
+        if args:
+            run_script_command.extend(args)
+        output = self._executive.run_command(run_script_command, cwd=self.webkit_base(), decode_output=decode_output, env=env)
+        _log.debug('Output of %s:\n%s' % (run_script_command, output))
+        return output
+
+    def _build_driver(self):
+        environment = self.host.copy_current_environment()
+        environment.disable_gcc_smartquotes()
+        env = environment.to_dictionary()
+
+        # FIXME: We build both DumpRenderTree and WebKitTestRunner for
+        # WebKitTestRunner runs because DumpRenderTree still includes
+        # the DumpRenderTreeSupport module and the TestNetscapePlugin.
+        # These two projects should be factored out into their own
+        # projects.
+        try:
+            self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
+            if self.get_option('webkit_test_runner'):
+                self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
+        except ScriptError, e:
+            _log.error(e.message_with_output(output_limit=None))
+            return False
+        return True
+
+    def _build_driver_flags(self):
+        return []
+
+    def _tests_for_other_platforms(self):
+        # By default we will skip any directory under LayoutTests/platform
+        # that isn't in our baseline search path (this mirrors what
+        # old-run-webkit-tests does in findTestsToRun()).
+        # Note this returns LayoutTests/platform/*, not platform/*/*.
+        entries = self._filesystem.glob(self._webkit_baseline_path('*'))
+        dirs_to_skip = []
+        for entry in entries:
+            if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
+                basename = self._filesystem.basename(entry)
+                dirs_to_skip.append('platform/%s' % basename)
+        return dirs_to_skip
+
+    def _runtime_feature_list(self):
+        """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
+        return None
+
+    def nm_command(self):
+        return 'nm'
+
+    def _modules_to_search_for_symbols(self):
+        path = self._path_to_webcore_library()
+        if path:
+            return [path]
+        return []
+
+    def _symbols_string(self):
+        symbols = ''
+        for path_to_module in self._modules_to_search_for_symbols():
+            try:
+                symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
+            except OSError, e:
+                _log.warn("Failed to run nm: %s.  Can't determine supported features correctly." % e)
+        return symbols
+
+    # Ports which use run-time feature detection should define this method and return
+    # a dictionary mapping from Feature Names to skipped directories.  NRWT will
+    # run DumpRenderTree --print-supported-features and parse the output.
+    # If the Feature Names are not found in the output, the corresponding directories
+    # will be skipped.
+    def _missing_feature_to_skipped_tests(self):
+        """Return the supported feature dictionary. Keys are feature names and values
+        are the lists of directories to skip if the feature name is not matched."""
+        # FIXME: This list matches WebKitWin and should be moved onto the Win port.
+        return {
+            "Accelerated Compositing": ["compositing"],
+            "3D Rendering": ["animations/3d", "transforms/3d"],
+        }
+
+    def _has_test_in_directories(self, directory_lists, test_list):
+        if not test_list:
+            return False
+
+        directories = itertools.chain.from_iterable(directory_lists)
+        for directory, test in itertools.product(directories, test_list):
+            if test.startswith(directory):
+                return True
+        return False
+
+    def _skipped_tests_for_unsupported_features(self, test_list):
+        # Only check the runtime feature list if there are tests in the test_list that might get skipped.
+        # This is a performance optimization to avoid the subprocess call to DRT.
+        # If the port supports runtime feature detection, disable any tests
+        # for features missing from the runtime feature list.
+        # If _runtime_feature_list returns a non-None value, then prefer
+        # runtime feature detection over static feature detection.
+        if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
+            supported_feature_list = self._runtime_feature_list()
+            if supported_feature_list is not None:
+                return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
+
+        return []
+
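+    # Illustrative sketch: if the runtime feature list reported by DumpRenderTree
+    # does not include "3D Rendering", and the test list touches animations/3d or
+    # transforms/3d, _skipped_tests_for_unsupported_features() would return
+    # ['animations/3d', 'transforms/3d'].
+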
+    def _wk2_port_name(self):
+        # By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
+        # except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
+        return "%s-wk2" % self.port_name
+
+
+class VirtualTestSuite(object):
+    def __init__(self, name, base, args, tests=None):
+        self.name = name
+        self.base = base
+        self.args = args
+        self.tests = tests or set()
+
+    def __repr__(self):
+        return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/base_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,499 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import sys
+import tempfile
+import unittest2 as unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.path import abspath_to_uri
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.port import Port, Driver, DriverOutput
+from webkitpy.port.test import add_unit_tests_to_mock_filesystem, TestPort
+
+class PortTest(unittest.TestCase):
+    def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
+        host = MockSystemHost()
+        if executive:
+            host.executive = executive
+        if with_tests:
+            add_unit_tests_to_mock_filesystem(host.filesystem)
+            return TestPort(host, **kwargs)
+        return Port(host, port_name or 'baseport', **kwargs)
+
+    def test_default_child_processes(self):
+        port = self.make_port()
+        self.assertIsNotNone(port.default_child_processes())
+
+    def test_format_wdiff_output_as_html(self):
+        output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
+        html = self.make_port()._format_wdiff_output_as_html(output)
+        expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
+        self.assertEqual(html, expected_html)
+
+    def test_wdiff_command(self):
+        port = self.make_port()
+        port._path_to_wdiff = lambda: "/path/to/wdiff"
+        command = port._wdiff_command("/actual/path", "/expected/path")
+        expected_command = [
+            "/path/to/wdiff",
+            "--start-delete=##WDIFF_DEL##",
+            "--end-delete=##WDIFF_END##",
+            "--start-insert=##WDIFF_ADD##",
+            "--end-insert=##WDIFF_END##",
+            "/actual/path",
+            "/expected/path",
+        ]
+        self.assertEqual(command, expected_command)
+
+    def _file_with_contents(self, contents, encoding="utf-8"):
+        new_file = tempfile.NamedTemporaryFile()
+        new_file.write(contents.encode(encoding))
+        new_file.flush()
+        return new_file
+
+    def test_pretty_patch_os_error(self):
+        port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
+        oc = OutputCapture()
+        oc.capture_output()
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+        # This tests repeated calls to make sure we cache the result.
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+        oc.restore_output()
+
+    def test_pretty_patch_script_error(self):
+        # FIXME: This is some ugly white-box test hacking ...
+        port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError))
+        port._pretty_patch_available = True
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+        # This tests repeated calls to make sure we cache the result.
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+    def integration_test_run_wdiff(self):
+        executive = Executive()
+        # This may fail on some systems.  We could ask the port
+        # object for the wdiff path, but since we don't know what
+        # port object to use, this is sufficient for now.
+        try:
+            wdiff_path = executive.run_command(["which", "wdiff"]).rstrip()
+        except Exception, e:
+            wdiff_path = None
+
+        port = self.make_port(executive=executive)
+        port._path_to_wdiff = lambda: wdiff_path
+
+        if wdiff_path:
+            # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5
+            actual = self._file_with_contents(u"foo")
+            expected = self._file_with_contents(u"bar")
+            wdiff = port._run_wdiff(actual.name, expected.name)
+            expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>"
+            self.assertEqual(wdiff, expected_wdiff)
+            # Running the full wdiff_text method should give the same result.
+            port._wdiff_available = True  # In case it's somehow already disabled.
+            wdiff = port.wdiff_text(actual.name, expected.name)
+            self.assertEqual(wdiff, expected_wdiff)
+            # wdiff should still be available after running wdiff_text with a valid diff.
+            self.assertTrue(port._wdiff_available)
+            actual.close()
+            expected.close()
+
+            # Bogus paths should raise a script error.
+            self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2")
+            self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2")
+            # wdiff will still be available after running wdiff_text with invalid paths.
+            self.assertTrue(port._wdiff_available)
+
+        # If wdiff does not exist _run_wdiff should throw an OSError.
+        port._path_to_wdiff = lambda: "/invalid/path/to/wdiff"
+        self.assertRaises(OSError, port._run_wdiff, "foo", "bar")
+
+        # wdiff_text should not throw an error if wdiff does not exist.
+        self.assertEqual(port.wdiff_text("foo", "bar"), "")
+        # However wdiff should not be available after running wdiff_text if wdiff is missing.
+        self.assertFalse(port._wdiff_available)
+
+    def test_wdiff_text(self):
+        port = self.make_port()
+        port.wdiff_available = lambda: True
+        port._run_wdiff = lambda a, b: 'PASS'
+        self.assertEqual('PASS', port.wdiff_text(None, None))
+
+    def test_diff_text(self):
+        port = self.make_port()
+        # Make sure that we don't run into decoding exceptions when the
+        # filenames are unicode, with regular or malformed input (expected or
+        # actual input is always raw bytes, not unicode).
+        port.diff_text('exp', 'act', 'exp.txt', 'act.txt')
+        port.diff_text('exp', 'act', u'exp.txt', 'act.txt')
+        port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt')
+
+        port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt')
+        port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt')
+
+        # Though expected and actual files should always be read in with no
+        # encoding (and be stored as str objects), test unicode inputs just to
+        # be safe.
+        port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt')
+        port.diff_text(
+            u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt')
+
+        # And make sure we actually get diff output.
+        diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
+        self.assertIn('foo', diff)
+        self.assertIn('bar', diff)
+        self.assertIn('exp.txt', diff)
+        self.assertIn('act.txt', diff)
+        self.assertNotIn('nosuchthing', diff)
+
+    def test_setup_test_run(self):
+        port = self.make_port()
+        # This routine is a no-op. We just test it for coverage.
+        port.setup_test_run()
+
+    def test_test_dirs(self):
+        port = self.make_port()
+        port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
+        port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
+        dirs = port.test_dirs()
+        self.assertIn('canvas', dirs)
+        self.assertIn('css2.1', dirs)
+
+    def test_skipped_perf_tests(self):
+        port = self.make_port()
+
+        def add_text_file(dirname, filename, content='some content'):
+            dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
+            port.host.filesystem.maybe_make_directory(dirname)
+            port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
+
+        add_text_file('inspector', 'test1.html')
+        add_text_file('inspector', 'unsupported_test1.html')
+        add_text_file('inspector', 'test2.html')
+        add_text_file('inspector/resources', 'resource_file.html')
+        add_text_file('unsupported', 'unsupported_test2.html')
+        add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
+        self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
+
+    def test_get_option__set(self):
+        options, args = optparse.OptionParser().parse_args([])
+        options.foo = 'bar'
+        port = self.make_port(options=options)
+        self.assertEqual(port.get_option('foo'), 'bar')
+
+    def test_get_option__unset(self):
+        port = self.make_port()
+        self.assertIsNone(port.get_option('foo'))
+
+    def test_get_option__default(self):
+        port = self.make_port()
+        self.assertEqual(port.get_option('foo', 'bar'), 'bar')
+
+    def test_additional_platform_directory(self):
+        port = self.make_port(port_name='foo')
+        port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
+        layout_test_dir = port.layout_tests_dir()
+        test_file = 'fast/test.html'
+
+        # No additional platform directory
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [(None, 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo')
+
+        # Simple additional platform directory
+        port._options.additional_platform_directory = ['/tmp/local-baselines']
+        port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [('/tmp/local-baselines', 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), '/tmp/local-baselines')
+
+        # Multiple additional platform directories
+        port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [('/tmp/local-baselines', 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), '/foo')
+
+    def test_nonexistant_expectations(self):
+        port = self.make_port(port_name='foo')
+        port.expectations_files = lambda: ['/mock-checkout/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/LayoutTests/platform/nonexistant/TestExpectations']
+        port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/exists/TestExpectations', '')
+        self.assertEqual('\n'.join(port.expectations_dict().keys()), '/mock-checkout/LayoutTests/platform/exists/TestExpectations')
+
+    def test_additional_expectations(self):
+        port = self.make_port(port_name='foo')
+        port.port_name = 'foo'
+        port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/foo/TestExpectations', '')
+        port._filesystem.write_text_file(
+            '/tmp/additional-expectations-1.txt', 'content1\n')
+        port._filesystem.write_text_file(
+            '/tmp/additional-expectations-2.txt', 'content2\n')
+
+        self.assertEqual('\n'.join(port.expectations_dict().values()), '')
+
+        port._options.additional_expectations = [
+            '/tmp/additional-expectations-1.txt']
+        self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
+
+        port._options.additional_expectations = [
+            '/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
+        self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
+
+        port._options.additional_expectations = [
+            '/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
+        self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n\ncontent2\n')
+
+    def test_additional_env_var(self):
+        port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
+        self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
+        environment = port.setup_environ_for_server()
+        self.assertTrue('FOO' in environment and 'BAR' in environment)
+        self.assertEqual(environment['FOO'], 'BAR')
+        self.assertEqual(environment['BAR'], 'FOO')
+
+    def test_uses_test_expectations_file(self):
+        port = self.make_port(port_name='foo')
+        port.port_name = 'foo'
+        port.path_to_test_expectations_file = lambda: '/mock-results/TestExpectations'
+        self.assertFalse(port.uses_test_expectations_file())
+        port._filesystem = MockFileSystem({'/mock-results/TestExpectations': ''})
+        self.assertTrue(port.uses_test_expectations_file())
+
+    def test_find_no_paths_specified(self):
+        port = self.make_port(with_tests=True)
+        layout_tests_dir = port.layout_tests_dir()
+        tests = port.tests([])
+        self.assertNotEqual(len(tests), 0)
+
+    def test_find_one_test(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['failures/expected/image.html'])
+        self.assertEqual(len(tests), 1)
+
+    def test_find_glob(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['failures/expected/im*'])
+        self.assertEqual(len(tests), 2)
+
+    def test_find_with_skipped_directories(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['userscripts'])
+        self.assertNotIn('userscripts/resources/iframe.html', tests)
+
+    def test_find_with_skipped_directories_2(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['userscripts/resources'])
+        self.assertEqual(tests, [])
+
+    def test_is_test_file(self):
+        filesystem = MockFileSystem()
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.html'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.shtml'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.svg'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'test-ref-test.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo.png'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.svg'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.xht'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.svg'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.xht'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.xhtml'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'ref-foo.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'notref-foo.xhr'))
+
+    def test_parse_reftest_list(self):
+        port = self.make_port(with_tests=True)
+        port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
+        "",
+        "# some comment",
+        "!= test-2.html test-notref.html # more comments",
+        "== test-3.html test-ref.html",
+        "== test-3.html test-ref2.html",
+        "!= test-3.html test-notref.html"])
+
+        reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
+        self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')],
+            'bar/test-2.html': [('!=', 'bar/test-notref.html')],
+            'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
+
+    def test_reference_files(self):
+        port = self.make_port(with_tests=True)
+        self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
+        self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
+        self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
+
+    def test_operating_system(self):
+        self.assertEqual('mac', self.make_port().operating_system())
+
+    def test_http_server_supports_ipv6(self):
+        port = self.make_port()
+        self.assertTrue(port.http_server_supports_ipv6())
+        port.host.platform.os_name = 'cygwin'
+        self.assertFalse(port.http_server_supports_ipv6())
+        port.host.platform.os_name = 'win'
+        self.assertTrue(port.http_server_supports_ipv6())
+
+    def test_check_httpd_success(self):
+        port = self.make_port(executive=MockExecutive2())
+        port._path_to_apache = lambda: '/usr/sbin/httpd'
+        capture = OutputCapture()
+        capture.capture_output()
+        self.assertTrue(port.check_httpd())
+        _, _, logs = capture.restore_output()
+        self.assertEqual('', logs)
+
+    def test_httpd_returns_error_code(self):
+        port = self.make_port(executive=MockExecutive2(exit_code=1))
+        port._path_to_apache = lambda: '/usr/sbin/httpd'
+        capture = OutputCapture()
+        capture.capture_output()
+        self.assertFalse(port.check_httpd())
+        _, _, logs = capture.restore_output()
+        self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
+
+    def test_test_exists(self):
+        port = self.make_port(with_tests=True)
+        self.assertTrue(port.test_exists('passes'))
+        self.assertTrue(port.test_exists('passes/text.html'))
+        self.assertFalse(port.test_exists('passes/does_not_exist.html'))
+
+        self.assertTrue(port.test_exists('virtual'))
+        self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
+        self.assertTrue(port.test_exists('virtual/passes/text.html'))
+
+    def test_test_isfile(self):
+        port = self.make_port(with_tests=True)
+        self.assertFalse(port.test_isfile('passes'))
+        self.assertTrue(port.test_isfile('passes/text.html'))
+        self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
+
+        self.assertFalse(port.test_isfile('virtual'))
+        self.assertTrue(port.test_isfile('virtual/passes/text.html'))
+        self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
+
+    def test_test_isdir(self):
+        port = self.make_port(with_tests=True)
+        self.assertTrue(port.test_isdir('passes'))
+        self.assertFalse(port.test_isdir('passes/text.html'))
+        self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
+        self.assertFalse(port.test_isdir('passes/does_not_exist/'))
+
+        self.assertTrue(port.test_isdir('virtual'))
+        self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
+        self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
+        self.assertFalse(port.test_isdir('virtual/passes/text.html'))
+
+    def test_tests(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests([])
+        self.assertIn('passes/text.html', tests)
+        self.assertIn('virtual/passes/text.html', tests)
+
+        tests = port.tests(['passes'])
+        self.assertIn('passes/text.html', tests)
+        self.assertIn('passes/passes/test-virtual-passes.html', tests)
+        self.assertNotIn('virtual/passes/text.html', tests)
+
+        tests = port.tests(['virtual/passes'])
+        self.assertNotIn('passes/text.html', tests)
+        self.assertIn('virtual/passes/test-virtual-passes.html', tests)
+        self.assertIn('virtual/passes/passes/test-virtual-passes.html', tests)
+        self.assertNotIn('virtual/passes/test-virtual-virtual/passes.html', tests)
+        self.assertNotIn('virtual/passes/virtual/passes/test-virtual-passes.html', tests)
+
+    def test_build_path(self):
+        port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
+        self.assertEqual(port._build_path(), '/my-build-directory/Release')
+
+    def test_dont_require_http_server(self):
+        port = self.make_port()
+        self.assertEqual(port.requires_http_server(), False)
+
+
+class NaturalCompareTest(unittest.TestCase):
+    def setUp(self):
+        self._port = TestPort(MockSystemHost())
+
+    def assert_cmp(self, x, y, result):
+        self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result)
+
+    def test_natural_compare(self):
+        self.assert_cmp('a', 'a', 0)
+        self.assert_cmp('ab', 'a', 1)
+        self.assert_cmp('a', 'ab', -1)
+        self.assert_cmp('', '', 0)
+        self.assert_cmp('', 'ab', -1)
+        self.assert_cmp('1', '2', -1)
+        self.assert_cmp('2', '1', 1)
+        self.assert_cmp('1', '10', -1)
+        self.assert_cmp('2', '10', -1)
+        self.assert_cmp('foo_1.html', 'foo_2.html', -1)
+        self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
+        self.assert_cmp('foo_1.html', 'foo_10.html', -1)
+        self.assert_cmp('foo_2.html', 'foo_10.html', -1)
+        self.assert_cmp('foo_23.html', 'foo_10.html', 1)
+        self.assert_cmp('foo_23.html', 'foo_100.html', -1)
+
+
+class KeyCompareTest(unittest.TestCase):
+    def setUp(self):
+        self._port = TestPort(MockSystemHost())
+
+    def assert_cmp(self, x, y, result):
+        self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result)
+
+    def test_test_key(self):
+        self.assert_cmp('/a', '/a', 0)
+        self.assert_cmp('/a', '/b', -1)
+        self.assert_cmp('/a2', '/a10', -1)
+        self.assert_cmp('/a2/foo', '/a10/foo', -1)
+        self.assert_cmp('/a/foo11', '/a/foo2', 1)
+        self.assert_cmp('/ab', '/a/a/b', -1)
+        self.assert_cmp('/a/a/b', '/ab', 1)
+        self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/builders.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,134 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+from webkitpy.common.memoized import memoized
+
+
+# In this dictionary, each item stores:
+# * port_name -- a fully qualified port name
+# * is_debug -- whether we are using a debug build
+# * move_overwritten_baselines_to -- (optional) list of platform directories that we will copy an existing
+#      baseline to before pulling down a new baseline during rebaselining. This is useful
+#      for bringing up a new port; for example, when Lion was the most recent Mac version and
+#      we wanted to bring up Mountain Lion, we would copy an existing baseline in platform/mac
+#      to platform/mac-mountainlion before updating the platform/mac entry.
+# * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
+#      This is useful when we don't have bots that cover particular configurations; so, e.g., you might
+#      support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the
+#      mac-mountainlion results into platform/mac temporarily.
+
+_exact_matches = {
+    # These builders are on build.webkit.org.
+    "Apple MountainLion Release WK1 (Tests)": {"port_name": "mac-mountainlion", "is_debug": False, "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Debug WK1 (Tests)": {"port_name": "mac-mountainlion", "is_debug": True, "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Release WK2 (Tests)": {"port_name": "mac-mountainlion-wk2", "is_debug": False, "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Debug WK2 (Tests)": {"port_name": "mac-mountainlion-wk2", "is_debug": True, "rebaseline_override_dir": "mac"},
+    "Apple Lion Release WK1 (Tests)": {"port_name": "mac-lion", "is_debug": False},
+    "Apple Lion Debug WK1 (Tests)": {"port_name": "mac-lion", "is_debug": True},
+    "Apple Lion Release WK2 (Tests)": {"port_name": "mac-lion-wk2", "is_debug": False},
+    "Apple Lion Debug WK2 (Tests)": {"port_name": "mac-lion-wk2", "is_debug": True},
+
+    "Apple Win XP Debug (Tests)": {"port_name": "win-xp", "is_debug": True},
+    # FIXME: Remove rebaseline_override_dir once there is an Apple buildbot that corresponds to platform/win.
+    "Apple Win 7 Release (Tests)": {"port_name": "win-7sp0", "is_debug": False, "rebaseline_override_dir": "win"},
+
+    "GTK Linux 32-bit Release": {"port_name": "gtk", "is_debug": False},
+    "GTK Linux 64-bit Debug": {"port_name": "gtk", "is_debug": True},
+    "GTK Linux 64-bit Release": {"port_name": "gtk", "is_debug": False},
+    "GTK Linux 64-bit Release WK2 (Tests)": {"port_name": "gtk-wk2", "is_debug": False},
+
+    # FIXME: Remove rebaseline_override_dir once there are Qt bots for all the platform/qt-* directories.
+    "Qt Linux Release": {"port_name": "qt-linux", "is_debug": False, "rebaseline_override_dir": "qt"},
+
+    "EFL Linux 64-bit Release": {"port_name": "efl", "is_debug": False},
+    "EFL Linux 64-bit Release WK2": {"port_name": "efl-wk2", "is_debug": False},
+    "EFL Linux 64-bit Debug WK2": {"port_name": "efl-wk2", "is_debug": True},
+}
+
+
+_fuzzy_matches = {
+    # These builders are on build.webkit.org.
+    r"SnowLeopard": "mac-snowleopard",
+    r"Apple Lion": "mac-lion",
+    r"Windows": "win",
+    r"GTK": "gtk",
+    r"Qt": "qt",
+}
+
+
+_ports_without_builders = [
+    "qt-mac",
+    "qt-win",
+    "qt-wk2",
+]
+
+
+def builder_path_from_name(builder_name):
+    return re.sub(r'[\s().]', '_', builder_name)
+
+
+def all_builder_names():
+    return sorted(set(_exact_matches.keys()))
+
+
+def all_port_names():
+    return sorted(set(map(lambda x: x["port_name"], _exact_matches.values()) + _ports_without_builders))
+
+
+def rebaseline_override_dir(builder_name):
+    return _exact_matches[builder_name].get("rebaseline_override_dir", None)
+
+
+def move_overwritten_baselines_to(builder_name):
+    return _exact_matches[builder_name].get("move_overwritten_baselines_to", [])
+
+
+def port_name_for_builder_name(builder_name):
+    if builder_name in _exact_matches:
+        return _exact_matches[builder_name]["port_name"]
+
+    for regexp, port_name in _fuzzy_matches.items():
+        if re.match(regexp, builder_name):
+            return port_name
+
+
+def builder_name_for_port_name(target_port_name):
+    debug_builder_name = None
+    for builder_name, builder_info in _exact_matches.items():
+        if builder_info['port_name'] == target_port_name:
+            if builder_info['is_debug']:
+                debug_builder_name = builder_name
+            else:
+                return builder_name
+    return debug_builder_name
+
+
+def builder_path_for_port_name(port_name):
+    return builder_path_from_name(builder_name_for_port_name(port_name))
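+
+
+# Illustrative lookups (a sketch for readers, not part of the module API; the
+# builder names below restate entries from the tables above, except the Qt one,
+# which is made up to show the _fuzzy_matches fallback):
+#
+#   port_name_for_builder_name("Apple Lion Release WK1 (Tests)")  # -> "mac-lion"
+#   port_name_for_builder_name("Qt Linux Release (Perf)")         # -> "qt" (fuzzy match)
+#   builder_name_for_port_name("mac-lion")                        # -> "Apple Lion Release WK1 (Tests)"
+#   builder_path_from_name("Apple Lion Release WK1 (Tests)")      # -> "Apple_Lion_Release_WK1__Tests_"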
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/builders_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,41 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import builders
+import unittest2 as unittest
+
+
+class BuildersTest(unittest.TestCase):
+    def test_path_from_name(self):
+        tests = {
+            'test': 'test',
+            'Mac 10.6 (dbg)(1)': 'Mac_10_6__dbg__1_',
+            '(.) ': '____',
+        }
+        for name, expected in tests.items():
+            self.assertEqual(expected, builders.builder_path_from_name(name))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/config.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,141 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper objects for WebKit-specific utility routines."""
+
+# FIXME: This file needs to be unified with common/config/ports.py .
+
+import logging
+
+from webkitpy.common import webkit_finder
+
+
+_log = logging.getLogger(__name__)
+
+#
+# FIXME: This is used to record if we've already hit the filesystem to look
+# for a default configuration. We cache this to speed up the unit tests,
+# but this can be reset with clear_cached_configuration(). This should be
+# replaced with us consistently using MockConfigs() for tests that don't
+# hit the filesystem at all and provide a reliable value.
+#
+_have_determined_configuration = False
+_configuration = "Release"
+
+
+def clear_cached_configuration():
+    global _have_determined_configuration, _configuration
+    _have_determined_configuration = False
+    _configuration = "Release"
+
+
+class Config(object):
+    _FLAGS_FROM_CONFIGURATIONS = {
+        "Debug": "--debug",
+        "Release": "--release",
+    }
+
+    def __init__(self, executive, filesystem, port_implementation=None):
+        self._executive = executive
+        self._filesystem = filesystem
+        self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
+        self._default_configuration = None
+        self._build_directories = {}
+        self._port_implementation = port_implementation
+
+    def build_directory(self, configuration):
+        """Returns the path to the build directory for the configuration."""
+        if configuration:
+            flags = ["--configuration", self.flag_for_configuration(configuration)]
+        else:
+            configuration = ""
+            flags = []
+
+        if self._port_implementation:
+            flags.append('--' + self._port_implementation)
+
+        if not self._build_directories.get(configuration):
+            args = ["perl", self._webkit_finder.path_to_script("webkit-build-directory")] + flags
+            output = self._executive.run_command(args, cwd=self._webkit_finder.webkit_base(), return_stderr=False).rstrip()
+            parts = output.split("\n")
+            self._build_directories[configuration] = parts[0]
+
+            if len(parts) == 2:
+                default_configuration = parts[1][len(parts[0]):]
+                if default_configuration.startswith("/"):
+                    default_configuration = default_configuration[1:]
+                self._build_directories[default_configuration] = parts[1]
+
+        return self._build_directories[configuration]
+
+    def flag_for_configuration(self, configuration):
+        return self._FLAGS_FROM_CONFIGURATIONS[configuration]
+
+    def default_configuration(self):
+        """Returns the default configuration for the user.
+
+        Returns the value set by 'set-webkit-configuration', or "Release"
+        if that has not been set. This mirrors the logic in webkitdirs.pm."""
+        if not self._default_configuration:
+            self._default_configuration = self._determine_configuration()
+        if not self._default_configuration:
+            self._default_configuration = 'Release'
+        if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS:
+            _log.warn("Configuration \"%s\" is not a recognized value.\n" % self._default_configuration)
+            _log.warn("Scripts may fail.  See 'set-webkit-configuration --help'.")
+        return self._default_configuration
+
+    def _determine_configuration(self):
+        # This mirrors the logic in webkitdirs.pm:determineConfiguration().
+        #
+        # FIXME: See the comment at the top of the file regarding unit tests
+        # and our use of global mutable static variables.
+        # FIXME: We should just @memoize this method and then this will only
+        # be read once per object lifetime (which should be sufficiently fast).
+        global _have_determined_configuration, _configuration
+        if not _have_determined_configuration:
+            contents = self._read_configuration()
+            if not contents:
+                contents = "Release"
+            if contents == "Deployment":
+                contents = "Release"
+            if contents == "Development":
+                contents = "Debug"
+            _configuration = contents
+            _have_determined_configuration = True
+        return _configuration
+
+    def _read_configuration(self):
+        try:
+            configuration_path = self._filesystem.join(self.build_directory(None), "Configuration")
+            if not self._filesystem.exists(configuration_path):
+                return None
+        except:
+            return None
+
+        return self._filesystem.read_text_file(configuration_path).rstrip()
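+
+
+# Illustrative usage (a sketch; this mirrors what config_unittest.py does with
+# the mocks from webkitpy.common.system):
+#
+#   from webkitpy.common.system.executive_mock import MockExecutive2
+#   from webkitpy.common.system.filesystem_mock import MockFileSystem
+#
+#   c = Config(MockExecutive2(output='foo\nfoo/Debug'),
+#              MockFileSystem({'foo/Configuration': 'Debug'}))
+#   c.build_directory(None)        # -> 'foo'
+#   c.default_configuration()      # -> 'Debug'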
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/config_standalone.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,69 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""FIXME: This script is used by
+config_unittest.test_default_configuration__standalone() to read the
+default configuration to work around any possible caching / reset bugs. See
+https://bugs.webkit.org/show_bug.cgi?id=49360 for the motivation. We can remove
+this test when we remove the global configuration cache in config.py."""
+
+import os
+import sys
+
+
+# Ensure that webkitpy is in PYTHONPATH.
+this_dir = os.path.abspath(sys.path[0])
+up = os.path.dirname
+script_dir = up(up(up(this_dir)))
+if script_dir not in sys.path:
+    sys.path.append(script_dir)
+
+from webkitpy.common.system import executive
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system import filesystem
+from webkitpy.common.system import filesystem_mock
+
+import config
+
+
+def main(argv=None):
+    if not argv:
+        argv = sys.argv
+
+    if len(argv) == 3 and argv[1] == '--mock':
+        e = executive_mock.MockExecutive2(output='foo\nfoo/%s' % argv[2])
+        fs = filesystem_mock.MockFileSystem({'foo/Configuration': argv[2]})
+    else:
+        e = executive.Executive()
+        fs = filesystem.FileSystem()
+
+    c = config.Config(e, fs)
+    print c.default_configuration()
+
+if __name__ == '__main__':
+    main()
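+
+# Illustrative invocation (a sketch; this is how config_unittest's
+# test_default_configuration__standalone() drives the script):
+#
+#   $ python config_standalone.py --mock Debug
+#   Debug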
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/config_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,158 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+import unittest2 as unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.webkit_finder import WebKitFinder
+
+import config
+
+
+class ConfigTest(unittest.TestCase):
+    def setUp(self):
+        config.clear_cached_configuration()
+
+    def tearDown(self):
+        config.clear_cached_configuration()
+
+    def make_config(self, output='', files=None, exit_code=0, exception=None, run_command_fn=None, stderr='', port_implementation=None):
+        e = MockExecutive2(output=output, exit_code=exit_code, exception=exception, run_command_fn=run_command_fn, stderr=stderr)
+        fs = MockFileSystem(files)
+        return config.Config(e, fs, port_implementation=port_implementation)
+
+    def assert_configuration(self, contents, expected):
+        # This tests that a configuration file containing
+        # _contents_ ends up being interpreted as _expected_.
+        output = 'foo\nfoo/%s' % contents
+        c = self.make_config(output, {'foo/Configuration': contents})
+        self.assertEqual(c.default_configuration(), expected)
+
+    def test_build_directory(self):
+        # --top-level
+        def mock_webkit_build_directory(arg_list):
+            if arg_list == ['--top-level']:
+                return '/WebKitBuild/'
+            elif arg_list == ['--configuration', '--debug']:
+                return '/WebKitBuild/Debug'
+            elif arg_list == ['--configuration', '--release']:
+                return '/WebKitBuild/Release'
+            elif arg_list == []:
+                return '/WebKitBuild/\n/WebKitBuild//Debug\n'
+            return 'Error'
+
+        def mock_run_command(arg_list):
+            if 'webkit-build-directory' in arg_list[1]:
+                return mock_webkit_build_directory(arg_list[2:])
+            return 'Error'
+
+        c = self.make_config(run_command_fn=mock_run_command)
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+        # Test again to check caching
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+        # Test other values
+        self.assertTrue(c.build_directory('Release').endswith('/Release'))
+        self.assertTrue(c.build_directory('Debug').endswith('/Debug'))
+        self.assertRaises(KeyError, c.build_directory, 'Unknown')
+
+        # Test that stderr output from webkit-build-directory won't mangle the build dir
+        c = self.make_config(output='/WebKitBuild/', stderr="mock stderr output from webkit-build-directory")
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+    def test_build_directory_passes_port_implementation(self):
+        def mock_run_command(arg_list):
+            self.assertIn('--gtk', arg_list)
+            return '/tmp'
+
+        c = self.make_config(run_command_fn=mock_run_command, port_implementation='gtk')
+        self.assertEqual(c.build_directory(None), '/tmp')
+
+    def test_default_configuration__release(self):
+        self.assert_configuration('Release', 'Release')
+
+    def test_default_configuration__debug(self):
+        self.assert_configuration('Debug', 'Debug')
+
+    def test_default_configuration__deployment(self):
+        self.assert_configuration('Deployment', 'Release')
+
+    def test_default_configuration__development(self):
+        self.assert_configuration('Development', 'Debug')
+
+    def test_default_configuration__notfound(self):
+        # This tests what happens if the default configuration file doesn't exist.
+        c = self.make_config(output='foo\nfoo/Release', files={'foo/Configuration': None})
+        self.assertEqual(c.default_configuration(), "Release")
+
+    def test_default_configuration__unknown(self):
+        # Ignore the warning about an unknown configuration value.
+        oc = OutputCapture()
+        oc.capture_output()
+        self.assert_configuration('Unknown', 'Unknown')
+        oc.restore_output()
+
+    def test_default_configuration__standalone(self):
+        # FIXME: This test runs a standalone python script to test
+        # reading the default configuration to work around any possible
+        # caching / reset bugs. See https://bugs.webkit.org/show_bug.cgi?id=49360
+        # for the motivation. We can remove this test when we remove the
+        # global configuration cache in config.py.
+        e = Executive()
+        fs = FileSystem()
+        c = config.Config(e, fs)
+        script = WebKitFinder(fs).path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'port', 'config_standalone.py')
+
+        # Note: don't use 'Release' here, since that's the normal default.
+        expected = 'Debug'
+
+        args = [sys.executable, script, '--mock', expected]
+        actual = e.run_command(args).rstrip()
+        self.assertEqual(actual, expected)
+
+    def test_default_configuration__no_perl(self):
+        # We need perl to run webkit-build-directory to find out where the
+        # default configuration file is. See what happens if perl isn't
+        # installed. (We should get the default value, 'Release').
+        c = self.make_config(exception=OSError)
+        actual = c.default_configuration()
+        self.assertEqual(actual, 'Release')
+
+    def test_default_configuration__scripterror(self):
+        # We run webkit-build-directory to find out where the default
+        # configuration file is. See what happens if that script fails.
+        # (We should get the default value, 'Release').
+        c = self.make_config(exception=ScriptError())
+        actual = c.default_configuration()
+        self.assertEqual(actual, 'Release')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/driver.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,562 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import copy
+import logging
+import re
+import shlex
+import sys
+import time
+import os
+
+from webkitpy.common.system import path
+from webkitpy.common.system.profiler import ProfilerFactory
+
+
+_log = logging.getLogger(__name__)
+
+
+class DriverInput(object):
+    def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args=None):
+        self.test_name = test_name
+        self.timeout = timeout  # in ms
+        self.image_hash = image_hash
+        self.should_run_pixel_test = should_run_pixel_test
+        self.args = args or []
+
+
+class DriverOutput(object):
+    """Groups information about a output from driver for easy passing
+    and post-processing of data."""
+
+    strip_patterns = []
+    strip_patterns.append((re.compile('at \(-?[0-9]+,-?[0-9]+\) *'), ''))
+    strip_patterns.append((re.compile('size -?[0-9]+x-?[0-9]+ *'), ''))
+    strip_patterns.append((re.compile('text run width -?[0-9]+: '), ''))
+    strip_patterns.append((re.compile('text run width -?[0-9]+ [a-zA-Z ]+: '), ''))
+    strip_patterns.append((re.compile('RenderButton {BUTTON} .*'), 'RenderButton {BUTTON}'))
+    strip_patterns.append((re.compile('RenderImage {INPUT} .*'), 'RenderImage {INPUT}'))
+    strip_patterns.append((re.compile('RenderBlock {INPUT} .*'), 'RenderBlock {INPUT}'))
+    strip_patterns.append((re.compile('RenderTextControl {INPUT} .*'), 'RenderTextControl {INPUT}'))
+    strip_patterns.append((re.compile('\([0-9]+px'), 'px'))
+    strip_patterns.append((re.compile(' *" *\n +" *'), ' '))
+    strip_patterns.append((re.compile('" +$'), '"'))
+    strip_patterns.append((re.compile('- '), '-'))
+    strip_patterns.append((re.compile('\n( *)"\s+'), '\n\g<1>"'))
+    strip_patterns.append((re.compile('\s+"\n'), '"\n'))
+    strip_patterns.append((re.compile('scrollWidth [0-9]+'), 'scrollWidth'))
+    strip_patterns.append((re.compile('scrollHeight [0-9]+'), 'scrollHeight'))
+    strip_patterns.append((re.compile('scrollX [0-9]+'), 'scrollX'))
+    strip_patterns.append((re.compile('scrollY [0-9]+'), 'scrollY'))
+    strip_patterns.append((re.compile('scrolled to [0-9]+,[0-9]+'), 'scrolled'))
+
+    def __init__(self, text, image, image_hash, audio, crash=False,
+            test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
+            crashed_pid=None, crash_log=None, pid=None):
+        # FIXME: Args could be renamed to better clarify what they do.
+        self.text = text
+        self.image = image  # May be empty-string if the test crashes.
+        self.image_hash = image_hash
+        self.image_diff = None  # image_diff gets filled in after construction.
+        self.audio = audio  # Binary format is port-dependent.
+        self.crash = crash
+        self.crashed_process_name = crashed_process_name
+        self.crashed_pid = crashed_pid
+        self.crash_log = crash_log
+        self.test_time = test_time
+        self.measurements = measurements
+        self.timeout = timeout
+        self.error = error  # stderr output
+        self.pid = pid
+
+    def has_stderr(self):
+        return bool(self.error)
+
+    def strip_metrics(self):
+        if not self.text:
+            return
+        for pattern in self.strip_patterns:
+            self.text = re.sub(pattern[0], pattern[1], self.text)
+
+
+class Driver(object):
+    """object for running test(s) using DumpRenderTree/WebKitTestRunner."""
+
+    def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+        """Initialize a Driver to subsequently run tests.
+
+        Typically this routine will spawn DumpRenderTree in a config
+        ready for subsequent input.
+
+        port - reference back to the port object.
+        worker_number - identifier for a particular worker/driver instance
+        """
+        self._port = port
+        self._worker_number = worker_number
+        self._no_timeout = no_timeout
+
+        self._driver_tempdir = None
+        # WebKitTestRunner can report back subprocess crashes by printing
+        # "#CRASHED - PROCESSNAME".  Since those can happen at any time
+        # and ServerProcess won't be aware of them (since the actual tool
+        # didn't crash, just a subprocess) we record the crashed subprocess name here.
+        self._crashed_process_name = None
+        self._crashed_pid = None
+
+        # WebKitTestRunner can report back subprocesses that became unresponsive.
+        # This could mean they crashed.
+        self._subprocess_was_unresponsive = False
+
+        # stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
+        # stderr output, as well as whether we've seen #EOF on this driver instance.
+        # FIXME: We should probably remove _read_first_block and _read_optional_image_block and
+        # instead scope these locally in run_test.
+        self.error_from_test = str()
+        self.err_seen_eof = False
+        self._server_process = None
+
+        self._measurements = {}
+        if self._port.get_option("profile"):
+            profiler_name = self._port.get_option("profiler")
+            self._profiler = ProfilerFactory.create_profiler(self._port.host,
+                self._port._path_to_driver(), self._port.results_directory(), profiler_name)
+        else:
+            self._profiler = None
+
+    def __del__(self):
+        self.stop()
+
+    def run_test(self, driver_input, stop_when_done):
+        """Run a single test and return the results.
+
+        Note that it is okay if a test times out or crashes and leaves
+        the driver in an indeterminate state. The upper layers of the program
+        are responsible for cleaning up and ensuring things are okay.
+
+        Returns a DriverOutput object.
+        """
+        start_time = time.time()
+        self.start(driver_input.should_run_pixel_test, driver_input.args)
+        test_begin_time = time.time()
+        self.error_from_test = str()
+        self.err_seen_eof = False
+
+        command = self._command_from_driver_input(driver_input)
+        deadline = test_begin_time + int(driver_input.timeout) / 1000.0
+
+        self._server_process.write(command)
+        text, audio = self._read_first_block(deadline)  # First block is either text or audio
+        image, actual_image_hash = self._read_optional_image_block(deadline)  # The second (optional) block is image data.
+
+        crashed = self.has_crashed()
+        timed_out = self._server_process.timed_out
+        pid = self._server_process.pid()
+
+        if stop_when_done or crashed or timed_out:
+            # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
+            # In the timeout case, we kill the hung process as well.
+            out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
+            if out:
+                text += out
+            if err:
+                self.error_from_test += err
+            self._server_process = None
+
+        crash_log = None
+        if crashed:
+            self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
+
+            # If we don't find a crash log use a placeholder error message instead.
+            if not crash_log:
+                pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
+                crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
+                # If the process was unresponsive, append a message noting that there may not have been an actual crash.
+                if self._subprocess_was_unresponsive:
+                    crash_log += 'Process failed to become responsive before timing out.\n'
+
+                # Print stdout and stderr to the placeholder crash log; we want as much context as possible.
+                if self.error_from_test:
+                    crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
+
+        return DriverOutput(text, image, actual_image_hash, audio,
+            crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
+            timeout=timed_out, error=self.error_from_test,
+            crashed_process_name=self._crashed_process_name,
+            crashed_pid=self._crashed_pid, crash_log=crash_log, pid=pid)
+
+    def _get_crash_log(self, stdout, stderr, newer_than):
+        return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
+
+    # FIXME: Seems this could just be inlined into callers.
+    @classmethod
+    def _command_wrapper(cls, wrapper_option):
+        # Hook for injecting valgrind or other runtime instrumentation,
+        # used by e.g. tools/valgrind/valgrind_tests.py.
+        return shlex.split(wrapper_option) if wrapper_option else []
+
+    HTTP_DIR = "http/tests/"
+    HTTP_LOCAL_DIR = "http/tests/local/"
+
+    def is_http_test(self, test_name):
+        return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
+
+    def test_to_uri(self, test_name):
+        """Convert a test name to a URI."""
+        if not self.is_http_test(test_name):
+            return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
+
+        relative_path = test_name[len(self.HTTP_DIR):]
+
+        # TODO(dpranke): remove the SSL reference?
+        if relative_path.startswith("ssl/"):
+            return "https://127.0.0.1:8443/" + relative_path
+        return "http://127.0.0.1:8000/" + relative_path
+
+    def uri_to_test(self, uri):
+        """Return the base layout test name for a given URI.
+
+        This returns the test name for a given URI, e.g., if you passed in
+        "file:///src/LayoutTests/fast/html/keygen.html" it would return
+        "fast/html/keygen.html".
+
+        """
+        if uri.startswith("file:///"):
+            prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
+            if not prefix.endswith('/'):
+                prefix += '/'
+            return uri[len(prefix):]
+        if uri.startswith("http://"):
+            return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
+        if uri.startswith("https://"):
+            return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
+        raise NotImplementedError('unknown url type: %s' % uri)
+
+    def has_crashed(self):
+        if self._server_process is None:
+            return False
+        if self._crashed_process_name:
+            return True
+        if self._server_process.has_crashed():
+            self._crashed_process_name = self._server_process.name()
+            self._crashed_pid = self._server_process.pid()
+            return True
+        return False
+
+    def start(self, pixel_tests, per_test_args):
+        # FIXME: Callers shouldn't normally call this, since this routine
+        # may not be specifying the correct combination of pixel test and
+        # per_test args.
+        #
+        # The only reason we have this routine at all is so the perftestrunner
+        # can pause before running a test; it might be better to push that
+        # into run_test() directly.
+        if not self._server_process:
+            self._start(pixel_tests, per_test_args)
+            self._run_post_start_tasks()
+
+    def _setup_environ_for_driver(self, environment):
+        environment['DYLD_LIBRARY_PATH'] = self._port._build_path()
+        environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
+        # FIXME: We're assuming that WebKitTestRunner checks this DumpRenderTree-named environment variable.
+        # FIXME: Commented out for now to avoid tests breaking. Re-enable after
+        # we cut over to NRWT
+        #environment['DUMPRENDERTREE_TEMP'] = str(self._port._driver_tempdir_for_environment())
+        environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
+        environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
+        if 'WEBKITOUTPUTDIR' in os.environ:
+            environment['WEBKITOUTPUTDIR'] = os.environ['WEBKITOUTPUTDIR']
+        if self._profiler:
+            environment = self._profiler.adjusted_environment(environment)
+        return environment
+
+    def _start(self, pixel_tests, per_test_args):
+        self.stop()
+        self._driver_tempdir = self._port._driver_tempdir()
+        server_name = self._port.driver_name()
+        environment = self._port.setup_environ_for_server(server_name)
+        environment = self._setup_environ_for_driver(environment)
+        self._crashed_process_name = None
+        self._crashed_pid = None
+        self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+        self._server_process.start()
+
+    def _run_post_start_tasks(self):
+        # Remote drivers may override this to delay post-start tasks until the server has ack'd.
+        if self._profiler:
+            self._profiler.attach_to_pid(self._pid_on_target())
+
+    def _pid_on_target(self):
+        # Remote drivers will override this method to return the pid on the device.
+        return self._server_process.pid()
+
+    def stop(self):
+        if self._server_process:
+            self._server_process.stop(self._port.driver_stop_timeout())
+            self._server_process = None
+            if self._profiler:
+                self._profiler.profile_after_exit()
+
+        if self._driver_tempdir:
+            self._port._filesystem.rmtree(str(self._driver_tempdir))
+            self._driver_tempdir = None
+
+    def cmd_line(self, pixel_tests, per_test_args):
+        cmd = self._command_wrapper(self._port.get_option('wrapper'))
+        cmd.append(self._port._path_to_driver())
+        if self._port.get_option('gc_between_tests'):
+            cmd.append('--gc-between-tests')
+        if self._port.get_option('complex_text'):
+            cmd.append('--complex-text')
+        if self._port.get_option('threaded'):
+            cmd.append('--threaded')
+        if self._no_timeout:
+            cmd.append('--no-timeout')
+        # FIXME: We need to pass --timeout=SECONDS to WebKitTestRunner for WebKit2.
+
+        cmd.extend(self._port.get_option('additional_drt_flag', []))
+        cmd.extend(self._port.additional_drt_flag())
+
+        cmd.extend(per_test_args)
+
+        cmd.append('-')
+        return cmd
+
+    def _check_for_driver_crash(self, error_line):
+        if error_line == "#CRASHED\n":
+            # This is used on Windows to report that the process has crashed
+            # See http://trac.webkit.org/changeset/65537.
+            self._crashed_process_name = self._server_process.name()
+            self._crashed_pid = self._server_process.pid()
+        elif (error_line.startswith("#CRASHED - ")
+            or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
+            # WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
+            match = re.match('#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
+            self._crashed_process_name = match.group(1) if match else 'WebProcess'
+            match = re.search('pid (\d+)', error_line)
+            pid = int(match.group(1)) if match else None
+            self._crashed_pid = pid
+            # FIXME: delete this after we're sure this code is working :)
+            _log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
+            if error_line.startswith("#PROCESS UNRESPONSIVE - "):
+                self._subprocess_was_unresponsive = True
+                self._port.sample_process(self._crashed_process_name, self._crashed_pid)
+                # We want to show this since it's not a regular crash and probably we don't have a crash log.
+                self.error_from_test += error_line
+            return True
+        return self.has_crashed()
+
+    def _command_from_driver_input(self, driver_input):
+        # FIXME: performance tests pass in full URLs instead of test names.
+        if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == 'about:blank':
+            command = driver_input.test_name
+        elif self.is_http_test(driver_input.test_name):
+            command = self.test_to_uri(driver_input.test_name)
+        else:
+            command = self._port.abspath_for_test(driver_input.test_name)
+            if sys.platform == 'cygwin':
+                command = path.cygpath(command)
+
+        assert not driver_input.image_hash or driver_input.should_run_pixel_test
+
+        # ' is the separator between arguments.
+        if self._port.supports_per_test_timeout():
+            command += "'--timeout'%s" % driver_input.timeout
+        if driver_input.should_run_pixel_test:
+            command += "'--pixel-test"
+        if driver_input.image_hash:
+            command += "'" + driver_input.image_hash
+        return command + "\n"
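+        # Illustrative result (a sketch; assumes a port that supports per-test
+        # timeouts and a pixel test with an expected hash), e.g.:
+        #   "/abs/path/fast/html/keygen.html'--timeout'35000'--pixel-test'checksum\n"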
+
+    def _read_first_block(self, deadline):
+        # returns (text_content, audio_content)
+        block = self._read_block(deadline)
+        if block.malloc:
+            self._measurements['Malloc'] = float(block.malloc)
+        if block.js_heap:
+            self._measurements['JSHeap'] = float(block.js_heap)
+        if block.content_type == 'audio/wav':
+            return (None, block.decoded_content)
+        return (block.decoded_content, None)
+
+    def _read_optional_image_block(self, deadline):
+        # returns (image, actual_image_hash)
+        block = self._read_block(deadline, wait_for_stderr_eof=True)
+        if block.content and block.content_type == 'image/png':
+            return (block.decoded_content, block.content_hash)
+        return (None, block.content_hash)
+
+    def _read_header(self, block, line, header_text, header_attr, header_filter=None):
+        if line.startswith(header_text) and getattr(block, header_attr) is None:
+            value = line.split()[1]
+            if header_filter:
+                value = header_filter(value)
+            setattr(block, header_attr, value)
+            return True
+        return False
+
+    def _process_stdout_line(self, block, line):
+        if (self._read_header(block, line, 'Content-Type: ', 'content_type')
+            or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
+            or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
+            or self._read_header(block, line, 'ActualHash: ', 'content_hash')
+            or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
+            or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')):
+            return
+        # Note, we're not reading ExpectedHash: here, but we could.
+        # If the line wasn't a header, we just append it to the content.
+        block.content += line
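+        # Illustrative raw block from the driver (a sketch, matching what the
+        # unit tests feed MockServerProcess):
+        #
+        #   Content-Type: image/png
+        #   Content-Transfer-Encoding: base64
+        #   Content-Length: 12
+        #   MTIzNDU2NzgK#EOF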
+
+    def _strip_eof(self, line):
+        if line and line.endswith("#EOF\n"):
+            return line[:-5], True
+        return line, False
+
+    def _read_block(self, deadline, wait_for_stderr_eof=False):
+        block = ContentBlock()
+        out_seen_eof = False
+
+        while not self.has_crashed():
+            if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
+                break
+
+            if self.err_seen_eof:
+                out_line = self._server_process.read_stdout_line(deadline)
+                err_line = None
+            elif out_seen_eof:
+                out_line = None
+                err_line = self._server_process.read_stderr_line(deadline)
+            else:
+                out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
+
+            if self._server_process.timed_out or self.has_crashed():
+                break
+
+            if out_line:
+                assert not out_seen_eof
+                out_line, out_seen_eof = self._strip_eof(out_line)
+            if err_line:
+                assert not self.err_seen_eof
+                err_line, self.err_seen_eof = self._strip_eof(err_line)
+
+            if out_line:
+                if out_line[-1] != "\n":
+                    _log.error("Last character read from DRT stdout line was not a newline!  This indicates either a NRWT or DRT bug.")
+                content_length_before_header_check = block._content_length
+                self._process_stdout_line(block, out_line)
+                # FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
+                # Don't wait until we're done with headers, just read the binary blob right now.
+                if content_length_before_header_check != block._content_length:
+                    block.content = self._server_process.read_stdout(deadline, block._content_length)
+
+            if err_line:
+                if self._check_for_driver_crash(err_line):
+                    break
+                self.error_from_test += err_line
+
+        block.decode_content()
+        return block
+
+
+class ContentBlock(object):
+    def __init__(self):
+        self.content_type = None
+        self.encoding = None
+        self.content_hash = None
+        self._content_length = None
+        # Content is treated as binary data even though the text output is usually UTF-8.
+        self.content = str()  # FIXME: Should be bytearray() once we require Python 2.6.
+        self.decoded_content = None
+        self.malloc = None
+        self.js_heap = None
+
+    def decode_content(self):
+        if self.encoding == 'base64' and self.content is not None:
+            self.decoded_content = base64.b64decode(self.content)
+        else:
+            self.decoded_content = self.content
+
+class DriverProxy(object):
+    """A wrapper for managing two Driver instances, one with pixel tests and
+    one without. This allows us to handle plain text tests and ref tests with a
+    single driver."""
+
+    def __init__(self, port, worker_number, driver_instance_constructor, pixel_tests, no_timeout):
+        self._port = port
+        self._worker_number = worker_number
+        self._driver_instance_constructor = driver_instance_constructor
+        self._no_timeout = no_timeout
+
+        # FIXME: We shouldn't need to create a driver until we actually run a test.
+        self._driver = self._make_driver(pixel_tests)
+        self._driver_cmd_line = None
+
+    def _make_driver(self, pixel_tests):
+        return self._driver_instance_constructor(self._port, self._worker_number, pixel_tests, self._no_timeout)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def is_http_test(self, test_name):
+        return self._driver.is_http_test(test_name)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def test_to_uri(self, test_name):
+        return self._driver.test_to_uri(test_name)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def uri_to_test(self, uri):
+        return self._driver.uri_to_test(uri)
+
+    def run_test(self, driver_input, stop_when_done):
+        base = self._port.lookup_virtual_test_base(driver_input.test_name)
+        if base:
+            virtual_driver_input = copy.copy(driver_input)
+            virtual_driver_input.test_name = base
+            virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+            return self.run_test(virtual_driver_input, stop_when_done)
+
+        pixel_tests_needed = driver_input.should_run_pixel_test
+        cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
+        if cmd_line_key != self._driver_cmd_line:
+            self._driver.stop()
+            self._driver = self._make_driver(pixel_tests_needed)
+            self._driver_cmd_line = cmd_line_key
+
+        return self._driver.run_test(driver_input, stop_when_done)
+
+    def has_crashed(self):
+        return self._driver.has_crashed()
+
+    def stop(self):
+        self._driver.stop()
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def cmd_line(self, pixel_tests=None, per_test_args=None):
+        return self._driver.cmd_line(pixel_tests, per_test_args or [])
+
+    def _cmd_line_as_key(self, pixel_tests, per_test_args):
+        return ' '.join(self.cmd_line(pixel_tests, per_test_args))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/driver_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,269 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.port import Port, Driver, DriverOutput
+from webkitpy.port.server_process_mock import MockServerProcess
+
+# FIXME: remove the dependency on TestWebKitPort
+from webkitpy.port.port_testcase import TestWebKitPort
+
+from webkitpy.tool.mocktool import MockOptions
+
+
+class DriverOutputTest(unittest.TestCase):
+    def test_strip_metrics(self):
+        patterns = [
+            ('RenderView at (0,0) size 800x600', 'RenderView '),
+            ('text run at (0,0) width 100: "some text"', '"some text"'),
+            ('RenderBlock {HTML} at (0,0) size 800x600', 'RenderBlock {HTML} '),
+            ('RenderBlock {INPUT} at (29,3) size 12x12 [color=#000000]', 'RenderBlock {INPUT}'),
+
+            ('RenderBlock (floating) {DT} at (5,5) size 79x310 [border: (5px solid #000000)]',
+            'RenderBlock (floating) {DT} [border: px solid #000000)]'),
+
+            ('\n    "truncate text    "\n', '\n    "truncate text"\n'),
+
+            ('RenderText {#text} at (0,3) size 41x12\n    text run at (0,3) width 41: "whimper "\n',
+            'RenderText {#text} \n    "whimper"\n'),
+
+            ("""text run at (0,0) width 109: ".one {color: green;}"
+          text run at (109,0) width 0: " "
+          text run at (0,17) width 81: ".1 {color: red;}"
+          text run at (81,17) width 0: " "
+          text run at (0,34) width 102: ".a1 {color: green;}"
+          text run at (102,34) width 0: " "
+          text run at (0,51) width 120: "P.two {color: purple;}"
+          text run at (120,51) width 0: " "\n""",
+            '".one {color: green;}  .1 {color: red;}  .a1 {color: green;}  P.two {color: purple;}"\n'),
+
+            ('text-- other text', 'text--other text'),
+
+            (' some output   "truncate trailing spaces at end of line after text"   \n',
+            ' some output   "truncate trailing spaces at end of line after text"\n'),
+
+            (r'scrollWidth 120', r'scrollWidth'),
+            (r'scrollHeight 120', r'scrollHeight'),
+        ]
+
+        for pattern in patterns:
+            driver_output = DriverOutput(pattern[0], None, None, None)
+            driver_output.strip_metrics()
+            self.assertEqual(driver_output.text, pattern[1])
+
+
+class DriverTest(unittest.TestCase):
+    def make_port(self):
+        port = Port(MockSystemHost(), 'test', MockOptions(configuration='Release'))
+        port._config.build_directory = lambda configuration: '/mock-build'
+        return port
+
+    def _assert_wrapper(self, wrapper_string, expected_wrapper):
+        wrapper = Driver(self.make_port(), None, pixel_tests=False)._command_wrapper(wrapper_string)
+        self.assertEqual(wrapper, expected_wrapper)
+
+    def test_command_wrapper(self):
+        self._assert_wrapper(None, [])
+        self._assert_wrapper("valgrind", ["valgrind"])
+
+        # Validate that shlex works as expected.
+        command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
+        expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
+        self._assert_wrapper(command_with_spaces, expected_parse)
+
+    def test_test_to_uri(self):
+        port = self.make_port()
+        driver = Driver(port, None, pixel_tests=False)
+        self.assertEqual(driver.test_to_uri('foo/bar.html'), 'file://%s/foo/bar.html' % port.layout_tests_dir())
+        self.assertEqual(driver.test_to_uri('http/tests/foo.html'), 'http://127.0.0.1:8000/foo.html')
+        self.assertEqual(driver.test_to_uri('http/tests/ssl/bar.html'), 'https://127.0.0.1:8443/ssl/bar.html')
+
+    def test_uri_to_test(self):
+        port = self.make_port()
+        driver = Driver(port, None, pixel_tests=False)
+        self.assertEqual(driver.uri_to_test('file://%s/foo/bar.html' % port.layout_tests_dir()), 'foo/bar.html')
+        self.assertEqual(driver.uri_to_test('http://127.0.0.1:8000/foo.html'), 'http/tests/foo.html')
+        self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/ssl/bar.html'), 'http/tests/ssl/bar.html')
+
+    def test_read_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=False)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: foobar',
+            'Content-Type: my_type',
+            'Content-Transfer-Encoding: none',
+            "#EOF",
+        ])
+        content_block = driver._read_block(0)
+        self.assertEqual(content_block.content_type, 'my_type')
+        self.assertEqual(content_block.encoding, 'none')
+        self.assertEqual(content_block.content_hash, 'foobar')
+        driver._server_process = None
+
+    def test_read_binary_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: actual',
+            'ExpectedHash: expected',
+            'Content-Type: image/png',
+            'Content-Length: 9',
+            "12345678",
+            "#EOF",
+        ])
+        content_block = driver._read_block(0)
+        self.assertEqual(content_block.content_type, 'image/png')
+        self.assertEqual(content_block.content_hash, 'actual')
+        self.assertEqual(content_block.content, '12345678\n')
+        self.assertEqual(content_block.decoded_content, '12345678\n')
+        driver._server_process = None
+
+    def test_read_base64_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: actual',
+            'ExpectedHash: expected',
+            'Content-Type: image/png',
+            'Content-Transfer-Encoding: base64',
+            'Content-Length: 12',
+            'MTIzNDU2NzgK#EOF',
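+            # 'MTIzNDU2NzgK' is base64 for '12345678\n'; the '#EOF' marker shares the line here.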
+        ])
+        content_block = driver._read_block(0)
+        self.assertEqual(content_block.content_type, 'image/png')
+        self.assertEqual(content_block.content_hash, 'actual')
+        self.assertEqual(content_block.encoding, 'base64')
+        self.assertEqual(content_block.content, 'MTIzNDU2NzgK')
+        self.assertEqual(content_block.decoded_content, '12345678\n')
+
+    def test_no_timeout(self):
+        port = TestWebKitPort()
+        port._config.build_directory = lambda configuration: '/mock-build'
+        driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
+        self.assertEqual(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '-'])
+
+    def test_check_for_driver_crash(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+
+        class FakeServerProcess(object):
+            def __init__(self, crashed):
+                self.crashed = crashed
+
+            def pid(self):
+                return 1234
+
+            def name(self):
+                return 'FakeServerProcess'
+
+            def has_crashed(self):
+                return self.crashed
+
+            def stop(self, timeout):
+                pass
+
+        def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False):
+            self.assertEqual(driver._check_for_driver_crash(error_line), crashed)
+            self.assertEqual(driver._crashed_process_name, name)
+            self.assertEqual(driver._crashed_pid, pid)
+            self.assertEqual(driver._subprocess_was_unresponsive, unresponsive)
+            driver.stop()
+
+        driver._server_process = FakeServerProcess(False)
+        assert_crash(driver, '', False, None, None)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#PROCESS UNRESPONSIVE - WebProcess (pid 8675)\n', True, 'WebProcess', 8675, True)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - renderer (pid 8675)\n', True, 'renderer', 8675)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(True)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '', True, 'FakeServerProcess', 1234)
+
+    def test_creating_a_port_does_not_write_to_the_filesystem(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        self.assertEqual(port._filesystem.written_files, {})
+        self.assertEqual(port._filesystem.last_tmpdir, None)
+
+    def test_stop_cleans_up_properly(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        last_tmpdir = port._filesystem.last_tmpdir
+        self.assertNotEquals(last_tmpdir, None)
+        driver.stop()
+        self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+    def test_two_starts_cleans_up_properly(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        last_tmpdir = port._filesystem.last_tmpdir
+        driver._start(True, [])
+        self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+    def test_start_actually_starts(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        self.assertTrue(driver._server_process.started)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/efl.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,136 @@
+# Copyright (C) 2011 ProFUSION Embedded Systems. All rights reserved.
+# Copyright (C) 2011 Samsung Electronics. All rights reserved.
+# Copyright (C) 2012 Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit Efl implementation of the Port interface."""
+
+import os
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.port.base import Port
+from webkitpy.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.port.xvfbdriver import XvfbDriver
+
+
+class EflPort(Port):
+    port_name = 'efl'
+
+    def __init__(self, *args, **kwargs):
+        super(EflPort, self).__init__(*args, **kwargs)
+
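+        # By default, run the test driver through the EFL jhbuild wrapper; an
+        # explicitly supplied --wrapper option is expected to take precedence.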
+        self._jhbuild_wrapper_path = [self.path_from_webkit_base('Tools', 'jhbuild', 'jhbuild-wrapper'), '--efl', 'run']
+
+        self.set_option_default('wrapper', ' '.join(self._jhbuild_wrapper_path))
+        self.webprocess_cmd_prefix = self.get_option('webprocess_cmd_prefix')
+
+        self._pulseaudio_sanitizer = PulseAudioSanitizer()
+
+    def _port_flag_for_scripts(self):
+        return "--efl"
+
+    def setup_test_run(self):
+        super(EflPort, self).setup_test_run()
+        self._pulseaudio_sanitizer.unload_pulseaudio_module()
+
+    def setup_environ_for_server(self, server_name=None):
+        env = super(EflPort, self).setup_environ_for_server(server_name)
+
+        # If the DISPLAY environment variable is unset in the system
+        # (e.g. on a build bot), remove it from the environment dictionary.
+        if 'DISPLAY' not in os.environ:
+            del env['DISPLAY']
+
+        env['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('lib', 'libTestRunnerInjectedBundle.so')
+        env['TEST_RUNNER_PLUGIN_PATH'] = self._build_path('lib')
+
+        # Silence GIO warnings about using the "memory" GSettings backend.
+        env['GSETTINGS_BACKEND'] = 'memory'
+
+        if self.webprocess_cmd_prefix:
+            env['WEB_PROCESS_CMD_PREFIX'] = self.webprocess_cmd_prefix
+
+        return env
+
+    def default_timeout_ms(self):
+        # Tests run considerably slower under gdb or valgrind.
+        if self.get_option('webprocess_cmd_prefix'):
+            return 350 * 1000
+        return super(EflPort, self).default_timeout_ms()
+
+    def clean_up_test_run(self):
+        super(EflPort, self).clean_up_test_run()
+        self._pulseaudio_sanitizer.restore_pulseaudio_module()
+
+    def _generate_all_test_configurations(self):
+        return [TestConfiguration(version=self._version, architecture='x86', build_type=build_type) for build_type in self.ALL_BUILD_TYPES]
+
+    def _driver_class(self):
+        return XvfbDriver
+
+    def _path_to_driver(self):
+        return self._build_path('bin', self.driver_name())
+
+    def _path_to_image_diff(self):
+        return self._build_path('bin', 'ImageDiff')
+
+    def _image_diff_command(self, *args, **kwargs):
+        return self._jhbuild_wrapper_path + super(EflPort, self)._image_diff_command(*args, **kwargs)
+
+    def _path_to_webcore_library(self):
+        static_path = self._build_path('lib', 'libwebcore_efl.a')
+        dyn_path = self._build_path('lib', 'libwebcore_efl.so')
+        return static_path if self._filesystem.exists(static_path) else dyn_path
+
+    def _search_paths(self):
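+        # Baselines are searched most-specific first: the efl-wk2/efl-wk1 directory,
+        # then the shared wk2 directory (WebKit2 only), then the plain efl directory.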
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.append(self.port_name + '-wk2')
+            search_paths.append('wk2')
+        else:
+            search_paths.append(self.port_name + '-wk1')
+        search_paths.append(self.port_name)
+        return search_paths
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self._search_paths())
+
+    def _port_specific_expectations_files(self):
+        # FIXME: We should be able to use the default algorithm here.
+        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
+
+    def show_results_html_file(self, results_filename):
+        # FIXME: We should find a way to share this implementation with Gtk,
+        # or teach run-launcher how to call run-safari and move this down to WebKitPort.
+        run_launcher_args = ["file://%s" % results_filename]
+        if self.get_option('webkit_test_runner'):
+            run_launcher_args.append('-2')
+        # FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
+        # FIXME: old-run-webkit-tests converted results_filename path for cygwin.
+        self._run_script("run-launcher", run_launcher_args)
+
+    def check_sys_deps(self, needs_http):
+        return super(EflPort, self).check_sys_deps(needs_http) and XvfbDriver.check_xvfb(self)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/efl_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,50 @@
+# Copyright (C) 2011 ProFUSION Embedded Systems. All rights reserved.
+# Copyright (C) 2011 Samsung Electronics. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.efl import EflPort
+from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
+from webkitpy.port import port_testcase
+
+
+class EflPortTest(port_testcase.PortTestCase):
+    port_name = 'efl'
+    port_maker = EflPort
+
+    # Additionally mocks out the PulseAudioSanitizer methods.
+    def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+        port = super(EflPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
+        port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
+        return port
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--efl', 'file://test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/factory.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,128 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Factory method to retrieve the appropriate port implementation."""
+
+import fnmatch
+import optparse
+import re
+
+from webkitpy.port import builders
+
+
+def platform_options(use_globs=False):
+    return [
+        optparse.make_option('--platform', action='store',
+            help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
+        optparse.make_option('--efl', action='store_const', dest='platform',
+            const=('efl*' if use_globs else 'efl'),
+            help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
+        optparse.make_option('--gtk', action='store_const', dest='platform',
+            const=('gtk*' if use_globs else 'gtk'),
+            help=('Alias for --platform=gtk*' if use_globs else 'Alias for --platform=gtk')),
+        optparse.make_option('--qt', action='store_const', dest="platform",
+            const=('qt*' if use_globs else 'qt'),
+            help=('Alias for --platform=qt*' if use_globs else 'Alias for --platform=qt')),
+        ]
+
+
+def configuration_options():
+    return [
+        optparse.make_option("-t", "--target", dest="configuration", help="(DEPRECATED)"),
+        # FIXME: --help should display which configuration is default.
+        optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+            help='Set the configuration to Debug'),
+        optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+            help='Set the configuration to Release'),
+        optparse.make_option('--32-bit', action='store_const', const='x86', default=None, dest="architecture",
+            help='use 32-bit binaries by default (x86 instead of x86_64)'),
+        ]
+
+
+def _builder_options(builder_name):
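+    # Heuristic: builder names containing "Debug"/"Dbg" select the Debug configuration,
+    # and names containing "WK2" run the tests with WebKitTestRunner.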
+    configuration = "Debug" if re.search(r"[dD](ebu|b)g", builder_name) else "Release"
+    is_webkit2 = builder_name.find("WK2") != -1
+    return optparse.Values({'builder_name': builder_name, 'configuration': configuration, 'webkit_test_runner': is_webkit2})
+
+
+class PortFactory(object):
+    PORT_CLASSES = (
+        'efl.EflPort',
+        'gtk.GtkPort',
+        'mac.MacPort',
+        'mock_drt.MockDRTPort',
+        'qt.QtPort',
+        'test.TestPort',
+        'win.WinPort',
+    )
+
+    def __init__(self, host):
+        self._host = host
+
+    def _default_port(self, options):
+        platform = self._host.platform
+        if platform.is_linux() or platform.is_freebsd():
+            return 'qt-linux'
+        elif platform.is_mac():
+            return 'mac'
+        elif platform.is_win():
+            return 'win'
+        raise NotImplementedError('unknown platform: %s' % platform)
+
+    def get(self, port_name=None, options=None, **kwargs):
+        """Returns an object implementing the Port interface. If
+        port_name is None, this routine attempts to guess at the most
+        appropriate port on this platform."""
+        port_name = port_name or self._default_port(options)
+
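+        # Import each candidate port module lazily and use the first class whose
+        # port_name is a prefix of the requested name.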
+        for port_class in self.PORT_CLASSES:
+            module_name, class_name = port_class.rsplit('.', 1)
+            module = __import__(module_name, globals(), locals(), [], -1)
+            cls = module.__dict__[class_name]
+            if port_name.startswith(cls.port_name):
+                port_name = cls.determine_full_port_name(self._host, options, port_name)
+                return cls(self._host, port_name, options=options, **kwargs)
+        raise NotImplementedError('unsupported platform: "%s"' % port_name)
+
+    def all_port_names(self, platform=None):
+        """Return a list of all valid, fully-specified, "real" port names.
+
+        This is the list of directories that are used as actual baseline_paths()
+        by real ports. This does not include any "fake" names like "test"
+        or "mock-mac", and it does not include any directories that are not.
+
+        If platform is not specified, we will glob-match all ports"""
+        platform = platform or '*'
+        return fnmatch.filter(builders.all_port_names(), platform)
+
+    def get_from_builder_name(self, builder_name):
+        port_name = builders.port_name_for_builder_name(builder_name)
+        assert port_name, "unrecognized builder name '%s'" % builder_name
+        return self.get(port_name, _builder_options(builder_name))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/factory_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,82 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.port import factory
+from webkitpy.port import gtk
+from webkitpy.port import mac
+from webkitpy.port import qt
+from webkitpy.port import test
+from webkitpy.port import win
+
+
+class FactoryTest(unittest.TestCase):
+    """Test that the factory creates the proper port object for given combination of port_name, host.platform, and options."""
+    # FIXME: The ports themselves should expose what options they require,
+    # instead of passing generic "options".
+
+    def setUp(self):
+        self.webkit_options = MockOptions(pixel_tests=False)
+
+    def assert_port(self, port_name=None, os_name=None, os_version=None, options=None, cls=None):
+        host = MockSystemHost(os_name=os_name, os_version=os_version)
+        port = factory.PortFactory(host).get(port_name, options=options)
+        self.assertIsInstance(port, cls)
+
+    def test_mac(self):
+        self.assert_port(port_name='mac-lion', cls=mac.MacPort)
+        self.assert_port(port_name='mac-lion-wk2', cls=mac.MacPort)
+        self.assert_port(port_name='mac', os_name='mac', os_version='lion', cls=mac.MacPort)
+        self.assert_port(port_name=None,  os_name='mac', os_version='lion', cls=mac.MacPort)
+
+    def test_win(self):
+        self.assert_port(port_name='win-xp', cls=win.WinPort)
+        self.assert_port(port_name='win-xp-wk2', cls=win.WinPort)
+        self.assert_port(port_name='win', os_name='win', os_version='xp', cls=win.WinPort)
+        self.assert_port(port_name=None, os_name='win', os_version='xp', cls=win.WinPort)
+        self.assert_port(port_name=None, os_name='win', os_version='xp', options=self.webkit_options, cls=win.WinPort)
+
+    def test_gtk(self):
+        self.assert_port(port_name='gtk', cls=gtk.GtkPort)
+
+    def test_qt(self):
+        self.assert_port(port_name='qt', cls=qt.QtPort)
+
+    def test_unknown_specified(self):
+        self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost()).get, port_name='unknown')
+
+    def test_unknown_default(self):
+        self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost(os_name='vms')).get)
+
+    def test_get_from_builder_name(self):
+        self.assertEqual(factory.PortFactory(MockSystemHost()).get_from_builder_name('Apple Lion Release WK1 (Tests)').name(),
+                          'mac-lion')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/gtk.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,179 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import subprocess
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.port.base import Port
+from webkitpy.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.port.xvfbdriver import XvfbDriver
+
+
+class GtkPort(Port):
+    port_name = "gtk"
+
+    def __init__(self, *args, **kwargs):
+        super(GtkPort, self).__init__(*args, **kwargs)
+        self._pulseaudio_sanitizer = PulseAudioSanitizer()
+
+    def warn_if_bug_missing_in_test_expectations(self):
+        return not self.get_option('webkit_test_runner')
+
+    def _port_flag_for_scripts(self):
+        return "--gtk"
+
+    def _driver_class(self):
+        return XvfbDriver
+
+    def default_timeout_ms(self):
+        if self.get_option('configuration') == 'Debug':
+            return 12 * 1000
+        return 6 * 1000
+
+    def setup_test_run(self):
+        super(GtkPort, self).setup_test_run()
+        self._pulseaudio_sanitizer.unload_pulseaudio_module()
+
+    def clean_up_test_run(self):
+        super(GtkPort, self).clean_up_test_run()
+        self._pulseaudio_sanitizer.restore_pulseaudio_module()
+
+    def setup_environ_for_server(self, server_name=None):
+        environment = super(GtkPort, self).setup_environ_for_server(server_name)
+        environment['GTK_MODULES'] = 'gail'
+        environment['GSETTINGS_BACKEND'] = 'memory'
+        environment['LIBOVERLAY_SCROLLBAR'] = '0'
+        environment['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('Libraries', 'libTestRunnerInjectedBundle.la')
+        environment['TEST_RUNNER_TEST_PLUGIN_PATH'] = self._build_path('TestNetscapePlugin', '.libs')
+        environment['WEBKIT_INSPECTOR_PATH'] = self._build_path('Programs', 'resources', 'inspector')
+        environment['AUDIO_RESOURCES_PATH'] = self.path_from_webkit_base('Source', 'WebCore', 'platform', 'audio', 'resources')
+        self._copy_value_from_environ_if_set(environment, 'WEBKITOUTPUTDIR')
+        return environment
+
+    def _generate_all_test_configurations(self):
+        configurations = []
+        for build_type in self.ALL_BUILD_TYPES:
+            configurations.append(TestConfiguration(version=self._version, architecture='x86', build_type=build_type))
+        return configurations
+
+    def _path_to_driver(self):
+        return self._build_path('Programs', self.driver_name())
+
+    def _path_to_image_diff(self):
+        return self._build_path('Programs', 'ImageDiff')
+
+    def _path_to_webcore_library(self):
+        gtk_library_names = [
+            "libwebkitgtk-1.0.so",
+            "libwebkitgtk-3.0.so",
+            "libwebkit2gtk-1.0.so",
+        ]
+
+        for library in gtk_library_names:
+            full_library = self._build_path(".libs", library)
+            if self._filesystem.isfile(full_library):
+                return full_library
+        return None
+
+    def _search_paths(self):
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.extend([self.port_name + '-wk2', 'wk2'])
+        else:
+            search_paths.append(self.port_name + '-wk1')
+        search_paths.append(self.port_name)
+        search_paths.extend(self.get_option("additional_platform_directory", []))
+        return search_paths
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self._search_paths())
+
+    def _port_specific_expectations_files(self):
+        return [self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in reversed(self._search_paths())]
+
+    # FIXME: We should find a way to share this implementation with Gtk,
+    # or teach run-launcher how to call run-safari and move this down to Port.
+    def show_results_html_file(self, results_filename):
+        run_launcher_args = ["file://%s" % results_filename]
+        if self.get_option('webkit_test_runner'):
+            run_launcher_args.append('-2')
+        # FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
+        # FIXME: old-run-webkit-tests converted results_filename path for cygwin.
+        self._run_script("run-launcher", run_launcher_args)
+
+    def check_sys_deps(self, needs_http):
+        return super(GtkPort, self).check_sys_deps(needs_http) and XvfbDriver.check_xvfb(self)
+
+    def _get_gdb_output(self, coredump_path):
+        cmd = ['gdb', '-ex', 'thread apply all bt 1024', '--batch', str(self._path_to_driver()), coredump_path]
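+        # 'thread apply all bt 1024' asks gdb for a backtrace (up to 1024 frames) of
+        # every thread in the core dump; --batch keeps the session non-interactive.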
+        proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = proc.communicate()
+        errors = [l.strip().decode('utf8', 'ignore') for l in stderr.splitlines()]
+        return (stdout.decode('utf8', 'ignore'), errors)
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+        pid_representation = str(pid or '<unknown>')
+        log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
+        errors = []
+        crash_log = ''
+        expected_crash_dump_filename = "core-pid_%s-_-process_%s" % (pid_representation, name)
+
+        def match_filename(filesystem, directory, filename):
+            if pid:
+                return filename == expected_crash_dump_filename
+            return filename.find(name) > -1
+
+        if log_directory:
+            dumps = self._filesystem.files_under(log_directory, file_filter=match_filename)
+            if dumps:
+                # Get the most recent coredump matching the pid and/or process name.
+                coredump_path = list(reversed(sorted(dumps)))[0]
+                if not newer_than or self._filesystem.mtime(coredump_path) > newer_than:
+                    crash_log, errors = self._get_gdb_output(coredump_path)
+
+        stderr_lines = errors + (stderr or '<empty>').decode('utf8', 'ignore').splitlines()
+        errors_str = '\n'.join(('STDERR: ' + l) for l in stderr_lines)
+        if not crash_log:
+            if not log_directory:
+                log_directory = "/path/to/coredumps"
+            core_pattern = os.path.join(log_directory, "core-pid_%p-_-process_%e")
+            crash_log = """\
+Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s
+
+""" % locals()
+
+        return (stderr, """\
+Crash log for %(name)s (pid %(pid_representation)s):
+
+%(crash_log)s
+%(errors_str)s""" % locals())
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/gtk_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,110 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+import sys
+import os
+
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.gtk import GtkPort
+from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
+from webkitpy.port import port_testcase
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+
+
+class GtkPortTest(port_testcase.PortTestCase):
+    port_name = 'gtk'
+    port_maker = GtkPort
+
+    # Additionally mocks out the PulseAudioSanitizer methods.
+    def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+        port = super(GtkPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
+        port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
+        return port
+
+    def test_default_baseline_search_path(self):
+        port = self.make_port()
+        self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk1',
+            '/mock-checkout/LayoutTests/platform/gtk'])
+
+        port = self.make_port(options=MockOptions(webkit_test_runner=True))
+        self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk2',
+            '/mock-checkout/LayoutTests/platform/wk2', '/mock-checkout/LayoutTests/platform/gtk'])
+
+    def test_port_specific_expectations_files(self):
+        port = self.make_port()
+        self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
+            '/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
+            '/mock-checkout/LayoutTests/platform/gtk-wk1/TestExpectations'])
+
+        port = self.make_port(options=MockOptions(webkit_test_runner=True))
+        self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
+            '/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
+            '/mock-checkout/LayoutTests/platform/wk2/TestExpectations',
+            '/mock-checkout/LayoutTests/platform/gtk-wk2/TestExpectations'])
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
+
+    def test_default_timeout_ms(self):
+        self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
+        self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
+
+    def test_get_crash_log(self):
+        core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
+        core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
+        mock_empty_crash_log = """\
+Crash log for DumpRenderTree (pid 28529):
+
+Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
+
+
+STDERR: <empty>""" % locals()
+
+        def _mock_gdb_output(coredump_path):
+            return (mock_empty_crash_log, [])
+
+        port = self.make_port()
+        port._get_gdb_output = _mock_gdb_output
+        stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
+        self.assertEqual(stderr, "")
+        self.assertMultiLineEqual(log, mock_empty_crash_log)
+
+        stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
+        self.assertEqual(stderr, "")
+        self.assertMultiLineEqual(log, mock_empty_crash_log)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/http_lock.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,137 @@
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: rename this file, and add more text about how this is
+# different from the base file_lock class.
+
+"""This class helps to block NRWT threads when more NRWTs run
+perf, http and websocket tests in a same time."""
+
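+# Typical usage (a sketch): call wait_for_httpd_lock() before starting the shared
+# HTTP/websocket servers, and cleanup_http_lock() once the servers are torn down.
+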
+import logging
+import os
+import sys
+import tempfile
+import time
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.file_lock import FileLock
+from webkitpy.common.system.filesystem import FileSystem
+
+
+_log = logging.getLogger(__name__)
+
+
+class HttpLock(object):
+    def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", guard_lock="WebKit.lock", filesystem=None, executive=None, name='HTTP'):
+        self._executive = executive or Executive()
+        self._filesystem = filesystem or FileSystem()
+        self._lock_path = lock_path
+        if not self._lock_path:
+            # FIXME: FileSystem should have an accessor for tempdir()
+            self._lock_path = tempfile.gettempdir()
+        self._lock_file_prefix = lock_file_prefix
+        self._lock_file_path_prefix = self._filesystem.join(self._lock_path, self._lock_file_prefix)
+        self._guard_lock_file = self._filesystem.join(self._lock_path, guard_lock)
+        self._guard_lock = FileLock(self._guard_lock_file)
+        self._process_lock_file_name = ""
+        self._name = name
+
+    def cleanup_http_lock(self):
+        """Delete the lock file if exists."""
+        if self._filesystem.exists(self._process_lock_file_name):
+            _log.debug("Removing lock file: %s" % self._process_lock_file_name)
+            self._filesystem.remove(self._process_lock_file_name)
+
+    def _extract_lock_number(self, lock_file_name):
+        """Return the lock number from lock file."""
+        prefix_length = len(self._lock_file_path_prefix)
+        return int(lock_file_name[prefix_length:])
+
+    def _lock_file_list(self):
+        """Return the list of lock files sequentially."""
+        lock_list = self._filesystem.glob(self._lock_file_path_prefix + '*')
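+        # Sort numerically rather than lexicographically, so e.g. '...10' sorts after '...9'.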
+        lock_list.sort(key=self._extract_lock_number)
+        return lock_list
+
+    def _next_lock_number(self):
+        """Return the next available lock number."""
+        lock_list = self._lock_file_list()
+        if not lock_list:
+            return 0
+        return self._extract_lock_number(lock_list[-1]) + 1
+
+    def _current_lock_pid(self):
+        """Return with the current lock pid. If the lock is not valid
+        it deletes the lock file."""
+        lock_list = self._lock_file_list()
+        if not lock_list:
+            _log.debug("No lock file list")
+            return
+        try:
+            current_pid = self._filesystem.read_text_file(lock_list[0])
+            if not (current_pid and self._executive.check_running_pid(int(current_pid))):
+                _log.debug("Removing stuck lock file: %s" % lock_list[0])
+                self._filesystem.remove(lock_list[0])
+                return
+        except IOError, e:
+            _log.debug("IOError: %s" % e)
+            return
+        except OSError, e:
+            _log.debug("OSError: %s" % e)
+            return
+        return int(current_pid)
+
+    def _create_lock_file(self):
+        """The lock files are used to schedule the running test sessions in first
+        come first served order. The guard lock ensures that the lock numbers are
+        sequential."""
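+        # Each lock file is named <prefix><N> and stores the pid of the process that owns it.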
+        if not self._filesystem.exists(self._lock_path):
+            _log.debug("Lock directory does not exist: %s" % self._lock_path)
+            return False
+
+        if not self._guard_lock.acquire_lock():
+            _log.debug("Guard lock timed out!")
+            return False
+
+        self._process_lock_file_name = (self._lock_file_path_prefix + str(self._next_lock_number()))
+        _log.debug("Creating lock file: %s" % self._process_lock_file_name)
+        # FIXME: Executive.py should have an accessor for getpid()
+        self._filesystem.write_text_file(self._process_lock_file_name, str(os.getpid()))
+        self._guard_lock.release_lock()
+        return True
+
+    def wait_for_httpd_lock(self):
+        """Create a lock file and wait until it's turn comes. If something goes wrong
+        it wont do any locking."""
+        if not self._create_lock_file():
+            _log.debug("Warning, %s locking failed!" % self._name)
+            return
+
+        # FIXME: This can hang forever!
+        while self._current_lock_pid() != os.getpid():
+            time.sleep(1)
+
+        _log.debug("%s lock acquired" % self._name)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/http_lock_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,120 @@
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from http_lock import HttpLock
+import os  # Used for os.getpid()
+import unittest2 as unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+# FIXME: These tests all touch the real disk, but could be written to a MockFileSystem instead.
+class HttpLockTestWithRealFileSystem(unittest.TestCase):
+    # FIXME: Unit tests do not use an __init__ method, but rather setUp and tearDown methods.
+    def __init__(self, testFunc):
+        self.http_lock = HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock")
+        self.filesystem = self.http_lock._filesystem  # FIXME: We should be passing in a MockFileSystem instead.
+        self.lock_file_path_prefix = self.filesystem.join(self.http_lock._lock_path, self.http_lock._lock_file_prefix)
+        self.lock_file_name = self.lock_file_path_prefix + "0"
+        self.guard_lock_file = self.http_lock._guard_lock_file
+        self.clean_all_lockfile()
+        unittest.TestCase.__init__(self, testFunc)
+
+    def clean_all_lockfile(self):
+        if self.filesystem.exists(self.guard_lock_file):
+            self.filesystem.remove(self.guard_lock_file)
+        lock_list = self.filesystem.glob(self.lock_file_path_prefix + '*')
+        for file_name in lock_list:
+            self.filesystem.remove(file_name)
+
+    def assertEqual(self, first, second):
+        if first != second:
+            self.clean_all_lockfile()
+        unittest.TestCase.assertEqual(self, first, second)
+
+    def _check_lock_file(self):
+        if self.filesystem.exists(self.lock_file_name):
+            pid = os.getpid()
+            lock_file_pid = self.filesystem.read_text_file(self.lock_file_name)
+            self.assertEqual(pid, int(lock_file_pid))
+            return True
+        return False
+
+    def test_lock_lifecycle(self):
+        self.http_lock._create_lock_file()
+
+        self.assertEqual(True, self._check_lock_file())
+        self.assertEqual(1, self.http_lock._next_lock_number())
+
+        self.http_lock.cleanup_http_lock()
+
+        self.assertEqual(False, self._check_lock_file())
+        self.assertEqual(0, self.http_lock._next_lock_number())
+
+
+class HttpLockTest(unittest.TestCase):
+    def setUp(self):
+        self.filesystem = MockFileSystem()
+        self.http_lock = HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock", filesystem=self.filesystem, executive=MockExecutive())
+        # FIXME: Shouldn't we be able to get these values from the http_lock object directly?
+        self.lock_file_path_prefix = self.filesystem.join(self.http_lock._lock_path, self.http_lock._lock_file_prefix)
+        self.lock_file_name = self.lock_file_path_prefix + "0"
+
+    def test_current_lock_pid(self):
+        # FIXME: Once Executive wraps getpid, we can mock this and not use a real pid.
+        current_pid = os.getpid()
+        self.http_lock._filesystem.write_text_file(self.lock_file_name, str(current_pid))
+        self.assertEqual(self.http_lock._current_lock_pid(), current_pid)
+
+    def test_extract_lock_number(self):
+        lock_file_list = (
+            self.lock_file_path_prefix + "00",
+            self.lock_file_path_prefix + "9",
+            self.lock_file_path_prefix + "001",
+            self.lock_file_path_prefix + "021",
+        )
+
+        expected_number_list = (0, 9, 1, 21)
+
+        for lock_file, expected in zip(lock_file_list, expected_number_list):
+            self.assertEqual(self.http_lock._extract_lock_number(lock_file), expected)
+
+    def test_lock_file_list(self):
+        self.http_lock._filesystem = MockFileSystem({
+            self.lock_file_path_prefix + "6": "",
+            self.lock_file_path_prefix + "1": "",
+            self.lock_file_path_prefix + "4": "",
+            self.lock_file_path_prefix + "3": "",
+        })
+
+        expected_file_list = [
+            self.lock_file_path_prefix + "1",
+            self.lock_file_path_prefix + "3",
+            self.lock_file_path_prefix + "4",
+            self.lock_file_path_prefix + "6",
+        ]
+
+        self.assertEqual(self.http_lock._lock_file_list(), expected_file_list)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/image_diff.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,118 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit implementations of the Port interface."""
+
+import logging
+import re
+import time
+
+from webkitpy.port import server_process
+
+
+_log = logging.getLogger(__name__)
+
+
+class ImageDiffer(object):
+    def __init__(self, port):
+        self._port = port
+        self._tolerance = None
+        self._process = None
+
+    def diff_image(self, expected_contents, actual_contents, tolerance):
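+        # Returns a (diff_image, diff_percent, error_string) tuple; see _read() below.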
+        if tolerance != self._tolerance:
+            self.stop()
+        try:
+            assert(expected_contents)
+            assert(actual_contents)
+            assert(tolerance is not None)
+
+            if not self._process:
+                self._start(tolerance)
+            # Note that although we are handed 'old', 'new', ImageDiff wants 'new', 'old'.
+            self._process.write('Content-Length: %d\n%sContent-Length: %d\n%s' % (
+                len(actual_contents), actual_contents,
+                len(expected_contents), expected_contents))
+            return self._read()
+        except IOError as exception:
+            return (None, 0, "Failed to compute an image diff: %s" % str(exception))
+
+    def _start(self, tolerance):
+        command = [self._port._path_to_image_diff(), '--tolerance', str(tolerance)]
+        environment = self._port.setup_environ_for_server('ImageDiff')
+        self._process = self._port._server_process_constructor(self._port, 'ImageDiff', command, environment)
+        self._process.start()
+        self._tolerance = tolerance
+
+    def _read(self):
+        deadline = time.time() + 2.0
+        output = None
+        output_image = ""
+
+        while not self._process.timed_out and not self._process.has_crashed():
+            output = self._process.read_stdout_line(deadline)
+            if self._process.timed_out or self._process.has_crashed() or not output:
+                break
+
+            if output.startswith('diff'):  # This is the last line ImageDiff prints.
+                break
+
+            if output.startswith('Content-Length'):
+                m = re.match('Content-Length: (\d+)', output)
+                content_length = int(m.group(1))
+                output_image = self._process.read_stdout(deadline, content_length)
+                output = self._process.read_stdout_line(deadline)
+                break
+
+        stderr = self._process.pop_all_buffered_stderr()
+        err_str = ''
+        if stderr:
+            err_str += "ImageDiff produced stderr output:\n" + stderr
+        if self._process.timed_out:
+            err_str += "ImageDiff timed out\n"
+        if self._process.has_crashed():
+            err_str += "ImageDiff crashed\n"
+
+        # FIXME: There is no need to shut down the ImageDiff server after every diff.
+        self._process.stop()
+
+        diff_percent = 0
+        if output and output.startswith('diff'):
+            m = re.match('diff: (.+)% (passed|failed)', output)
+            if m.group(2) == 'passed':
+                return (None, 0, None)
+            diff_percent = float(m.group(1))
+
+        return (output_image, diff_percent, err_str or None)
+
+    def stop(self):
+        if self._process:
+            self._process.stop()
+            self._process = None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/image_diff_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,57 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit testing base class for Port implementations."""
+
+import unittest2 as unittest
+
+from webkitpy.port.server_process_mock import MockServerProcess
+from webkitpy.port.image_diff import ImageDiffer
+
+
+class FakePort(object):
+    def __init__(self, server_process_output):
+        self._server_process_constructor = lambda port, nm, cmd, env: MockServerProcess(lines=server_process_output)
+
+    def _path_to_image_diff(self):
+        return ''
+
+    def setup_environ_for_server(self, nm):
+        return None
+
+
+class TestImageDiffer(unittest.TestCase):
+    def test_diff_image_failed(self):
+        port = FakePort(['diff: 100% failed\n'])
+        image_differ = ImageDiffer(port)
+        self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0, None))
+
+    def test_diff_image_passed(self):
+        port = FakePort(['diff: 0% passed\n'])
+        image_differ = ImageDiffer(port)
+        self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), (None, 0, None))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/leakdetector.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,153 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.system.executive import ScriptError
+
+_log = logging.getLogger(__name__)
+
+
+# If other ports/platforms decide to support --leaks, we should see about sharing as much of this code as possible.
+# Right now this code is only used by Apple's MacPort.
+
+class LeakDetector(object):
+    def __init__(self, port):
+        # We should operate on a "platform" not a port here.
+        self._port = port
+        self._executive = port._executive
+        self._filesystem = port._filesystem
+
+    # We exclude the following reported leaks so they do not get in our way when looking for WebKit leaks:
+    # this allows us to ignore known leaks and be alerted only when new leaks occur. Some leaks are in old
+    # versions of the system frameworks that are being used by the leaks bots. Even though a leak has been
+    # fixed, it will be listed here until the bot has been updated with the newer frameworks.
+    def _types_to_exclude_from_leaks(self):
+        # Currently we don't have any type excludes from OS leaks, but we will likely add some again in the future.
+        return []
+
+    def _callstacks_to_exclude_from_leaks(self):
+        callstacks = [
+            "Flash_EnforceLocalSecurity",  # leaks in Flash plug-in code, rdar://problem/4449747
+            "ScanFromString", # <http://code.google.com/p/angleproject/issues/detail?id=249> leak in ANGLE
+        ]
+        if self._port.is_snowleopard():
+            callstacks += [
+                "readMakerNoteProps",  # <rdar://problem/7156432> leak in ImageIO
+                "QTKitMovieControllerView completeUISetup",  # <rdar://problem/7155156> leak in QTKit
+                "getVMInitArgs",  # <rdar://problem/7714444> leak in Java
+                "Java_java_lang_System_initProperties",  # <rdar://problem/7714465> leak in Java
+                "glrCompExecuteKernel",  # <rdar://problem/7815391> leak in graphics driver while using OpenGL
+                "NSNumberFormatter getObjectValue:forString:errorDescription:",  # <rdar://problem/7149350> Leak in NSNumberFormatter
+            ]
+        elif self._port.is_lion():
+            callstacks += [
+                "FigByteFlumeCustomURLCreateWithURL", # <rdar://problem/10461926> leak in CoreMedia
+                "PDFPage\(PDFPageInternal\) pageLayoutIfAvail", # <rdar://problem/10462055> leak in PDFKit
+                "SecTransformExecute", # <rdar://problem/10470667> leak in Security.framework
+                "_NSCopyStyleRefForFocusRingStyleClip", # <rdar://problem/10462031> leak in AppKit
+            ]
+        return callstacks
+
+    def _leaks_args(self, pid):
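+        # Builds the argument list for the run-leaks script, e.g.
+        # ['--exclude-callstack=Flash_EnforceLocalSecurity', ..., '--exclude-type=<type>', <pid>].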
+        leaks_args = []
+        for callstack in self._callstacks_to_exclude_from_leaks():
+            leaks_args += ['--exclude-callstack=%s' % callstack]
+        for excluded_type in self._types_to_exclude_from_leaks():
+            leaks_args += ['--exclude-type=%s' % excluded_type]
+        leaks_args.append(pid)
+        return leaks_args
+
+    def _parse_leaks_output(self, leaks_output):
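+        # Parses summary lines such as (see leakdetector_unittest.py):
+        #   'Process 5122: 337301 leaks for 6525216 total leaked bytes.'
+        #   '17 leaks excluded (not printed)'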
+        _, count, bytes = re.search(r'Process (?P<pid>\d+): (?P<count>\d+) leaks? for (?P<bytes>\d+) total', leaks_output).groups()
+        excluded_match = re.search(r'(?P<excluded>\d+) leaks? excluded', leaks_output)
+        excluded = excluded_match.group('excluded') if excluded_match else 0
+        return int(count), int(excluded), int(bytes)
+
+    def leaks_files_in_directory(self, directory):
+        return self._filesystem.glob(self._filesystem.join(directory, "*-leaks.txt"))
+
+    def leaks_file_name(self, process_name, process_pid):
+        # We name the file after the process and its PID to avoid overwriting previous leak results.
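+        # For example, DumpRenderTree with PID 1234 gets 'DumpRenderTree-1234-leaks.txt',
+        # which matches the '*-leaks.txt' glob used by leaks_files_in_directory().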
+        return "%s-%s-leaks.txt" % (process_name, process_pid)
+
+    def count_total_bytes_and_unique_leaks(self, leak_files):
+        merge_depth = 5  # ORWT had a --merge-leak-depth argument, but that seems out of scope for the run-webkit-tests tool.
+        args = [
+            '--merge-depth',
+            merge_depth,
+        ] + leak_files
+        try:
+            parse_malloc_history_output = self._port._run_script("parse-malloc-history", args, include_configuration_arguments=False)
+        except ScriptError as e:
+            _log.warn("Failed to parse leaks output: %s" % e.message_with_output())
+            return
+
+        # total: 5,888 bytes (0 bytes excluded).
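+        # unique_leak_count counts lines beginning with a call count ('<N> calls ...'), one per
+        # merged leak; total_bytes_string is the human-readable size from the 'total:' line above.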
+        unique_leak_count = len(re.findall(r'^(\d*)\scalls', parse_malloc_history_output, re.MULTILINE))
+        total_bytes_string = re.search(r'^total\:\s(.+)\s\(', parse_malloc_history_output, re.MULTILINE).group(1)
+        return (total_bytes_string, unique_leak_count)
+
+    def count_total_leaks(self, leak_file_paths):
+        total_leaks = 0
+        for leak_file_path in leak_file_paths:
+            # Leaks have been seen to include non-utf8 data, so we use read_binary_file.
+            # See https://bugs.webkit.org/show_bug.cgi?id=71112.
+            leaks_output = self._filesystem.read_binary_file(leak_file_path)
+            count, _, _ = self._parse_leaks_output(leaks_output)
+            total_leaks += count
+        return total_leaks
+
+    def check_for_leaks(self, process_name, process_pid):
+        _log.debug("Checking for leaks in %s" % process_name)
+        try:
+            # Oddly enough, run-leaks (or the underlying leaks tool) does not seem to always output utf-8,
+            # thus we pass decode_output=False.  Without this code we've seen errors like:
+            # "UnicodeDecodeError: 'utf8' codec can't decode byte 0x88 in position 779874: unexpected code byte"
+            leaks_output = self._port._run_script("run-leaks", self._leaks_args(process_pid), include_configuration_arguments=False, decode_output=False)
+        except ScriptError as e:
+            _log.warn("Failed to run leaks tool: %s" % e.message_with_output())
+            return
+
+        # FIXME: We end up parsing this output 3 times.  Once here and twice for summarizing.
+        count, excluded, bytes = self._parse_leaks_output(leaks_output)
+        adjusted_count = count - excluded
+        if not adjusted_count:
+            return
+
+        leaks_filename = self.leaks_file_name(process_name, process_pid)
+        leaks_output_path = self._filesystem.join(self._port.results_directory(), leaks_filename)
+        self._filesystem.write_binary_file(leaks_output_path, leaks_output)
+
+        # FIXME: Ideally we would not be logging from the worker process, but rather pass the leak
+        # information back to the manager and have it log.
+        if excluded:
+            _log.info("%s leaks (%s bytes including %s excluded leaks) were found, details in %s" % (adjusted_count, bytes, excluded, leaks_output_path))
+        else:
+            _log.info("%s leaks (%s bytes) were found, details in %s" % (count, bytes, leaks_output_path))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/modules/web/src/main/native/Tools/Scripts/webkitpy/port/leakdetector_unittest.py	Fri Jul 05 17:42:24 2013 +0400
@@ -0,0 +1,152 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.port.leakdetector import LeakDetector
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class LeakDetectorTest(unittest.TestCase):
+    def _mock_port(self):
+        class MockPort(object):
+            def __init__(self):
+                self._filesystem = MockFileSystem()
+                self._executive = MockExecutive()
+
+        return MockPort()
+
+    def _make_detector(self):
+        return LeakDetector(self._mock_port())
+
+    def test_leaks_args(self):
+        detector = self._make_detector()
+        detector._callstacks_to_exclude_from_leaks = lambda: ['foo bar', 'BAZ']
+        detector._types_to_exclude_from_leaks = lambda: ['abcdefg', 'hi jklmno']
+        expected_args = ['--exclude-callstack=foo bar', '--exclude-callstack=BAZ', '--exclude-type=abcdefg', '--exclude-type=hi jklmno', 1234]
+        self.assertEqual(detector._leaks_args(1234), expected_args)
+
+    example_leaks_output = """Process 5122: 663744 nodes malloced for 78683 KB
+Process 5122: 337301 leaks for 6525216 total leaked bytes.
+Leak: 0x38cb600  size=3072  zone: DefaultMallocZone_0x1d94000   instance of 'NSCFData', type ObjC, implemented in Foundation
+        0xa033f0b8 0x01001384 0x00000b3a 0x00000b3a     ..3.....:...:...
+        0x00000000 0x038cb620 0x00000000 0x00000000     .... ...........
+        0x00000000 0x21000000 0x726c6468 0x00000000     .......!hdlr....
+        0x00000000 0x7269646d 0x6c707061 0x00000000     ....mdirappl....
+        0x00000000 0x04000000 0x736c69c1 0x00000074     .........ilst...
+        0x6f74a923 0x0000006f 0x7461641b 0x00000061     #.too....data...
+        0x00000001 0x76614c00 0x2e323566 0x302e3236     .....Lavf52.62.0
+        0x37000000 0x6d616ea9 0x2f000000 0x61746164     ...7.nam.../data
+        ...
+Leak: 0x2a9c960  size=288  zone: DefaultMallocZone_0x1d94000
+        0x09a1cc47 0x1bda8560 0x3d472cd1 0xfbe9bccd     G...`....,G=....
+        0x8bcda008 0x9e972a91 0xa892cf63 0x2448bdb0     .....*..c.....H$
+        0x4736fc34 0xdbe2d94e 0x25f56688 0x839402a4     4.6GN....f.%....
+        0xd12496b3 0x59c40c12 0x8cfcab2a 0xd20ef9c4     ..$....Y*.......
+        0xe7c56b1b 0x5835af45 0xc69115de 0x6923e4bb     .k..E.5X......#i
+        0x86f15553 0x15d40fa9 0x681288a4 0xc33298a9     SU.........h..2.
+        0x439bb535 0xc4fc743d 0x7dfaaff8 0x2cc49a4a     5..C=t.....}J..,
+        0xdd119df8 0x7e086821 0x3d7d129e 0x2e1b1547     ....!h.~..}=G...
+        ...
+Leak: 0x25102fe0  size=176  zone: DefaultMallocZone_0x1d94000   string 'NSException Data'
+"""
+
+    example_leaks_output_with_exclusions = """
+Process 57064: 865808 nodes malloced for 81032 KB
+Process 57064: 282 leaks for 21920 total leaked bytes.
+Leak: 0x7fc506023960  size=576  zone: DefaultMallocZone_0x107c29000   URLConnectionLoader::LoaderConnectionEventQueue  C++  CFNetwork
+        0x73395460 0x00007fff 0x7488af40 0x00007fff     `T9s....@..t....
+        0x73395488 0x00007fff 0x46eecd74 0x0001ed83     .T9s....t..F....
+        0x0100000a 0x00000000 0x7488bfc0 0x00007fff     ...........t....
+        0x00000000 0x00000000 0x46eecd8b 0x0001ed83     ...........F....
+        0x00000000 0x00000000 0x00000000 0x00000000     ................
+        0x00000000 0x00000000 0x46eecda3 0x0001ed83     ...........F....
+        0x00000000 0x00000000 0x00000000 0x00000000     ................
+        0x00000000 0x00000000 0x46eecdbc 0x0001ed83     ...........F....
+        ...
+Leak: 0x7fc506025980  size=432  zone: DefaultMallocZone_0x107c29000   URLConnectionInstanceData  CFType  CFNetwork
+        0x74862b28 0x00007fff 0x00012b80 0x00000001     (+.t.....+......
+        0x73395310 0x00007fff 0x733953f8 0x00007fff     .S9s.....S9s....
+        0x4d555458 0x00000000 0x00000000 0x00002068     XTUM........h ..
+        0x00000000 0x00000000 0x00000b00 0x00000b00     ................
+        0x00000000 0x00000000 0x060259b8 0x00007fc5     .........Y......
+        0x060259bc 0x00007fc5 0x00000000 0x00000000     .Y..............
+        0x73395418 0x00007fff 0x06025950 0x00007fc5     .T9s....PY......
+        0x73395440 0x00007fff 0x00005013 0x00000001     @T9s.....P......
+        ...
+
+
+Binary Images:
+       0x107ac2000 -        0x107b4aff7 +DumpRenderTree (??? - ???) <5694BE03-A60A-30B2-9D40-27CFFCFB88EE> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/DumpRenderTree
+       0x107c2f000 -        0x107c58fff +libWebCoreTestSupport.dylib (535.8.0 - compatibility 1.0.0) <E4F7A13E-5807-30F7-A399-62F8395F9106> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/libWebCoreTestSupport.dylib
+17 leaks excluded (not printed)
+"""
+
+    def test_parse_leaks_output(self):
+        self.assertEqual(self._make_detector()._parse_leaks_output(self.example_leaks_output), (337301, 0, 6525216))
+        self.assertEqual(self._make_detector()._parse_leaks_output(self.example_leaks_output_with_exclusions), (282, 17, 21920))
+
+    def test_leaks_files_in_directory(self):
+        detector = self._make_detector()
+        self.assertEqual(detector.leaks_files_in_directory('/bogus-directory'), [])
+        detector._filesystem = MockFileSystem({
+            '/mock-results/DumpRenderTree-1234-leaks.txt': '',
+            '/mock-results/DumpRenderTree-23423-leaks.txt': '',
+            '/mock-results/DumpRenderTree-823-leaks.txt': '',
+        })
+        self.assertEqual(len(detector.leaks_files_in_directory('/mock-results')), 3)
+<