2010-08-24 Dirk Pranke <dpranke@chromium.org>
        Reviewed by Eric Seidel.

        speed up new-run-webkit-tests unit tests

        Add a --no-record-results flag that turns off generating the JSON
        results file on every test run. Generating the file requires us to
        fetch the old results down from the bots, which can be slow. This
        flag is off by default.

        Reduce the sleep time in wait_for_threads_to_finish from 0.1s to 0.01s.

        These changes together shorten the test cycle from ~4.5s to ~1.5s
        - a 3x speedup.

        https://bugs.webkit.org/show_bug.cgi?id=44553

        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:

Canonical link: https://commits.webkit.org/56728@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@65949 268f45cc-cd09-0410-ab3c-d52691b4dbfc
dpranke committed Aug 24, 2010
1 parent db81743 commit 17d02a3
Showing 3 changed files with 39 additions and 15 deletions.
21 changes: 21 additions & 0 deletions WebKitTools/ChangeLog
@@ -1,3 +1,24 @@
+2010-08-24  Dirk Pranke  <dpranke@chromium.org>
+
+        Reviewed by Eric Seidel.
+
+        speed up new-run-webkit-tests unit tests
+
+        Add a --no-record-results flag that turns off generating the JSON
+        results file on every test run. Generating the file requires us to
+        fetch the old results down from the bots, which can be slow. This
+        flag is off by default.
+
+        Reduce the sleep time in wait_for_threads_to_finish from 0.1s to 0.01s.
+
+        These changes together shorten the test cycle from ~4.5s to ~1.5s
+        - a 3x speedup.
+
+        https://bugs.webkit.org/show_bug.cgi?id=44553
+
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
 2010-08-24  Tony Chang  <tony@chromium.org>
 
         Reviewed by Eric Seidel.
16 changes: 10 additions & 6 deletions WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -687,7 +687,7 @@ def _wait_for_threads_to_finish(self, threads, result_summary):
                 self.update_summary(result_summary)
 
                 if some_thread_is_alive:
-                    time.sleep(0.1)
+                    time.sleep(0.01)
 
         except KeyboardInterrupt:
             keyboard_interrupted = True
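
The hunk above tightens a simple polling loop: the runner wakes up
periodically to check whether any worker thread is still alive, so a
shorter sleep notices finished runs sooner at the cost of a few more
wake-ups. A standalone sketch of that pattern, using illustrative names
rather than the runner's actual API:

    import threading
    import time

    def wait_for_threads_to_finish(threads, poll_interval=0.01):
        # Poll until every worker thread has exited. A smaller interval
        # trims up to poll_interval of latency from each wait, which
        # adds up across the many short runs the unit tests perform.
        while any(t.is_alive() for t in threads):
            time.sleep(poll_interval)

    workers = [threading.Thread(target=time.sleep, args=(0.05,))
               for _ in range(4)]
    for t in workers:
        t.start()
    wait_for_threads_to_finish(workers)
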
@@ -779,12 +779,13 @@ def run(self, result_summary):
                 self._expectations, result_summary, retry_summary)
             self._printer.print_unexpected_results(unexpected_results)
 
-        # Write the same data to log files.
-        self._write_json_files(unexpected_results, result_summary,
-                               individual_test_timings)
+        if self._options.record_results:
+            # Write the same data to log files.
+            self._write_json_files(unexpected_results, result_summary,
+                                   individual_test_timings)
 
-        # Upload generated JSON files to appengine server.
-        self._upload_json_files()
+            # Upload generated JSON files to appengine server.
+            self._upload_json_files()
 
         # Write the summary to disk (results.html) and display it if requested.
         wrote_results = self._write_results_html_file(result_summary)
@@ -1545,6 +1546,9 @@ def parse_args(args=None):
             default=False, help="Clobbers test results from previous runs."),
         optparse.make_option("--platform",
             help="Override the platform for expected results"),
+        optparse.make_option("--no-record-results", action="store_false",
+            default=True, dest="record_results",
+            help="Don't record the results."),
         # old-run-webkit-tests also has HTTP toggle options:
         # --[no-]http      Run (or do not run) http tests
         #                  (default: run)
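
The new option above uses optparse's store_false idiom: record_results
defaults to True, and passing --no-record-results flips it to False, so
the bots keep recording results unless a caller opts out. A minimal
self-contained sketch of that behavior; the option strings mirror the
patch, but the bare OptionParser is illustrative (the real script
assembles its options from optparse.make_option lists):

    import optparse

    parser = optparse.OptionParser()
    # The destination defaults to True; passing the flag stores False.
    parser.add_option("--no-record-results", action="store_false",
                      default=True, dest="record_results",
                      help="Don't record the results.")

    options, _ = parser.parse_args([])
    assert options.record_results        # default: results are recorded

    options, _ = parser.parse_args(["--no-record-results"])
    assert not options.record_results    # the flag turns recording off
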
17 changes: 8 additions & 9 deletions WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -48,16 +48,18 @@
 from webkitpy.thirdparty.mock import Mock
 
 
-def passing_run(args, port_obj=None, logging_included=False):
-    if not logging_included:
-        args.extend(['--print', 'nothing'])
+def passing_run(args, port_obj=None, record_results=False):
+    args.extend(['--print', 'nothing'])
+    if not record_results:
+        args.append('--no-record-results')
     options, args = run_webkit_tests.parse_args(args)
     if port_obj is None:
         port_obj = port.get(options.platform, options)
     res = run_webkit_tests.run(port_obj, options, args)
     return res == 0
 
 def logging_run(args):
+    args.extend(['--no-record-results'])
     options, args = run_webkit_tests.parse_args(args)
     port_obj = port.get(options.platform, options)
     buildbot_output = array_stream.ArrayStream()
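
With the change above, passing_run injects --no-record-results into
every invocation unless a test opts back in, and logging_run always
injects it; this is what keeps the unit tests off the slow path that
fetches old results from the bots. A condensed sketch of the
argument-injection pattern, where fake_run is an invented stand-in for
run_webkit_tests.parse_args plus run:

    def fake_run(argv):
        # Stand-in for parsing and running the tests; returns argv so
        # the injected flags are visible.
        return argv

    def passing_run(args, record_results=False):
        args = list(args) + ['--print', 'nothing']
        if not record_results:
            args.append('--no-record-results')
        return fake_run(args)

    print(passing_run(['--platform', 'test']))
    # ['--platform', 'test', '--print', 'nothing', '--no-record-results']
    print(passing_run(['--platform', 'test'], record_results=True))
    # ['--platform', 'test', '--print', 'nothing']
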
@@ -74,32 +76,29 @@ def test_fast(self):
         self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
         self.assertTrue(passing_run(['--platform', 'test',
                                      'text/article-element.html']))
-        self.assertTrue(passing_run(['--platform', 'test',
-                                     '--child-processes', '1',
-                                     '--print', 'unexpected']))
-
-    def test_child_processes(self):
+
+    def test_one_child_process(self):
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print', 'config', '--child-processes',
              '1'])
         self.assertTrue('Running one DumpRenderTree\n'
                         in regular_output.get())
 
     def test_two_child_processes(self):
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print', 'config', '--child-processes',
              '2'])
         self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                         in regular_output.get())
 
     def test_last_results(self):
-        passing_run(['--platform', 'test'])
+        passing_run(['--platform', 'test'], record_results=True)
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print-last-failures'])
         self.assertEqual(regular_output.get(), ['\n\n'])
         self.assertEqual(buildbot_output.get(), [])
 
 
 def _mocked_open(original_open, file_list):
     def _wrapper(name, mode, encoding):
         if name.find("-expected.") != -1 and mode == "w":
