New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Useful scripts for interacting with WPT logs #24841
Merged
+209
−0
Merged
Changes from all commits
Commits
Show all changes
10 commits
Select commit
Hold shift + click to select a range
4c82161
Add script to summarize WPT test directories with failing tests.
jdm a634d90
Add a script to extract logs for particular test filenames from full WP…
jdm ed715fc
Add a script to report timing data for all tests in a WPT log.
jdm 5867e11
Fix tidy issues in wpt-timing.py.
jdm 46af28a
Fix tidy issues in wpt-summarize.py.
jdm 6cad3db
Remove unused import.
jdm cc1aadf
Add explanatory comment for wpt-summarize.py.
jdm 0c294de
Add explanatory comment to wpt-timing.py.
jdm 804780f
Add explanatory comment for wpt_result_analyzer.py.
jdm 41d1eca
Fix visual indent error.
jdm File filter...
Filter file types
Jump to…
Jump to file
Failed to load files.
| @@ -0,0 +1,46 @@ | ||
#!/usr/bin/env python

# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# Usage: python wpt-summarize.py path/to/wpt.log /wpt/test/url.html [--full]
#
# Extract all log lines for a particular test file from a WPT
# log, outputting individual JSON objects that can be manipulated
# with tools like jq. If a particular URL results in no output,
# the URL is likely used as a reference test's reference file,
# so passing `--full` will find any output from Servo process
# command lines that include the URL.

import json
import sys


def summarize(lines, test_url, full_search=False):
    """Return the structured-log entries relevant to `test_url`.

    lines: iterable of raw JSON strings, one log entry per line.
    test_url: the WPT test URL to match (e.g. "/foo/bar.html").
    full_search: if True, additionally start matching on any entry whose
        "command" field mentions the URL. Reference files never get a
        test_start entry of their own, so this is the only way to find
        their output.
    """
    matched = []
    # Thread name of the currently-matching test run, or None when we
    # are scanning for the next test_start/command that mentions the URL.
    thread = None
    for line in lines:
        entry = json.loads(line)
        if thread and "thread" in entry:
            # Inside the matching test: keep every entry from its thread
            # until the test_end entry closes it.
            if entry["thread"] == thread:
                matched.append(entry)
                if entry.get("action") == "test_end":
                    thread = None
        elif (entry.get("action") == "test_start" and
              entry.get("test") == test_url):
            thread = entry["thread"]
            matched.append(entry)
        elif (full_search and
              "command" in entry and
              test_url in entry["command"]):
            thread = entry["thread"]
            matched.append(entry)
    return matched


if __name__ == "__main__":
    full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
    with open(sys.argv[1]) as f:
        for entry in summarize(f, sys.argv[2], full_search):
            print(json.dumps(entry))
| @@ -0,0 +1,95 @@ | ||
| #!/usr/bin/env python | ||
|
|
||
| # Copyright 2019 The Servo Project Developers. See the COPYRIGHT | ||
| # file at the top-level directory of this distribution. | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | ||
| # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | ||
| # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | ||
| # option. This file may not be copied, modified, or distributed | ||
| # except according to those terms. | ||
|
|
||
| # Usage: python wpt-timing.py [path/to/wpt.log] ... | ||
| # | ||
| # Given a series of WPT log files as arguments, this script | ||
| # extracts the status of each test file (ok; error; timeout; etc.) | ||
| # and how long it took to ran, then creates three CSV files, each | ||
| # sorted by runtime: | ||
| # | ||
| # - longest_ok.csv: all tests that passed | ||
| # - longest_err.csv: all tests that failed or had an error | ||
| # - timeouts.csv: all tests that timed out | ||
| # | ||
| # This information can be used to quickly determine the longest-running | ||
| # tests in the WPT testsuite in order to improve the overall testsuite | ||
| # runtime on CI. | ||
|
|
||
| import sys | ||
| import json | ||
| import collections | ||
| import csv | ||
|
|
||
|
|
||
| def process_log(data): | ||
| tests = {} | ||
| test_results = collections.defaultdict(list) | ||
|
|
||
| for entry in data: | ||
| entry = json.loads(entry) | ||
| if "action" in entry: | ||
| if entry["action"] == "test_start": | ||
| tests[entry["test"]] = { | ||
| "start": int(entry["time"]), | ||
| "end": 0, | ||
| } | ||
| elif entry["action"] == "test_end": | ||
| test = tests[entry["test"]] | ||
| test["end"] = int(entry["time"]) | ||
| test_results[entry["status"]] += [ | ||
| (entry["test"], test["end"] - test["start"]) | ||
| ] | ||
|
|
||
| return test_results | ||
|
|
||
| test_results = { | ||
| "SKIP": [], | ||
| "OK": [], | ||
| "PASS": [], | ||
| "ERROR": [], | ||
| "FAIL": [], | ||
| "CRASH": [], | ||
| "TIMEOUT": [], | ||
| } | ||
| for log_path in sys.argv[1:]: | ||
| with open(log_path) as f: | ||
| data = f.readlines() | ||
| for k, v in process_log(data).items(): | ||
| test_results[k] += v | ||
|
|
||
| print("Skipped %d tests." % len(test_results["SKIP"])) | ||
| print("%d tests timed out." % len(test_results["TIMEOUT"])) | ||
|
|
||
| longest_crash = sorted(test_results["CRASH"], key=lambda x: x[1], reverse=True) | ||
| print("Longest CRASH test took %dms (%s)" % (longest_crash[0][1], longest_crash[0][0])) | ||
|
|
||
| longest_ok = sorted( | ||
| test_results["PASS"] + test_results["OK"], | ||
| key=lambda x: x[1], reverse=True | ||
| ) | ||
| csv_data = [['Test path', 'Milliseconds']] | ||
| with open('longest_ok.csv', 'w') as csv_file: | ||
| writer = csv.writer(csv_file) | ||
| writer.writerows(csv_data + longest_ok) | ||
|
|
||
| longest_fail = sorted( | ||
| test_results["ERROR"] + test_results["FAIL"], | ||
| key=lambda x: x[1], reverse=True | ||
| ) | ||
| with open('longest_err.csv', 'w') as csv_file: | ||
| writer = csv.writer(csv_file) | ||
| writer.writerows(csv_data + longest_fail) | ||
|
|
||
| longest_timeout = sorted(test_results["TIMEOUT"], key=lambda x: x[1], reverse=True) | ||
| with open('timeouts.csv', 'w') as csv_file: | ||
| writer = csv.writer(csv_file) | ||
| writer.writerows(csv_data + longest_timeout) |
| @@ -0,0 +1,68 @@ | ||
#!/usr/bin/env python

# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# Usage: python etc/wpt_result_analyzer.py
#
# Analyze the state of WPT tests in Servo by walking all of the
# test directories, counting the number of tests present, and
# counting the number of ini files present in the corresponding
# test result directory. Prints out a list of directories that
# have non-zero failure counts, ordered by the number of failing
# tests and then by the percentage of tests that fail.

import os

# Directory layout of the Servo checkout this script is run from.
TEST_ROOT = os.path.join('tests', 'wpt', 'web-platform-tests')
META_ROOT = os.path.join('tests', 'wpt', 'metadata')

# File extensions that mark a file as an actual runnable WPT test.
TEST_EXTS = ('.html', '.htm', '.xht', '.xhtml',
             '.window.js', '.worker.js', '.any.js')


def count_test_files(file_names):
    """Return how many names in `file_names` look like WPT test files."""
    return sum(1 for name in file_names if name.endswith(TEST_EXTS))


def count_meta_files(file_names):
    """Return the number of expectation (.ini) files in `file_names`,
    excluding the per-directory __dir__.ini metadata file, which does
    not correspond to a failing test."""
    num_files = len(file_names)
    if '__dir__.ini' in file_names:
        num_files -= 1
    return num_files


if __name__ == "__main__":
    test_counts = {}
    meta_counts = {}

    for base_dir, dir_names, files in os.walk(TEST_ROOT):
        if base_dir == TEST_ROOT:
            continue
        rel_base = os.path.relpath(base_dir, TEST_ROOT)
        # Only consider test directories that have a corresponding
        # metadata directory (i.e. at least one recorded result).
        if not os.path.exists(os.path.join(META_ROOT, rel_base)):
            continue
        test_counts[rel_base] = count_test_files(files)

    for base_dir, dir_names, files in os.walk(META_ROOT):
        if base_dir == META_ROOT:
            continue
        rel_base = os.path.relpath(base_dir, META_ROOT)
        meta_counts[rel_base] = count_meta_files(files)

    final_counts = []
    for (test_dir, test_count) in test_counts.items():
        if not test_count:
            continue
        meta_count = meta_counts.get(test_dir, 0)
        final_counts.append((test_dir, test_count, meta_count))

    print('Test counts')
    print('dir: %% failed (num tests / num failures)')
    # Explicit float division keeps the ordering and the percentage
    # correct even when this script runs under Python 2, where int/int
    # truncates to 0 for every ratio below 1.
    by_ratio = sorted(final_counts, key=lambda x: float(x[2]) / x[1])
    for (test_dir, test_count, meta_count) in reversed(sorted(by_ratio, key=lambda x: x[2])):
        if not meta_count:
            continue
        print('%s: %.2f%% (%d / %d)'
              % (test_dir, float(meta_count) / test_count * 100,
                 test_count, meta_count))
ProTip!
Use n and p to navigate between commits in a pull request.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
won't this skip test_starts from other threads?