diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index a38ea4e7717a32..59fefbc7f08919 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -150,6 +150,8 @@ def __init__(self, code, output='', elapsed=None):
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        self.start = None
+        self.pid = None
         # The metrics reported by this test.
         self.metrics = {}
         # The micro-test results reported by this test.
diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
index baeb3635298f50..69166e00aba8c2 100644
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -109,6 +109,9 @@ def parse_args():
     execution_group.add_argument("--xunit-xml-output",
             type=lit.reports.XunitReport,
             help="Write XUnit-compatible XML test reports to the specified file")
+    execution_group.add_argument("--time-trace-output",
+            type=lit.reports.TimeTraceReport,
+            help="Write Chrome tracing compatible JSON to the specified file")
     execution_group.add_argument("--timeout",
             dest="maxIndividualTestTime",
             help="Maximum time to spend running a single test (in seconds). "
@@ -195,7 +198,7 @@ def parse_args():
     else:
         opts.shard = None
 
-    opts.reports = filter(None, [opts.output, opts.xunit_xml_output])
+    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output])
 
     return opts
diff --git a/llvm/utils/lit/lit/reports.py b/llvm/utils/lit/lit/reports.py
index 3ce961b44029e0..b43f77911673cf 100755
--- a/llvm/utils/lit/lit/reports.py
+++ b/llvm/utils/lit/lit/reports.py
@@ -136,3 +136,35 @@ def _get_skip_reason(self, test):
         if features:
             return 'Missing required feature(s): ' + ', '.join(features)
         return 'Unsupported configuration'
+
+
+class TimeTraceReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+        self.skipped_codes = {lit.Test.EXCLUDED,
+                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+
+    def write_results(self, tests, elapsed):
+        # Find when first test started so we can make start times relative.
+        first_start_time = min([t.result.start for t in tests])
+        events = [self._get_test_event(
+            x, first_start_time) for x in tests if x.result.code not in self.skipped_codes]
+
+        json_data = {'traceEvents': events}
+
+        with open(self.output_file, "w") as time_trace_file:
+            json.dump(json_data, time_trace_file, indent=2, sort_keys=True)
+
+    def _get_test_event(self, test, first_start_time):
+        test_name = test.getFullName()
+        elapsed_time = test.result.elapsed or 0.0
+        start_time = test.result.start - first_start_time if test.result.start else 0.0
+        pid = test.result.pid or 0
+        return {
+            'pid': pid,
+            'tid': 1,
+            'ph': 'X',
+            'ts': int(start_time * 1000000.),
+            'dur': int(elapsed_time * 1000000.),
+            'name': test_name,
+        }
diff --git a/llvm/utils/lit/lit/worker.py b/llvm/utils/lit/lit/worker.py
index 04fc77239e025e..ba9b919f50ebbb 100644
--- a/llvm/utils/lit/lit/worker.py
+++ b/llvm/utils/lit/lit/worker.py
@@ -6,6 +6,7 @@
 and store it in global variables. This reduces the cost of each task.
 """
 import contextlib
+import os
 import signal
 import time
 import traceback
@@ -65,6 +66,8 @@ def _execute(test, lit_config):
     start = time.time()
     result = _execute_test_handle_errors(test, lit_config)
     result.elapsed = time.time() - start
+    result.start = start
+    result.pid = os.getpid()
     return result
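
For context, the JSON written by `TimeTraceReport` follows the Chrome trace event format: each test is emitted as a complete event (`'ph': 'X'`) whose `ts` and `dur` are in microseconds, keyed by the worker's `pid`, so the file produced by `--time-trace-output` can be opened in `chrome://tracing` or post-processed. The sketch below shows one way to consume the output; the file name `lit-trace.json` is only an illustrative argument to `--time-trace-output`, not something fixed by this patch:

```python
import json

# Read the Chrome-tracing JSON produced by running lit with
# `--time-trace-output lit-trace.json` ("lit-trace.json" is an example name).
with open("lit-trace.json") as trace_file:
    trace = json.load(trace_file)

# Each entry is a complete event ('ph': 'X'); 'ts' and 'dur' are microseconds.
events = trace["traceEvents"]

# Print the ten slowest tests, converting microseconds back to seconds.
for event in sorted(events, key=lambda e: e["dur"], reverse=True)[:10]:
    print("{:8.2f}s  {}".format(event["dur"] / 1e6, event["name"]))
```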