Merged
2 changes: 1 addition & 1 deletion libcxx/test/benchmarks/spec.gen.py
@@ -74,5 +74,5 @@

# Parse the results into a LNT-compatible format. This also errors out if there are no CSV files, which
# means that the benchmark didn't run properly (the `runcpu` command above never reports a failure).
-print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
+print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
print(f'RUN: cat %T/results.lnt')
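For context, the LNT text format referenced here (https://llvm.org/docs/lnt/importing_data.html#importing-data-in-a-text-file) is one `<test-name>.<metric> <value>` entry per line, so the generated results.lnt might contain lines such as the following (benchmark name and timing are invented for illustration):

    525_x264_r.execution_time 173.4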
4 changes: 4 additions & 0 deletions libcxx/utils/libcxx/test/format.py
@@ -12,6 +12,8 @@
import os
import re

# format.py lives at libcxx/utils/libcxx/test/format.py, so stripping three
# path components from its location yields the libcxx/utils directory, where
# the result-parsing scripts live.
THIS_FILE = os.path.abspath(__file__)
LIBCXX_UTILS = os.path.dirname(os.path.dirname(os.path.dirname(THIS_FILE)))

def _getTempPaths(test):
    """
@@ -353,6 +355,8 @@ def execute(self, test, litConfig):
            ]
            if "enable-benchmarks=run" in test.config.available_features:
                steps += ["%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%T/benchmark-result.json --benchmark_out_format=json"]
                # Post-process the Google Benchmark JSON output into an LNT-compatible results file.
                parse_results = os.path.join(LIBCXX_UTILS, 'parse-google-benchmark-results')
                steps += [f"{parse_results} %T/benchmark-result.json --output-format=lnt > %T/results.lnt"]
            return self._executeShTest(test, litConfig, steps)
        elif re.search('[.]gen[.][^.]+$', filename): # This only happens when a generator test is not supported
            return self._executeShTest(test, litConfig, [])
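For orientation, after lit applies its substitutions the two appended benchmark steps expand to something like the following (the concrete paths are illustrative, not taken from this diff; %T is the test's temporary directory, %t.exe the built test executable, and %{exec} the configured executor):

    <executor> <tmpdir>/t.exe --benchmark_out=<tmpdir>/benchmark-result.json --benchmark_out_format=json
    <llvm-root>/libcxx/utils/parse-google-benchmark-results <tmpdir>/benchmark-result.json --output-format=lnt > <tmpdir>/results.lnt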
45 changes: 45 additions & 0 deletions libcxx/utils/parse-google-benchmark-results
@@ -0,0 +1,45 @@
#!/usr/bin/env python3

import argparse
import csv
import json
import sys

def main(argv):
    parser = argparse.ArgumentParser(
        prog='parse-google-benchmark-results',
        description='Parse Google Benchmark result files (in JSON format) into CSV or LNT compatible output.')
    parser.add_argument('filename', type=argparse.FileType('r'), nargs='+',
                        help='One or more JSON files to extract the results from. The results parsed from each '
                             'file are concatenated together.')
    parser.add_argument('--timing', type=str, choices=['real_time', 'cpu_time'], default='real_time',
                        help='The timing to extract from the Google Benchmark results. This can either be the '
                             '"real time" or the "CPU time". Default is "real time".')
    parser.add_argument('--output-format', type=str, choices=['csv', 'lnt'], default='csv',
                        help='The desired output format for the data. `csv` is CSV format and `lnt` is a format compatible with '
                             '`lnt importreport` (see https://llvm.org/docs/lnt/importing_data.html#importing-data-in-a-text-file).')
    args = parser.parse_args(argv)

    # Parse the data from all files, aggregating the results
    headers = ['Benchmark', args.timing]
    rows = []
    for file in args.filename:
        js = json.load(file)
        for bm in js['benchmarks']:
            row = [bm['name'], bm[args.timing]]
            rows.append(row)

    # Print the results in the right format
    if args.output_format == 'csv':
        writer = csv.writer(sys.stdout)
        writer.writerow(headers)
        for row in rows:
            writer.writerow(row)
    elif args.output_format == 'lnt':
        benchmark = headers.index('Benchmark')
        time = headers.index(args.timing)
        for row in rows:
            print(f'{row[benchmark].replace(".", "_")}.execution_time {row[time]}')

if __name__ == '__main__':
    main(sys.argv[1:])
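To make the two output modes concrete, here is a hedged sketch of a run (the input file name, benchmark name, and timings are invented; the script only reads the "benchmarks" array that Google Benchmark emits with --benchmark_out_format=json). Given a benchmark-result.json containing

    {
      "benchmarks": [
        {"name": "bm.push_back", "real_time": 42.7, "cpu_time": 41.9, "time_unit": "ns"}
      ]
    }

the default CSV mode prints

    Benchmark,real_time
    bm.push_back,42.7

while --output-format=lnt replaces dots in the benchmark name (presumably because LNT treats dots as name/metric separators) and prints

    bm_push_back.execution_time 42.7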
2 changes: 1 addition & 1 deletion libcxx/utils/parse-spec-results
@@ -45,7 +45,7 @@ def main(argv):
        description='Parse SPEC result files (in CSV format) and extract the selected result table, in the selected format.')
    parser.add_argument('filename', type=argparse.FileType('r'), nargs='+',
                        help='One or more CSV files to extract the results from. The results parsed from each file are concatenated '
-                            'together, creating a single CSV table.')
+                            'together.')
    parser.add_argument('--table', type=str, choices=['full', 'selected'], default='full',
                        help='The name of the table to extract from SPEC results. `full` means extracting the Full Results Table '
                             'and `selected` means extracting the Selected Results Table. Default is `full`.')
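A hypothetical invocation, mirroring the RUN line in spec.gen.py above (the CSV filename is invented for illustration; --output-format=lnt and --table=selected are the options this diff documents):

    libcxx/utils/parse-spec-results result/CPUv8.001.refrate.train.csv --table=selected --output-format=lnt > results.lnt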