
Commit a40930b

[libc++] Add an optional integration of libc++'s test suite with SPEC (#156953)
This allows running the SPEC benchmarks as part of libc++'s own benchmark suite by pointing it at an external installation of SPEC.
1 parent aef11db commit a40930b
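As a minimal sketch of how one would try this from a configured libc++ build (the SPEC path below is a hypothetical example), lit is pointed at the SPEC installation through the new `spec_dir` parameter added in `libcxx/utils/libcxx/test/params.py`:

    llvm-lit -sv libcxx/test/benchmarks --param spec_dir=/opt/spec2017

The SPEC toolset must already be built and installed in that directory for this to work.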

3 files changed: +197 −0 lines changed

libcxx/test/benchmarks/spec.gen.py

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===##

# REQUIRES: enable-spec-benchmarks

# RUN: mkdir -p %T
# RUN: echo "%{cxx}" > %T/cxx.subs
# RUN: echo "%{compile_flags}" > %T/compile_flags.subs
# RUN: echo "%{flags}" > %T/flags.subs
# RUN: echo "%{link_flags}" > %T/link_flags.subs
# RUN: echo "%{spec_dir}" > %T/spec_dir.subs
# RUN: %{python} %s %T
# END.

import json
import pathlib
import sys

test_dir = pathlib.Path(sys.argv[1])
cxx = (test_dir / 'cxx.subs').open().read().strip()
compile_flags = (test_dir / 'compile_flags.subs').open().read().strip()
flags = (test_dir / 'flags.subs').open().read().strip()
link_flags = (test_dir / 'link_flags.subs').open().read().strip()
spec_dir = pathlib.Path((test_dir / 'spec_dir.subs').open().read().strip())

# Set up the configuration file
test_dir.mkdir(parents=True, exist_ok=True)
spec_config = test_dir / 'spec-config.cfg'
spec_config.write_text(f"""
default:
    ignore_errors = 1
    iterations = 1
    label = spec-stdlib
    log_line_width = 4096
    makeflags = --jobs=8
    mean_anyway = 1
    output_format = csv
    preenv = 0
    reportable = 0
    tune = base
    copies = 1
    threads = 1
    CC = cc -O3
    CXX = {cxx} {compile_flags} {flags} {link_flags} -Wno-error
    CC_VERSION_OPTION = --version
    CXX_VERSION_OPTION = --version
    EXTRA_PORTABILITY = -DSPEC_NO_CXX17_SPECIAL_MATH_FUNCTIONS # because libc++ doesn't implement the special math functions yet
""")

# Build the list of benchmarks. We take all intrate and fprate benchmarks that contain C++ and
# discard the ones that contain Fortran, since this test suite isn't set up to build Fortran code.
spec_benchmarks = set()
no_fortran = set()
with open(spec_dir / 'benchspec' / 'CPU' / 'intrate_any_cpp.bset', 'r') as f:
    spec_benchmarks.update(json.load(f)['benchmarks'])
with open(spec_dir / 'benchspec' / 'CPU' / 'fprate_any_cpp.bset', 'r') as f:
    spec_benchmarks.update(json.load(f)['benchmarks'])
with open(spec_dir / 'benchspec' / 'CPU' / 'no_fortran.bset', 'r') as f:
    no_fortran.update(json.load(f)['benchmarks'])
spec_benchmarks &= no_fortran

for benchmark in spec_benchmarks:
    print(f'#--- {benchmark}.sh.test')
    print(f'RUN: rm -rf %T')  # clean up any previous (potentially incomplete) run
    print(f'RUN: mkdir %T')
    print(f'RUN: cp {spec_config} %T/spec-config.cfg')
    print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}')
    print(f'RUN: rm -rf %T/benchspec')  # remove the temporary directory, which can become quite large

    # Parse the results into a LNT-compatible format. This also errors out if there are no CSV files, which
    # means that the benchmark didn't run properly (the `runcpu` command above never reports a failure).
    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
    print(f'RUN: cat %T/results.lnt')
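For illustration, here is the stanza this generator would print for one benchmark, taking 508.namd_r (a C++ fprate benchmark) as a stand-in; {spec_config} expands to the absolute path of the config file written above, shown here as a placeholder. The `#--- <name>.sh.test` markers split the output into one test per benchmark:

    #--- 508.namd_r.sh.test
    RUN: rm -rf %T
    RUN: mkdir %T
    RUN: cp <test-dir>/spec-config.cfg %T/spec-config.cfg
    RUN: %{spec_dir}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild 508.namd_r
    RUN: rm -rf %T/benchspec
    RUN: %{libcxx-dir}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt
    RUN: cat %T/results.lnt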

libcxx/utils/libcxx/test/params.py

Lines changed: 11 additions & 0 deletions
@@ -374,6 +374,17 @@ def getSuitableClangTidy(cfg):
         help="Whether to run the benchmarks in the test suite, to only dry-run them or to disable them entirely.",
         actions=lambda mode: [AddFeature(f"enable-benchmarks={mode}")],
     ),
+    Parameter(
+        name="spec_dir",
+        type=str,
+        default="none",
+        help="Path to the SPEC benchmarks. This is required in order to run the SPEC benchmarks as part of "
+        "the libc++ test suite. If provided, the appropriate SPEC toolset must already be built and installed.",
+        actions=lambda spec_dir: [
+            AddSubstitution("%{spec_dir}", spec_dir),
+            AddFeature('enable-spec-benchmarks')
+        ] if spec_dir != "none" else [],
+    ),
     Parameter(
         name="long_tests",
         choices=[True, False],
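When `spec_dir` is anything other than the default `none`, this parameter defines the `%{spec_dir}` substitution consumed by the generated `RUN:` lines and adds the `enable-spec-benchmarks` feature that `spec.gen.py` declares in its `REQUIRES:` line; with the default, the SPEC tests remain unsupported and are skipped.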

libcxx/utils/parse-spec-result

Lines changed: 108 additions & 0 deletions
@@ -0,0 +1,108 @@
#!/usr/bin/env python3

import argparse
import csv
import sys

def parse_table(rows, table_title):
    """
    Parse a CSV table out of an iterator over rows.

    Return a tuple containing (extracted headers, extracted rows).
    """
    in_table = False
    rows_iter = iter(rows)
    extracted = []
    headers = None
    while True:
        try:
            row = next(rows_iter)
        except StopIteration:
            break

        if not in_table and row == [table_title]:
            in_table = True
            next_row = next(rows_iter)
            assert next_row == [], f'There should be an empty row after the title of the table, found {next_row}'
            headers = next(rows_iter)  # Extract the headers
            continue
        elif in_table and row == []:  # An empty row marks the end of the table
            in_table = False
            break
        elif in_table:
            extracted.append(row)

    assert len(extracted) != 0, f'Could not extract rows from the table, this is suspicious. Table title was {table_title}'
    assert headers is not None, f'Could not extract headers from the table, this is suspicious. Table title was {table_title}'

    return (headers, extracted)

def main(argv):
    parser = argparse.ArgumentParser(
        prog='parse-spec-result',
        description='Parse SPEC result files (in CSV format) and extract the selected result table, in the selected format.')
    parser.add_argument('filename', type=argparse.FileType('r'), nargs='+',
        help='One or more CSV files to extract the results from. The results parsed from each file are concatenated '
             'together, creating a single CSV table.')
    parser.add_argument('--table', type=str, choices=['full', 'selected'], default='full',
        help='The name of the table to extract from SPEC results. `full` means extracting the Full Results Table '
             'and `selected` means extracting the Selected Results Table. Default is `full`.')
    parser.add_argument('--output-format', type=str, choices=['csv', 'lnt'], default='csv',
        help='The desired output format for the data. `csv` is CSV format and `lnt` is a format compatible with '
             '`lnt importreport` (see https://llvm.org/docs/lnt/importing_data.html#importing-data-in-a-text-file).')
    parser.add_argument('--extract', type=str,
        help='A comma-separated list of headers to extract from the table. If provided, only the data associated with '
             'those headers will be present in the resulting data. Invalid header names are diagnosed. Please make '
             'sure to use appropriate quoting for header names that contain spaces. This option only makes sense '
             'when the output format is CSV.')
    parser.add_argument('--keep-not-run', action='store_true',
        help='Keep entries whose \'Base Status\' is marked as \'NR\', aka \'Not Run\'. By default, such entries are discarded.')
    args = parser.parse_args(argv)

    if args.table == 'full':
        table_title = 'Full Results Table'
    elif args.table == 'selected':
        table_title = 'Selected Results Table'

    # Parse the headers and the rows in each file, aggregating all the results
    headers = None
    rows = []
    for file in args.filename:
        reader = csv.reader(file)
        (parsed_headers, parsed_rows) = parse_table(reader, table_title)
        assert headers is None or headers == parsed_headers, f'Found files with different headers: {headers} and {parsed_headers}'
        headers = parsed_headers
        rows.extend(parsed_rows)

    # Remove rows that were not run unless we were asked to keep them
    if not args.keep_not_run:
        not_run = headers.index('Base Status')
        rows = [row for row in rows if row[not_run] != 'NR']

    if args.extract is not None:
        if args.output_format != 'csv':
            raise RuntimeError('Passing --extract requires the output format to be csv')
        for h in args.extract.split(','):
            if h not in headers:
                raise RuntimeError(f'Header name {h} was not present in the parsed headers {headers}')

        extracted_fields = [headers.index(h) for h in args.extract.split(',')]
        headers = [headers[i] for i in extracted_fields]
        rows = [[row[i] for i in extracted_fields] for row in rows]

    # Print the results in the right format
    if args.output_format == 'csv':
        writer = csv.writer(sys.stdout)
        writer.writerow(headers)
        for row in rows:
            writer.writerow(row)
    elif args.output_format == 'lnt':
        benchmark = headers.index('Benchmark')
        time = headers.index('Est. Base Run Time')
        for row in rows:
            print(f"{row[benchmark].replace('.', '_')}.execution_time {row[time]}")

if __name__ == '__main__':
    main(sys.argv[1:])
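As a sketch of the input this tool expects, parse_table looks for the table title on a row of its own, then an empty row, then the header row, then data rows up to the next empty row. The snippet below uses the parse_table function defined above with illustrative values; real SPEC CSV result files carry many more columns:

    import csv, io

    # Hypothetical two-benchmark result table (illustrative values only).
    sample = io.StringIO(
        'Full Results Table\n'
        '\n'
        'Benchmark,Base Status,Est. Base Run Time\n'
        '508.namd_r,S,123\n'
        '510.parest_r,NR,--\n'
        '\n'
    )
    headers, rows = parse_table(csv.reader(sample), 'Full Results Table')
    # headers == ['Benchmark', 'Base Status', 'Est. Base Run Time']
    # rows == [['508.namd_r', 'S', '123'], ['510.parest_r', 'NR', '--']]

With --output-format=lnt, each remaining row becomes one line that `lnt importreport` understands: the first row above would print `508_namd_r.execution_time 123`, while the `NR` row is dropped unless --keep-not-run is passed.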
