Merged
78 changes: 78 additions & 0 deletions libcxx/test/benchmarks/spec.gen.py
@@ -0,0 +1,78 @@
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===##

# REQUIRES: enable-spec-benchmarks

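# The RUN lines below expand lit substitutions and persist them to files so that
# the Python part of this generator, which runs outside of lit's substitution
# machinery, can read them back.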
# RUN: mkdir -p %T
# RUN: echo "%{cxx}" > %T/cxx.subs
# RUN: echo "%{compile_flags}" > %T/compile_flags.subs
# RUN: echo "%{flags}" > %T/flags.subs
# RUN: echo "%{link_flags}" > %T/link_flags.subs
# RUN: echo "%{spec_dir}" > %T/spec_dir.subs
# RUN: %{python} %s %T
# END.

import json
import pathlib
import sys

test_dir = pathlib.Path(sys.argv[1])
cxx = (test_dir / 'cxx.subs').read_text().strip()
compile_flags = (test_dir / 'compile_flags.subs').read_text().strip()
flags = (test_dir / 'flags.subs').read_text().strip()
link_flags = (test_dir / 'link_flags.subs').read_text().strip()
spec_dir = pathlib.Path((test_dir / 'spec_dir.subs').read_text().strip())

# Set up the configuration file
test_dir.mkdir(parents=True, exist_ok=True)
spec_config = test_dir / 'spec-config.cfg'
spec_config.write_text(f"""
default:
ignore_errors = 1
iterations = 1
label = spec-stdlib
log_line_width = 4096
makeflags = --jobs=8
mean_anyway = 1
output_format = csv
preenv = 0
reportable = 0
tune = base
copies = 1
threads = 1
CC = cc -O3
Contributor:

I think in an ideal world we'd take the same optimization level as we have in the CXX flags. Not sure how easy that would be though.

Member Author:

I agree. I think it's difficult to achieve at the moment, and it shouldn't make a huge difference for what we care about, but the way we're configuring the C compiler as a whole is less than ideal. Ideally we'd also use CMAKE_C_COMPILER, but that would require threading additional information throughout the test suite just for that purpose, so for now I'm tempted to keep it simple.
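
A minimal sketch of one way to do that, assuming the last -O flag in the C++ compile flags should win (the optimization_level helper below is hypothetical, not part of this patch):

import re

def optimization_level(compile_flags, default='-O3'):
    # Take the last -O<level> flag, mirroring how compilers resolve duplicates.
    matches = re.findall(r'-Ofast|-O[0123sz]', compile_flags)
    return matches[-1] if matches else default

# The generated config could then set: CC = cc {optimization_level(compile_flags)}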

CXX = {cxx} {compile_flags} {flags} {link_flags} -Wno-error
CC_VERSION_OPTION = --version
CXX_VERSION_OPTION = --version
EXTRA_PORTABILITY = -DSPEC_NO_CXX17_SPECIAL_MATH_FUNCTIONS # because libc++ doesn't implement the special math functions yet
""")

# Build the list of benchmarks. We take all intrate and fprate benchmarks that contain C++ and
# discard the ones that contain Fortran, since this test suite isn't set up to build Fortran code.
spec_benchmarks = set()
no_fortran = set()
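# Each .bset file is JSON with a 'benchmarks' key listing benchmark names,
# e.g. {"benchmarks": ["523.xalancbmk_r", "526.blender_r"]} (names illustrative).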
with open(spec_dir / 'benchspec' / 'CPU' / 'intrate_any_cpp.bset', 'r') as f:
    spec_benchmarks.update(json.load(f)['benchmarks'])
with open(spec_dir / 'benchspec' / 'CPU' / 'fprate_any_cpp.bset', 'r') as f:
    spec_benchmarks.update(json.load(f)['benchmarks'])
with open(spec_dir / 'benchspec' / 'CPU' / 'no_fortran.bset', 'r') as f:
    no_fortran.update(json.load(f)['benchmarks'])
spec_benchmarks &= no_fortran

for benchmark in spec_benchmarks:
    print(f'#--- {benchmark}.sh.test')
    print('RUN: rm -rf %T') # clean up any previous (potentially incomplete) run
    print('RUN: mkdir %T')
    print(f'RUN: cp {spec_config} %T/spec-config.cfg')
    print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}')
    print('RUN: rm -rf %T/benchspec') # remove the temporary directory, which can become quite large

    # Parse the results into an LNT-compatible format. This also errors out if there are no CSV files, which
    # means that the benchmark didn't run properly (the `runcpu` command above never reports a failure).
    print('RUN: %{libcxx-dir}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
    print('RUN: cat %T/results.lnt')
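
For reference, each generated stanza looks roughly like this (benchmark name illustrative; {spec_config} expands to the absolute path of the config file written above):

#--- 523.xalancbmk_r.sh.test
RUN: rm -rf %T
RUN: mkdir %T
RUN: cp <test-dir>/spec-config.cfg %T/spec-config.cfg
RUN: %{spec_dir}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild 523.xalancbmk_r
RUN: rm -rf %T/benchspec
RUN: %{libcxx-dir}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt
RUN: cat %T/results.lnt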
11 changes: 11 additions & 0 deletions libcxx/utils/libcxx/test/params.py
@@ -374,6 +374,17 @@ def getSuitableClangTidy(cfg):
help="Whether to run the benchmarks in the test suite, to only dry-run them or to disable them entirely.",
actions=lambda mode: [AddFeature(f"enable-benchmarks={mode}")],
),
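    # This parameter can be enabled from the command line with e.g.
    # --param spec_dir=/path/to/spec2017 (path illustrative).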
    Parameter(
        name="spec_dir",
        type=str,
        default="none",
        help="Path to the SPEC benchmarks. This is required in order to run the SPEC benchmarks as part of "
        "the libc++ test suite. If provided, the appropriate SPEC toolset must already be built and installed.",
        actions=lambda spec_dir: [
            AddSubstitution("%{spec_dir}", spec_dir),
            AddFeature("enable-spec-benchmarks"),
        ] if spec_dir != "none" else [],
    ),
    Parameter(
        name="long_tests",
        choices=[True, False],
108 changes: 108 additions & 0 deletions libcxx/utils/parse-spec-result
@@ -0,0 +1,108 @@
#!/usr/bin/env python3

import argparse
import csv
import sys

def parse_table(rows, table_title):
    """
    Parse a CSV table out of an iterator over rows.

    Return a tuple containing (extracted headers, extracted rows).
    """
    in_table = False
    rows_iter = iter(rows)
    extracted = []
    headers = None
    for row in rows_iter:
        if not in_table and row == [table_title]:
            in_table = True
            next_row = next(rows_iter)
            assert next_row == [], f'There should be an empty row after the title of the table, found {next_row}'
            headers = next(rows_iter)  # Extract the headers
        elif in_table and row == []:  # An empty row marks the end of the table
            break
        elif in_table:
            extracted.append(row)

    assert len(extracted) != 0, f'Could not extract rows from the table; this is suspicious. Table title was {table_title}'
    assert headers is not None, f'Could not extract headers from the table; this is suspicious. Table title was {table_title}'

    return (headers, extracted)

def main(argv):
    parser = argparse.ArgumentParser(
        prog='parse-spec-result',
        description='Parse SPEC result files (in CSV format) and extract the selected result table, in the selected format.')
    parser.add_argument('filename', type=argparse.FileType('r'), nargs='+',
                        help='One or more CSV files to extract the results from. The results parsed from each file are concatenated '
                             'together, creating a single CSV table.')
    parser.add_argument('--table', type=str, choices=['full', 'selected'], default='full',
                        help='The name of the table to extract from SPEC results. `full` means extracting the Full Results Table '
                             'and `selected` means extracting the Selected Results Table. Default is `full`.')
    parser.add_argument('--output-format', type=str, choices=['csv', 'lnt'], default='csv',
                        help='The desired output format for the data. `csv` is CSV format and `lnt` is a format compatible with '
                             '`lnt importreport` (see https://llvm.org/docs/lnt/importing_data.html#importing-data-in-a-text-file).')
    parser.add_argument('--extract', type=str,
                        help='A comma-separated list of headers to extract from the table. If provided, only the data associated with '
                             'those headers will be present in the resulting data. Invalid header names are diagnosed. Please make '
                             'sure to use appropriate quoting for header names that contain spaces. This option only makes sense '
                             'when the output format is CSV.')
    parser.add_argument('--keep-not-run', action='store_true',
                        help='Keep entries whose \'Base Status\' is marked as \'NR\', aka \'Not Run\'. By default, such entries are discarded.')
    args = parser.parse_args(argv)

    if args.table == 'full':
        table_title = 'Full Results Table'
    elif args.table == 'selected':
        table_title = 'Selected Results Table'

    # Parse the headers and the rows in each file, aggregating all the results
    headers = None
    rows = []
    for file in args.filename:
        reader = csv.reader(file)
        (parsed_headers, parsed_rows) = parse_table(reader, table_title)
        assert headers is None or headers == parsed_headers, f'Found files with different headers: {headers} and {parsed_headers}'
        headers = parsed_headers
        rows.extend(parsed_rows)

    # Remove rows that were not run unless we were asked to keep them
    if not args.keep_not_run:
        not_run = headers.index('Base Status')
        rows = [row for row in rows if row[not_run] != 'NR']

    if args.extract is not None:
        if args.output_format != 'csv':
            raise RuntimeError('Passing --extract requires the output format to be csv')
        for h in args.extract.split(','):
            if h not in headers:
                raise RuntimeError(f'Header name {h} was not present in the parsed headers {headers}')

        extracted_fields = [headers.index(h) for h in args.extract.split(',')]
        headers = [headers[i] for i in extracted_fields]
        rows = [[row[i] for i in extracted_fields] for row in rows]

    # Print the results in the right format
    if args.output_format == 'csv':
        writer = csv.writer(sys.stdout)
        writer.writerow(headers)
        for row in rows:
            writer.writerow(row)
    elif args.output_format == 'lnt':
        benchmark = headers.index('Benchmark')
        time = headers.index('Est. Base Run Time')
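        # Emit one line per benchmark in `lnt importreport` text format,
        # e.g. 508_namd_r.execution_time 123 (values illustrative).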
        for row in rows:
            print(f"{row[benchmark].replace('.', '_')}.execution_time {row[time]}")

if __name__ == '__main__':
    main(sys.argv[1:])
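
As a sanity check, here is a minimal sketch of how parse_table behaves on a SPEC-style CSV fragment, assuming parse_table from the script above is in scope (the benchmark name and values are illustrative):

import csv
import io

sample = io.StringIO(
    'Full Results Table\n'
    '\n'
    'Benchmark,Base Status,Est. Base Run Time\n'
    '508.namd_r,S,123\n'
    '\n'
)
headers, rows = parse_table(csv.reader(sample), 'Full Results Table')
assert headers == ['Benchmark', 'Base Status', 'Est. Base Run Time']
assert rows == [['508.namd_r', 'S', '123']]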