Commit 3b456fa

[libc++] Add an --open option to compare-benchmarks
This makes the tool more consistent with visualize-historical.
1 parent 72c512f · commit 3b456fa

1 file changed: +24, -8 lines

libcxx/utils/compare-benchmarks

Lines changed: 24 additions & 8 deletions
@@ -1,9 +1,11 @@
 #!/usr/bin/env python3

 import argparse
+import pathlib
 import re
 import statistics
 import sys
+import tempfile

 import plotly
 import tabulate
@@ -89,19 +91,27 @@ def main(argv):
                         help='Path to a LNT format file containing the benchmark results for the baseline.')
     parser.add_argument('candidate', type=argparse.FileType('r'),
                         help='Path to a LNT format file containing the benchmark results for the candidate.')
-    parser.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
-                        help='Path of a file where to output the resulting comparison. Default to stdout.')
+    parser.add_argument('--output', '-o', type=pathlib.Path, required=False,
+                        help='Path of a file where to output the resulting comparison. If the output format is `text`, '
+                             'default to stdout. If the output format is `chart`, default to a temporary file which is '
+                             'opened automatically once generated, but not removed after creation.')
     parser.add_argument('--metric', type=str, default='execution_time',
                         help='The metric to compare. LNT data may contain multiple metrics (e.g. code size, execution time, etc) -- '
-                             'this option allows selecting which metric is being analyzed. The default is "execution_time".')
+                             'this option allows selecting which metric is being analyzed. The default is `execution_time`.')
     parser.add_argument('--filter', type=str, required=False,
                         help='An optional regular expression used to filter the benchmarks included in the comparison. '
                              'Only benchmarks whose names match the regular expression will be included.')
     parser.add_argument('--format', type=str, choices=['text', 'chart'], default='text',
-                        help='Select the output format. "text" generates a plain-text comparison in tabular form, and "chart" '
-                             'generates a self-contained HTML graph that can be opened in a browser. The default is text.')
+                        help='Select the output format. `text` generates a plain-text comparison in tabular form, and `chart` '
+                             'generates a self-contained HTML graph that can be opened in a browser. The default is `text`.')
+    parser.add_argument('--open', action='store_true',
+                        help='Whether to automatically open the generated HTML file when finished. This option only makes sense '
+                             'when the output format is `chart`.')
     args = parser.parse_args(argv)

+    if args.format == 'text' and args.open:
+        parser.error('Passing --open makes no sense with --format=text')
+
     baseline = parse_lnt(args.baseline.readlines())
     candidate = parse_lnt(args.candidate.readlines())

@@ -114,11 +124,17 @@ def main(argv):

     if args.format == 'chart':
         figure = create_chart(benchmarks, baseline_series, candidate_series)
-        plotly.io.write_html(figure, file=args.output)
+        do_open = args.output is None or args.open
+        output = args.output or tempfile.NamedTemporaryFile(suffix='.html').name
+        plotly.io.write_html(figure, file=output, auto_open=do_open)
     else:
         diff = plain_text_comparison(benchmarks, baseline_series, candidate_series)
-        args.output.write(diff)
-        args.output.write('\n')
+        diff += '\n'
+        if args.output is not None:
+            with open(args.output, 'w') as out:
+                out.write(diff)
+        else:
+            sys.stdout.write(diff)

 if __name__ == '__main__':
     main(sys.argv[1:])
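After this change, a chart is written to a temporary file and opened automatically when no --output is given, and passing --open additionally opens the chart even when an explicit --output path is provided. A hypothetical invocation to illustrate (the .lnt and .html file names are placeholders, not part of the commit):

    $ libcxx/utils/compare-benchmarks baseline.lnt candidate.lnt --format=chart --output=comparison.html --open

Combining --open with --format=text is rejected via parser.error, as the diff above shows.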
