
Commit 13c70c6

[test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)
Google benchmark is hoping to drop the CSV output format. This updates the microbenchmark module to use the JSON output. This fixes PR41327.

Reviewers: lebedev.ri

https://reviews.llvm.org/D60205

llvm-svn: 357704
1 parent 0d87980 commit 13c70c6
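For context, here is a rough sketch of the output-format change: the CSV table the module used to parse versus the JSON document produced by `--benchmark_format=json`. The payloads and the `BM_example` benchmark name are illustrative assumptions rather than exact Google Benchmark output, but the `benchmarks` array with `name` and `cpu_time` fields is what the updated module reads.

```python
import csv
import json

# Illustrative CSV output (the old format); the real header and columns
# come from the benchmark binary and may differ.
csv_output = """name,iterations,real_time,cpu_time,time_unit
BM_example,1000,125.0,124.0,ns
"""
for row in csv.reader(csv_output.splitlines()):
    if row[0] == 'name':
        continue  # skip the header row, as the old code did
    print(row[0], float(row[3]))  # name, cpu_time

# Illustrative JSON output (the new format, --benchmark_format=json);
# only the fields the module uses are shown here.
json_output = """{
  "benchmarks": [
    {"name": "BM_example", "iterations": 1000,
     "real_time": 125.0, "cpu_time": 124.0, "time_unit": "ns"}
  ]
}"""
data = json.loads(json_output)
for benchmark in data['benchmarks']:
    print(benchmark['name'], benchmark['cpu_time'])
```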

File tree

1 file changed: +11 -11 lines


litsupport/modules/microbenchmark.py

Lines changed: 11 additions & 11 deletions
```diff
@@ -1,17 +1,17 @@
 '''Test module to collect google benchmark results.'''
 from litsupport import shellcommand
 from litsupport import testplan
-import csv
+import json
 import lit.Test


 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
-    cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_format=json")
     # We need stdout outself to get the benchmark csv data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
-    benchfile = context.tmpBase + '.bench.csv'
+    benchfile = context.tmpBase + '.bench.json'
     cmd.stdout = benchfile
     context.microbenchfiles.append(benchfile)

@@ -25,18 +25,18 @@ def _mutateScript(context, script):
 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
         content = context.read_result_file(context, f)
-        lines = csv.reader(content.splitlines())
-        # First line: "name,iterations,real_time,cpu_time,time_unit..."
-        for line in lines:
-            if line[0] == 'name':
-                continue
+        data = json.loads(content)
+
+        # Create a micro_result for each benchmark
+        for benchmark in data['benchmarks']:
             # Name for MicroBenchmark
-            name = line[0]
+            name = benchmark['name']
+
             # Create Result object with PASS
             microBenchmark = lit.Test.Result(lit.Test.PASS)

-            # Index 3 is cpu_time
-            exec_time_metric = lit.Test.toMetricValue(float(line[3]))
+            # Add the exec_time metric for this result
+            exec_time_metric = lit.Test.toMetricValue(benchmark['cpu_time'])
             microBenchmark.addMetric('exec_time', exec_time_metric)

             # Add Micro Result
```
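As a standalone illustration of the new collection path, the sketch below parses a benchmark JSON file the way the updated `_collectMicrobenchmarkTime` does, but returns a plain dict instead of building `lit.Test.Result` objects; the function name, file path, and return shape are made up for this example.

```python
import json

def collect_microbenchmark_times(benchfile):
    """Return {benchmark name: cpu_time} from a Google Benchmark JSON file.

    Hypothetical stand-in for _collectMicrobenchmarkTime(): it mirrors the
    iteration over data['benchmarks'] and the name/cpu_time lookups from the
    diff above, without the lit.Test Result and metric machinery.
    """
    with open(benchfile) as f:
        data = json.load(f)

    times = {}
    for benchmark in data['benchmarks']:
        times[benchmark['name']] = benchmark['cpu_time']
    return times

# Hypothetical usage against a file written via `cmd.stdout = benchfile`:
#   times = collect_microbenchmark_times('Output/test.bench.json')
#   for name, cpu_time in times.items():
#       print(name, cpu_time)
```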
