@@ -276,6 +276,7 @@ def __init__(self, name, bench_path, benchmarks, python_path=None):
276
276
def rules (self , output , benchmarks , bm_suite_args ):
277
277
bench_name = os .path .basename (os .path .splitext (benchmarks [0 ])[0 ])
278
278
arg = " " .join (self ._benchmarks [bench_name ])
279
+
279
280
return [
280
281
# warmup curves
281
282
StdOutRule (
@@ -309,6 +310,21 @@ def rules(self, output, benchmarks, bm_suite_args):
309
310
),
310
311
]
311
312
313
def runAndReturnStdOut(self, benchmarks, bmSuiteArgs):
    """Run the benchmarks via the superclass, then normalize the 'host-vm' dimension.

    After delegating to the parent suite, any 'host-vm' value that starts
    with 'graalvm-ce' or 'graalvm-ee' is collapsed to that bare prefix so
    results are grouped under the canonical host-VM name.

    Returns the (ret_code, out, dims) triple from the superclass, with
    dims possibly rewritten in place.
    """
    # host-vm rewrite rules
    ret_code, out, dims = super(PythonBenchmarkSuite, self).runAndReturnStdOut(benchmarks, bmSuiteArgs)

    # Collapse e.g. 'graalvm-ce-python' down to 'graalvm-ce' (same for -ee).
    for key in ('graalvm-ce', 'graalvm-ee'):
        host_vm = dims.get("host-vm")
        if host_vm and host_vm.startswith(key):
            dims['host-vm'] = key
            mx.logv("[DEBUG] replace 'host-vm': '{key}-python' -> '{key}'".format(key=key))

    return ret_code, out, dims
327
+
312
328
def run (self , benchmarks , bm_suite_args ):
313
329
results = super (PythonBenchmarkSuite , self ).run (benchmarks , bm_suite_args )
314
330
self .addAverageAcrossLatestResults (results )
0 commit comments