@@ -357,6 +357,22 @@ def rules(self, output, benchmarks, bm_suite_args):
                     "config.run-flags": "".join(arg),
                 }
             ),
+            # secondary metric(s)
+            StdOutRule(
+                r"### WARMUP detected at iteration: (?P<endOfWarmup>[0-9]+$)",
+                {
+                    "benchmark": '{}.{}'.format(self._name, bench_name),
+                    "metric.name": "end-of-warmup",
+                    "metric.iteration": 0,
+                    "metric.type": "numeric",
+                    "metric.value": ("<endOfWarmup>", int),
+                    "metric.unit": "s",
+                    "metric.score-function": "id",
+                    "metric.better": "lower",
+                    "config.run-flags": "".join(arg),
+                }
+            ),
+
             # no warmups
             StdOutRule(
                 r"^@@@ name=(?P<benchmark>[a-zA-Z0-9._\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
@@ -375,9 +391,9 @@ def rules(self, output, benchmarks, bm_suite_args):
         ]
 
     def runAndReturnStdOut(self, benchmarks, bmSuiteArgs):
-        # host-vm rewrite rules
         ret_code, out, dims = super(PythonBenchmarkSuite, self).runAndReturnStdOut(benchmarks, bmSuiteArgs)
 
+        # host-vm rewrite rules
         def _replace_host_vm(key):
             host_vm = dims.get("host-vm")
             if host_vm and host_vm.startswith(key):
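For context beyond the lines shown, `_replace_host_vm` closes over the `dims` dict returned by the superclass and normalizes the reported `host-vm` dimension. A sketch of that pattern; the dims value is invented, and collapsing a matching prefix to the key itself is an assumption about the body not shown in this hunk:

# Sketch of the host-vm rewrite pattern (values are illustrative).
dims = {"host-vm": "graalpython-ce-launcher"}

def _replace_host_vm(key):
    host_vm = dims.get("host-vm")
    if host_vm and host_vm.startswith(key):
        # Assumption: the elided body collapses the dimension to the matched key.
        dims["host-vm"] = key

_replace_host_vm("graalpython-ce")
assert dims["host-vm"] == "graalpython-ce"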
@@ -450,10 +466,20 @@ def createVmCommandLineArgs(self, benchmarks, bmSuiteArgs):
         if "-i" not in run_args:
             run_args += self._benchmarks[benchmark]
+            num_iterations = self.defaultIterations(benchmark) + self.getExtraIterationCount(self.defaultIterations(benchmark))
+            run_args[run_args.index("-i") + 1] = str(num_iterations)
         vm_options, run_args = self.postprocess_run_args(run_args)
         cmd_args.extend(run_args)
         return vm_options + vm_args + cmd_args
 
+    def defaultIterations(self, bm):
+        default_bench_args = self._benchmarks[bm]
+        if "-i" in default_bench_args:
+            bench_idx = default_bench_args.index("-i")
+            if bench_idx + 1 < len(default_bench_args):
+                return int(default_bench_args[bench_idx + 1])
+        return DEFAULT_ITERATIONS
+
     def benchmarkList(self, bm_suite_args):
         return list(self._benchmarks.keys())
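The net effect in `createVmCommandLineArgs` is that the `-i` value handed to the benchmark becomes the suite's default iteration count plus whatever `getExtraIterationCount` (inherited from the mx benchmark API) adds on top. A standalone sketch of the `defaultIterations` parsing, with an assumed stand-in for the module-level `DEFAULT_ITERATIONS` constant:

DEFAULT_ITERATIONS = 10  # assumption: stands in for the module-level constant

def default_iterations(bench_args):
    # Mirror of defaultIterations above: read the value following "-i",
    # falling back to the module default when "-i" is absent or has no value.
    if "-i" in bench_args:
        idx = bench_args.index("-i")
        if idx + 1 < len(bench_args):
            return int(bench_args[idx + 1])
    return DEFAULT_ITERATIONS

assert default_iterations(["-i", "15", "--size", "large"]) == 15
assert default_iterations(["--size", "large"]) == DEFAULT_ITERATIONS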