@@ -176,6 +176,15 @@ def _parse_cli_arguments():
     parser.add_argument(
         "--output-file", type=Path, help="File path to write JSON formatted results to."
     )
+    parser.add_argument(
+        "--run-once-and-discard",
+        action="store_true",
+        help=(
+            "Run the benchmark function once first without recording the time, to "
+            "ignore the effect of any initial one-off costs such as just-in-time "
+            "compilation."
+        ),
+    )
     return parser.parse_args()


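As a quick sanity check, here is a minimal, self-contained sketch of how the new flag surfaces after parsing (flag names taken from the hunk above; argparse maps the dashes in the option name to underscores on the attribute, and store_true defaults to False when the flag is absent):

    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser()
    parser.add_argument("--output-file", type=Path)
    parser.add_argument("--run-once-and-discard", action="store_true")

    # Passing the flag sets the attribute to True; omitting it leaves False.
    assert parser.parse_args(["--run-once-and-discard"]).run_once_and_discard
    assert not parser.parse_args([]).run_once_and_discard
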
@@ -206,6 +215,7 @@ def run_benchmarks(
     number_repeats,
     print_results=True,
     parameter_overrides=None,
+    run_once_and_discard=False,
 ):
     """Run a set of benchmarks.

@@ -219,6 +229,9 @@
         print_results: Whether to print benchmark results to stdout.
         parameter_overrides: Dictionary specifying any overrides for parameter values
             set in `benchmark` decorator.
+        run_once_and_discard: Whether to run the benchmark function once first
+            without recording the time, to ignore the effect of any initial
+            one-off costs such as just-in-time compilation.

     Returns:
         Dictionary containing timing (and potentially memory usage) results for each
@@ -236,6 +249,8 @@
         try:
             precomputes = benchmark.setup(**parameter_set)
             benchmark_function = partial(benchmark, **precomputes, **parameter_set)
+            if run_once_and_discard:
+                benchmark_function()
             run_times = [
                 time / number_runs
                 for time in timeit.repeat(
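Outside the diff context, the warm-up-and-discard pattern this hunk adds looks roughly like the following self-contained sketch; `benchmark` here is a hypothetical stand-in for a function whose first call pays a one-off cost such as just-in-time compilation:

    import timeit
    from functools import partial

    def benchmark(x, scale=2.0):
        # Stand-in workload; imagine the first call triggers JIT compilation.
        return x * scale

    benchmark_function = partial(benchmark, x=1.0)

    run_once_and_discard = True
    if run_once_and_discard:
        benchmark_function()  # untimed warm-up call; the result is discarded

    number_runs, number_repeats = 1000, 5
    run_times = [
        time / number_runs
        for time in timeit.repeat(
            benchmark_function, number=number_runs, repeat=number_repeats
        )
    ]
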
@@ -300,6 +315,7 @@ def parse_args_collect_and_run_benchmarks(module=None):
         number_runs=args.number_runs,
         number_repeats=args.repeats,
         parameter_overrides=parameter_overrides,
+        run_once_and_discard=args.run_once_and_discard,
     )
     if args.output_file is not None:
         with open(args.output_file, "w") as f: