@@ -78,6 +78,28 @@ def run_benchmark(
     print_results=True,
     run_id: int = None,
 ):
81+ """Run specific benchmark.
82+
83+ Args:
84+ bname (str, semi-optional): Name of the benchmark. Either name, either
85+ configuration must be provided.
86+ benchmark (Benchmark, semi-optional): Benchmark configuration. Either
87+ name, either configuration must be provided.
88+ implementation_postfix: (str, optional): Implementation postfixes
89+ to be executed. If not provided, all possible implementations will
90+ be executed.
91+ preset (str, optional): Problem size. Defaults to "S".
92+ repeat (int, optional): Number of repetitions. Defaults to 1.
93+ validate (bool, optional): Whether to validate against NumPy.
94+ Defaults to True.
95+ timeout (float, optional): Timeout setting. Defaults to 10.0.
96+ conn: connection to database. If not provided results won't be stored.
97+ print_results (bool, optional): Either print results. Defaults to True.
98+ run_id (int, optional): Either store result to specific run_id.
99+ If not provided, new run_id will be created.
100+
101+ Returns: nothing.
102+ """
     bench_cfg = get_benchmark(benchmark=benchmark, benchmark_name=bname)
     bname = bench_cfg.name
     print("")
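A minimal usage sketch of the documented entry point (not part of the diff). The import path `dpbench.runner`, the benchmark name, and the implementation postfix below are assumptions and may differ in the actual package layout:

```python
# Usage sketch only; import path, benchmark name, and postfix are assumptions.
from dpbench.runner import run_benchmark

# Run a single benchmark by name against one implementation variant,
# validating the result against the NumPy reference implementation.
run_benchmark(
    bname="black_scholes",
    implementation_postfix="numba",
    preset="S",
    repeat=5,
    validate=True,
    timeout=60.0,
)
```

Passing a ready `Benchmark` object via `benchmark=` instead of `bname=` is the other documented way in; without a `conn` argument the results are only printed, not stored.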
@@ -119,37 +141,45 @@ def run_benchmarks(
     repeat=10,
     validate=True,
     timeout=200.0,
-    dbfile=None,
     print_results=True,
     run_id=None,
+    implementations: list[str] = None,
 ):
     """Run all benchmarks in the dpbench benchmark directory
+
     Args:
-        bconfig_path (str, optional): Path to benchmark configurations.
-            Defaults to None.
         preset (str, optional): Problem size. Defaults to "S".
         repeat (int, optional): Number of repetitions. Defaults to 1.
         validate (bool, optional): Whether to validate against NumPy.
-        Defaults to True.
+            Defaults to True.
         timeout (float, optional): Timeout setting. Defaults to 10.0.
+        print_results (bool, optional): Whether to print results. Defaults to True.
+        run_id (int, optional): Run id to store results under.
+            If not provided, a new run_id will be created.
+        implementations (list[str], optional): List of implementation postfixes
+            to be executed. If not provided, all implementations will
+            be executed.
+
+    Returns: nothing.
135164 """
136165
137166 print ("===============================================================" )
138167 print ("" )
139168 print ("***Start Running DPBench***" )
140- if not dbfile :
141- dbfile = "results.db"
142169
143170 dpbi .create_results_table ()
144- conn = dpbi .create_connection (db_file = dbfile )
171+ conn = dpbi .create_connection (db_file = "results.db" )
145172 if run_id is None :
146173 run_id = dpbi .create_run (conn )
147174
175+ if implementations is None :
176+ implementations = [impl .postfix for impl in cfg .GLOBAL .implementations ]
177+
148178 for b in cfg .GLOBAL .benchmarks :
149- for impl in cfg . GLOBAL . implementations :
179+ for impl in implementations :
150180 run_benchmark (
151181 benchmark = b ,
152- implementation_postfix = impl . postfix ,
182+ implementation_postfix = impl ,
153183 preset = preset ,
154184 repeat = repeat ,
155185 validate = validate ,
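With the new `implementations` parameter, the whole suite can be restricted to a subset of variants. A hedged sketch, again assuming the `dpbench.runner` import path and the postfix spellings:

```python
# Sketch of the new implementations filter; postfix names are assumptions.
from dpbench.runner import run_benchmarks

# Run every registered benchmark, but only the numpy and numba variants.
# Results go into the hard-coded results.db under a freshly created run_id.
run_benchmarks(
    preset="M",
    repeat=3,
    validate=True,
    timeout=120.0,
    implementations=["numpy", "numba"],
)
```

When `implementations` is left as None, the list is rebuilt from `cfg.GLOBAL.implementations`, so the default behaviour matches the old loop over every registered postfix.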
@@ -167,6 +197,14 @@ def run_benchmarks(
     print("===============================================================")
     print("")

-    dpbi.generate_impl_summary_report(conn, run_id=run_id)
+    if print_results:
+        dpbi.generate_impl_summary_report(
+            conn, run_id=run_id, implementations=implementations
+        )

-    return dbfile
+    dpbi.generate_performance_report(
+        conn,
+        run_id=run_id,
+        implementations=implementations,
+        headless=True,
+    )
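Since `run_benchmarks` no longer returns a dbfile path, reports for an earlier run have to be regenerated through the same `dpbi` helpers the diff already uses. A sketch under the assumptions that `dpbi` aliases dpbench's infrastructure module and that the chosen run_id already exists in results.db:

```python
# Report regeneration sketch; the dpbi import alias and run_id are assumptions.
import dpbench.infrastructure as dpbi

conn = dpbi.create_connection(db_file="results.db")
dpbi.generate_impl_summary_report(
    conn, run_id=42, implementations=["numpy", "numba"]
)
dpbi.generate_performance_report(
    conn,
    run_id=42,
    implementations=["numpy", "numba"],
    headless=True,  # presumably suppresses interactive chart windows
)
```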