55"""Run subcommand package."""
66
import argparse
import logging
from typing import Optional

import sqlalchemy

import dpbench.config as cfg
import dpbench.infrastructure as dpbi
from dpbench.infrastructure.benchmark_runner import BenchmarkRunner, RunConfig

from ._namespace import Namespace
1217
1318
@@ -94,6 +99,23 @@ def add_run_arguments(parser: argparse.ArgumentParser):
9499 default = None ,
95100 help = "Sycl device to overwrite for framework configurations." ,
96101 )
102+ parser .add_argument (
103+ "--skip-expected-failures" ,
104+ action = argparse .BooleanOptionalAction ,
105+ default = True ,
106+ help = "Either to save execution into database." ,
107+ )
108+
109+
110+ def _find_framework_config (implementation : str ) -> cfg .Framework :
111+ framework = None
112+
113+ for f in cfg .GLOBAL .frameworks :
114+ for impl in f .postfixes :
115+ if impl .postfix == implementation :
116+ framework = f
117+
118+ return framework
97119
98120
99121def execute_run (args : Namespace , conn : sqlalchemy .Engine ):
@@ -105,33 +127,63 @@ def execute_run(args: Namespace, conn: sqlalchemy.Engine):
105127 args: object with all input arguments.
106128 conn: database connection.
107129 """
108- import dpbench .config as cfg
109- import dpbench .infrastructure as dpbi
110- from dpbench .infrastructure .runner import run_benchmarks
111-
112130 cfg .GLOBAL = cfg .read_configs (
113131 benchmarks = args .benchmarks ,
114- implementations = args .implementations ,
132+ implementations = set ( args .implementations ) ,
115133 no_dpbench = not args .dpbench ,
116134 with_npbench = args .npbench ,
117135 with_polybench = args .polybench ,
118136 )
119137
138+ if args .all_implementations :
139+ args .implementations = {
140+ impl .postfix for impl in cfg .GLOBAL .implementations
141+ }
142+
120143 if args .sycl_device :
121144 for framework in cfg .GLOBAL .frameworks :
122145 framework .sycl_device = args .sycl_device
123146
124- if args .run_id is None :
147+ if args .save and args . run_id is None :
125148 args .run_id = dpbi .create_run (conn )
126149
127- run_benchmarks (
128- conn = conn ,
129- preset = args .preset ,
130- repeat = args .repeat ,
131- validate = args .validate ,
132- timeout = args .timeout ,
133- precision = args .precision ,
134- print_results = args .print_results ,
135- run_id = args .run_id ,
136- implementations = list (args .implementations ),
137- )
150+ runner = BenchmarkRunner ()
151+
152+ for benchmark in cfg .GLOBAL .benchmarks :
153+ print ("" )
154+ print (
155+ f"================ Benchmark { benchmark .name } ({ benchmark .module_name } ) ========================"
156+ )
157+ print ("" )
158+
159+ for implementation in args .implementations :
160+ framework = _find_framework_config (implementation )
161+
162+ if not framework :
163+ logging .error (
164+ f"Could not find framework for { implementation } implementation"
165+ )
166+ continue
167+
168+ logging .info (
169+ f"Running { benchmark .module_name } ({ implementation } ) on { framework .simple_name } "
170+ )
171+
172+ runner .run_benchmark_and_save (
173+ RunConfig (
174+ conn = conn ,
175+ benchmark = benchmark ,
176+ framework = framework ,
177+ implementation = implementation ,
178+ preset = args .preset ,
179+ repeat = args .repeat ,
180+ validate = args .validate ,
181+ timeout = args .timeout ,
182+ precision = args .precision ,
183+ print_results = args .print_results ,
184+ run_id = args .run_id ,
185+ skip_expected_failures = args .skip_expected_failures ,
186+ )
187+ )
188+
189+ runner .close_connections ()
0 commit comments