
Commit 9ec5c0c

add force option, bypassing traceable()
Signed-off-by: Mateusz P. Nowak <[email protected]>
1 parent 86f36f7

File tree

11 files changed: +6858 -51 lines

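In effect, the new force_trace flag lets a caller request tracing even for a benchmark whose traceable() check would normally skip it. A minimal usage sketch follows; this is hypothetical caller code, not part of the commit, where bench stands for any concrete Benchmark instance and TracingType comes from this repo's tracing utilities:

    # Hypothetical: force UNITRACE tracing even if
    # bench.traceable(TracingType.UNITRACE) returns False.
    results = bench.run(
        env_vars={},
        run_trace=TracingType.UNITRACE,
        force_trace=True,
    )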

devops/scripts/benchmarks/benches/base.py

Lines changed: 24 additions & 22 deletions

@@ -89,12 +89,18 @@ def teardown(self):
         pass

     @abstractmethod
-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         """Execute the benchmark with the given environment variables.

         Args:
             env_vars: Environment variables to use when running the benchmark.
             run_trace: The type of tracing to run (NONE, UNITRACE, or FLAMEGRAPH).
+            force_trace: If True, ignore the traceable() method and force tracing.

         Returns:
             A list of Result objects with the benchmark results.
@@ -125,6 +131,7 @@ def run_bench(
         use_stdout=True,
         run_trace: TracingType = TracingType.NONE,
         extra_trace_opt=None,
+        force_trace: bool = False,
     ):
         env_vars = env_vars.copy()
         if options.ur is not None:
@@ -137,7 +144,10 @@ def run_bench(
         ld_libraries = options.extra_ld_libraries.copy()
         ld_libraries.extend(ld_library)

-        if self.traceable(TracingType.UNITRACE) and run_trace == TracingType.UNITRACE:
+        unitrace_output = None
+        if (
+            self.traceable(TracingType.UNITRACE) or force_trace
+        ) and run_trace == TracingType.UNITRACE:
             if extra_trace_opt is None:
                 extra_trace_opt = []
             unitrace_output, command = get_unitrace().setup(
@@ -146,29 +156,12 @@ def run_bench(
             log.debug(f"Unitrace output: {unitrace_output}")
             log.debug(f"Unitrace command: {' '.join(command)}")

-        try:
-            result = run(
-                command=command,
-                env_vars=env_vars,
-                add_sycl=add_sycl,
-                cwd=options.benchmark_cwd,
-                ld_library=ld_libraries,
-            )
-        except subprocess.CalledProcessError:
-            if run_trace == TracingType.UNITRACE:
-                get_unitrace().cleanup(options.benchmark_cwd, unitrace_output)
-            raise
-
-        if self.traceable(TracingType.UNITRACE) and run_trace == TracingType.UNITRACE:
-            get_unitrace().handle_output(unitrace_output)
-
         # flamegraph run

         perf_data_file = None
         if (
-            self.traceable(TracingType.FLAMEGRAPH)
-            and run_trace == TracingType.FLAMEGRAPH
-        ):
+            self.traceable(TracingType.FLAMEGRAPH) or force_trace
+        ) and run_trace == TracingType.FLAMEGRAPH:
             perf_data_file, command = get_flamegraph().setup(
                 self.name(), self.get_suite_name(), command
             )
@@ -184,12 +177,21 @@ def run_bench(
                 ld_library=ld_libraries,
             )
         except subprocess.CalledProcessError:
+            if run_trace == TracingType.UNITRACE and unitrace_output:
+                get_unitrace().cleanup(options.benchmark_cwd, unitrace_output)
             if run_trace == TracingType.FLAMEGRAPH and perf_data_file:
                 get_flamegraph().cleanup(perf_data_file)
             raise

         if (
-            self.traceable(TracingType.FLAMEGRAPH)
+            (self.traceable(TracingType.UNITRACE) or force_trace)
+            and run_trace == TracingType.UNITRACE
+            and unitrace_output
+        ):
+            get_unitrace().handle_output(unitrace_output)
+
+        if (
+            (self.traceable(TracingType.FLAMEGRAPH) or force_trace)
             and run_trace == TracingType.FLAMEGRAPH
             and perf_data_file
         ):
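With this patch, each tracing branch in run_bench() is gated by the same predicate. The helper below does not exist in the patch; it is only a sketch restating that condition for clarity:

    def should_trace(self, trace_type, run_trace, force_trace) -> bool:
        # Trace only when this tracing type was actually requested, and
        # either the benchmark opts in via traceable() or the caller
        # forces it.
        return (self.traceable(trace_type) or force_trace) and run_trace == trace_type

Note that force_trace overrides only the traceable() opt-in: run_trace must still name the matching tracing type, so TracingType.NONE never produces a trace.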

devops/scripts/benchmarks/benches/benchdnn.py

Lines changed: 7 additions & 1 deletion

@@ -132,7 +132,12 @@ def setup(self):
         if not self.bench_bin.exists():
             raise FileNotFoundError(f"Benchmark binary not found: {self.bench_bin}")

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         # Determine extra trace options based on tracing type
         if run_trace == TracingType.UNITRACE:
             extra_trace_opt = ["--chrome-dnn-logging"]
@@ -159,6 +164,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
             use_stdout=True,
             run_trace=run_trace,
             extra_trace_opt=extra_trace_opt,
+            force_trace=force_trace,
         )
         result_value = self._extract_time(output)

devops/scripts/benchmarks/benches/compute.py

Lines changed: 9 additions & 2 deletions

@@ -339,7 +339,12 @@ def explicit_group(self):
     def description(self) -> str:
         return ""

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         command = [
             f"{self.benchmark_bin}",
             f"--test={self.test}",
@@ -350,7 +355,9 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         command += self.bin_args()
         env_vars.update(self.extra_env_vars())

-        result = self.run_bench(command, env_vars, run_trace=run_trace)
+        result = self.run_bench(
+            command, env_vars, run_trace=run_trace, force_trace=force_trace
+        )
         parsed_results = self.parse_output(result)
         ret = []
         for label, median, stddev, unit in parsed_results:

devops/scripts/benchmarks/benches/gromacs.py

Lines changed: 7 additions & 1 deletion

@@ -169,7 +169,12 @@ def setup(self):
             ld_library=self.suite.oneapi.ld_libraries(),
         )

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         model_dir = self.grappa_dir / self.model

         env_vars.update({"SYCL_CACHE_PERSISTENT": "1"})
@@ -209,6 +214,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
             use_stdout=False,
             ld_library=self.suite.oneapi.ld_libraries(),
             run_trace=run_trace,
+            force_trace=force_trace,
         )

         if not self._validate_correctness(options.benchmark_cwd + "/md.log"):

devops/scripts/benchmarks/benches/llamacpp.py

Lines changed: 7 additions & 1 deletion

@@ -115,7 +115,12 @@ def get_tags(self):
     def lower_is_better(self):
         return False

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         command = [
             f"{self.benchmark_bin}",
             "--output",
@@ -145,6 +150,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
             env_vars,
             ld_library=self.bench.oneapi.ld_libraries(),
             run_trace=run_trace,
+            force_trace=force_trace,
         )
         parsed = self.parse_output(result)
         results = []

devops/scripts/benchmarks/benches/syclbench.py

Lines changed: 7 additions & 2 deletions

@@ -137,7 +137,12 @@ def setup(self):
             self.directory, "sycl-bench-build", self.bench_name
         )

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         self.outputfile = os.path.join(self.bench.directory, self.test + ".csv")

         command = [
@@ -151,7 +156,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         env_vars.update(self.extra_env_vars())

         # no output to stdout, all in outputfile
-        self.run_bench(command, env_vars, run_trace=run_trace)
+        self.run_bench(command, env_vars, run_trace=run_trace, force_trace=force_trace)

         with open(self.outputfile, "r") as f:
             reader = csv.reader(f)

devops/scripts/benchmarks/benches/test.py

Lines changed: 6 additions & 1 deletion

@@ -88,7 +88,12 @@ def notes(self) -> str:
     def unstable(self) -> str:
         return self.unstable_text

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         random_value = self.value + random.uniform(-1 * (self.diff), self.diff)
         return [
             Result(

devops/scripts/benchmarks/benches/umf.py

Lines changed: 7 additions & 1 deletion

@@ -137,7 +137,12 @@ def get_names_of_benchmarks_to_be_run(self, command, env_vars):

         return all_names

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         command = [f"{self.benchmark_bin}"]

         all_names = self.get_names_of_benchmarks_to_be_run(command, env_vars)
@@ -156,6 +161,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
             add_sycl=False,
             ld_library=[self.umf_lib],
             run_trace=run_trace,
+            force_trace=force_trace,
         )

         parsed = self.parse_output(result)

devops/scripts/benchmarks/benches/velocity.py

Lines changed: 14 additions & 3 deletions

@@ -130,7 +130,12 @@ def description(self) -> str:
     def get_tags(self):
         return ["SYCL", "application"]

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         env_vars.update(self.extra_env_vars())

         command = [
@@ -143,6 +148,7 @@ def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
             env_vars,
             ld_library=self.ld_libraries(),
             run_trace=run_trace,
+            force_trace=force_trace,
         )

         return [
@@ -287,15 +293,20 @@ class QuickSilver(VelocityBase):
     def __init__(self, vb: VelocityBench):
         super().__init__("QuickSilver", "qs", vb, "MMS/CTT")

-    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+    def run(
+        self,
+        env_vars,
+        run_trace: TracingType = TracingType.NONE,
+        force_trace: bool = False,
+    ) -> list[Result]:
         # TODO: fix the crash in QuickSilver when UR_L0_USE_IMMEDIATE_COMMANDLISTS=0
         if (
             "UR_L0_USE_IMMEDIATE_COMMANDLISTS" in env_vars
             and env_vars["UR_L0_USE_IMMEDIATE_COMMANDLISTS"] == "0"
         ):
             return None

-        return super().run(env_vars, run_trace)
+        return super().run(env_vars, run_trace, force_trace)

     def name(self):
         return "Velocity-Bench QuickSilver"
