
Commit 86f36f7

significantly rebuild
Signed-off-by: Mateusz P. Nowak <[email protected]>
1 parent 0987a08 · commit 86f36f7

19 files changed: +650 -629 lines changed

devops/scripts/benchmarks/benches/base.py

Lines changed: 24 additions & 26 deletions
@@ -13,13 +13,14 @@
 from utils.utils import download, run
 from abc import ABC, abstractmethod
 from utils.unitrace import get_unitrace
-from utils.logger import log
 from utils.flamegraph import get_flamegraph
+from utils.logger import log
 
 
 class TracingType(Enum):
     """Enumeration of available tracing types."""
 
+    NONE = ""
     UNITRACE = "unitrace"
     FLAMEGRAPH = "flamegraph"
 
@@ -88,15 +89,12 @@ def teardown(self):
         pass
 
     @abstractmethod
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         """Execute the benchmark with the given environment variables.
 
         Args:
             env_vars: Environment variables to use when running the benchmark.
-            run_unitrace: Whether to run benchmark under Unitrace.
-            run_flamegraph: Whether to run benchmark under FlameGraph.
+            run_trace: The type of tracing to run (NONE, UNITRACE, or FLAMEGRAPH).
 
         Returns:
             A list of Result objects with the benchmark results.
@@ -125,10 +123,8 @@ def run_bench(
         ld_library=[],
         add_sycl=True,
         use_stdout=True,
-        run_unitrace=False,
-        extra_unitrace_opt=None,
-        run_flamegraph=False,
-        extra_perf_opt=None,  # VERIFY
+        run_trace: TracingType = TracingType.NONE,
+        extra_trace_opt=None,
     ):
         env_vars = env_vars.copy()
         if options.ur is not None:
@@ -141,11 +137,11 @@ def run_bench(
         ld_libraries = options.extra_ld_libraries.copy()
         ld_libraries.extend(ld_library)
 
-        if self.traceable(TracingType.UNITRACE) and run_unitrace:
-            if extra_unitrace_opt is None:
-                extra_unitrace_opt = []
+        if self.traceable(TracingType.UNITRACE) and run_trace == TracingType.UNITRACE:
+            if extra_trace_opt is None:
+                extra_trace_opt = []
             unitrace_output, command = get_unitrace().setup(
-                self.name(), command, extra_unitrace_opt
+                self.name(), command, extra_trace_opt
             )
             log.debug(f"Unitrace output: {unitrace_output}")
             log.debug(f"Unitrace command: {' '.join(command)}")
@@ -159,24 +155,22 @@ def run_bench(
                 ld_library=ld_libraries,
             )
         except subprocess.CalledProcessError:
-            if run_unitrace:
+            if run_trace == TracingType.UNITRACE:
                 get_unitrace().cleanup(options.benchmark_cwd, unitrace_output)
             raise
 
-        if self.traceable(TracingType.UNITRACE) and run_unitrace:
+        if self.traceable(TracingType.UNITRACE) and run_trace == TracingType.UNITRACE:
             get_unitrace().handle_output(unitrace_output)
 
         # flamegraph run
 
-        ld_libraries = options.extra_ld_libraries.copy()
-        ld_libraries.extend(ld_library)
-
         perf_data_file = None
-        if self.traceable(TracingType.FLAMEGRAPH) and run_flamegraph:
-            if extra_perf_opt is None:
-                extra_perf_opt = []
+        if (
+            self.traceable(TracingType.FLAMEGRAPH)
+            and run_trace == TracingType.FLAMEGRAPH
+        ):
             perf_data_file, command = get_flamegraph().setup(
-                self.name(), command, extra_perf_opt
+                self.name(), self.get_suite_name(), command
             )
             log.debug(f"FlameGraph perf data: {perf_data_file}")
             log.debug(f"FlameGraph command: {' '.join(command)}")
@@ -190,11 +184,15 @@ def run_bench(
                 ld_library=ld_libraries,
             )
         except subprocess.CalledProcessError:
-            if run_flamegraph and perf_data_file:
-                get_flamegraph().cleanup(options.benchmark_cwd, perf_data_file)
+            if run_trace == TracingType.FLAMEGRAPH and perf_data_file:
+                get_flamegraph().cleanup(perf_data_file)
             raise
 
-        if self.traceable(TracingType.FLAMEGRAPH) and run_flamegraph and perf_data_file:
+        if (
+            self.traceable(TracingType.FLAMEGRAPH)
+            and run_trace == TracingType.FLAMEGRAPH
+            and perf_data_file
+        ):
             svg_file = get_flamegraph().handle_output(
                 self.name(), perf_data_file, self.get_suite_name()
            )
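
Taken together, this hunk collapses two independent boolean flags (which could both be set at once) into one mutually exclusive choice. A minimal, self-contained sketch of the resulting dispatch, using the enum exactly as added to base.py; the list-wrapping lines are stand-ins for the real get_unitrace().setup() / get_flamegraph().setup() calls, not their actual behavior:

from enum import Enum


class TracingType(Enum):
    """Copy of the enum this commit adds to benches/base.py."""

    NONE = ""
    UNITRACE = "unitrace"
    FLAMEGRAPH = "flamegraph"


def wrap_command(command: list[str], run_trace: TracingType = TracingType.NONE) -> list[str]:
    # One enum comparison per tool replaces the old run_unitrace /
    # run_flamegraph flag pair; NONE falls through to a plain run.
    if run_trace == TracingType.UNITRACE:
        return ["unitrace", *command]  # stand-in for get_unitrace().setup(...)
    if run_trace == TracingType.FLAMEGRAPH:
        return ["perf", "record", *command]  # stand-in for get_flamegraph().setup(...)
    return command


print(wrap_command(["./benchmark", "--test=foo"], TracingType.FLAMEGRAPH))
# -> ['perf', 'record', './benchmark', '--test=foo']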

devops/scripts/benchmarks/benches/benchdnn.py

Lines changed: 10 additions & 7 deletions
@@ -6,7 +6,7 @@
 
 from pathlib import Path
 
-from .base import Suite, Benchmark
+from .base import Suite, Benchmark, TracingType
 from options import options
 from utils.utils import git_clone, run, create_build_path
 from utils.result import Result
@@ -132,9 +132,13 @@ def setup(self):
         if not self.bench_bin.exists():
             raise FileNotFoundError(f"Benchmark binary not found: {self.bench_bin}")
 
-    def run(
-        self, env_vars, run_flamegraph: bool = False, run_unitrace: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
+        # Determine extra trace options based on tracing type
+        if run_trace == TracingType.UNITRACE:
+            extra_trace_opt = ["--chrome-dnn-logging"]
+        else:
+            extra_trace_opt = None
+
         command = [
             str(self.bench_bin),
             *self.bench_args.split(),
@@ -153,9 +157,8 @@ def run(
             add_sycl=True,
             ld_library=ld_library,
             use_stdout=True,
-            run_unitrace=run_unitrace,
-            extra_unitrace_opt=["--chrome-dnn-logging"],
-            run_flamegraph=run_flamegraph,
+            run_trace=run_trace,
+            extra_trace_opt=extra_trace_opt,
         )
         result_value = self._extract_time(output)
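
With the consolidated run_bench signature, tool-specific flags now travel through the single extra_trace_opt argument and are selected from the enum, as the hunk above does for benchdnn. A small sketch of that selection in isolation (TracingType as defined in this commit; "--chrome-dnn-logging" is the flag the diff passes through, the helper name is illustrative):

from enum import Enum


class TracingType(Enum):
    NONE = ""
    UNITRACE = "unitrace"
    FLAMEGRAPH = "flamegraph"


def benchdnn_trace_options(run_trace: TracingType):
    # Only Unitrace takes extra options for this suite; flame graph
    # runs and plain runs pass None through to run_bench.
    if run_trace == TracingType.UNITRACE:
        return ["--chrome-dnn-logging"]
    return None


assert benchdnn_trace_options(TracingType.UNITRACE) == ["--chrome-dnn-logging"]
assert benchdnn_trace_options(TracingType.FLAMEGRAPH) is None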

devops/scripts/benchmarks/benches/benchdnn_list.py

Lines changed: 10 additions & 10 deletions
@@ -48,16 +48,16 @@
         16x32x48x5",
         False,  # Do not run graph for this benchmark
     ],
-    [
-        "sum",
-        "padding-1",
-        "--ddt=f32 --sdt=f32:f32 --stag=aBcd16b --dtag=aBcd16b 1x8x64x64 1x8x640x1024 1x24x640x1024",
-    ],
-    [
-        "sum",
-        "padding-2",
-        "--sdt=bf16:bf16 --ddt=bf16 --stag=AB48a16b:AB48a16b --dtag=AB48a16b 512x1024",
-    ],
+    # [
+    #     "sum",
+    #     "padding-1",
+    #     "--ddt=f32 --sdt=f32:f32 --stag=aBcd16b --dtag=aBcd16b 1x8x64x64 1x8x640x1024 1x24x640x1024",
+    # ],
+    # [
+    #     "sum",
+    #     "padding-2",
+    #     "--sdt=bf16:bf16 --ddt=bf16 --stag=AB48a16b:AB48a16b --dtag=AB48a16b 512x1024",
+    # ],
     [
         "graph",
         "sdpa-plain-f16",

devops/scripts/benchmarks/benches/compute.py

Lines changed: 3 additions & 7 deletions
@@ -8,7 +8,7 @@
 import io
 import copy
 from utils.utils import run, git_clone, create_build_path
-from .base import Benchmark, Suite
+from .base import Benchmark, Suite, TracingType
 from utils.result import BenchmarkMetadata, Result
 from options import options
 from enum import Enum
@@ -339,9 +339,7 @@ def explicit_group(self):
     def description(self) -> str:
         return ""
 
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         command = [
             f"{self.benchmark_bin}",
             f"--test={self.test}",
@@ -352,9 +350,7 @@ def run(
         command += self.bin_args()
         env_vars.update(self.extra_env_vars())
 
-        result = self.run_bench(
-            command, env_vars, run_unitrace=run_unitrace, run_flamegraph=run_flamegraph
-        )
+        result = self.run_bench(command, env_vars, run_trace=run_trace)
         parsed_results = self.parse_output(result)
         ret = []
         for label, median, stddev, unit in parsed_results:

devops/scripts/benchmarks/benches/gromacs.py

Lines changed: 3 additions & 6 deletions
@@ -7,7 +7,7 @@
 from pathlib import Path
 import re
 
-from .base import Suite, Benchmark
+from .base import Suite, Benchmark, TracingType
 from options import options
 from utils.utils import git_clone, download, run, create_build_path
 from utils.result import Result
@@ -169,9 +169,7 @@ def setup(self):
             ld_library=self.suite.oneapi.ld_libraries(),
         )
 
-    def run(
-        self, env_vars, run_flamegraph: bool = False, run_unitrace: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         model_dir = self.grappa_dir / self.model
 
         env_vars.update({"SYCL_CACHE_PERSISTENT": "1"})
@@ -210,8 +208,7 @@ def run(
             add_sycl=True,
             use_stdout=False,
             ld_library=self.suite.oneapi.ld_libraries(),
-            run_unitrace=run_unitrace,
-            run_flamegraph=run_flamegraph,
+            run_trace=run_trace,
         )
 
         if not self._validate_correctness(options.benchmark_cwd + "/md.log"):

devops/scripts/benchmarks/benches/llamacpp.py

Lines changed: 3 additions & 6 deletions
@@ -7,7 +7,7 @@
 import io
 from pathlib import Path
 from utils.utils import download, git_clone
-from .base import Benchmark, Suite
+from .base import Benchmark, Suite, TracingType
 from utils.result import Result
 from utils.utils import run, create_build_path
 from options import options
@@ -115,9 +115,7 @@ def get_tags(self):
     def lower_is_better(self):
         return False
 
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         command = [
             f"{self.benchmark_bin}",
             "--output",
@@ -146,8 +144,7 @@ def run(
             command,
             env_vars,
             ld_library=self.bench.oneapi.ld_libraries(),
-            run_unitrace=run_unitrace,
-            run_flamegraph=run_flamegraph,
+            run_trace=run_trace,
         )
         parsed = self.parse_output(result)
         results = []

devops/scripts/benchmarks/benches/syclbench.py

Lines changed: 3 additions & 7 deletions
@@ -7,7 +7,7 @@
 import csv
 import io
 from utils.utils import run, git_clone, create_build_path
-from .base import Benchmark, Suite
+from .base import Benchmark, Suite, TracingType
 from utils.result import Result
 from options import options
 
@@ -137,9 +137,7 @@ def setup(self):
             self.directory, "sycl-bench-build", self.bench_name
         )
 
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         self.outputfile = os.path.join(self.bench.directory, self.test + ".csv")
 
         command = [
@@ -153,9 +151,7 @@ def run(
         env_vars.update(self.extra_env_vars())
 
         # no output to stdout, all in outputfile
-        self.run_bench(
-            command, env_vars, run_unitrace=run_unitrace, run_flamegraph=run_flamegraph
-        )
+        self.run_bench(command, env_vars, run_trace=run_trace)
 
         with open(self.outputfile, "r") as f:
             reader = csv.reader(f)

devops/scripts/benchmarks/benches/test.py

Lines changed: 2 additions & 4 deletions
@@ -5,7 +5,7 @@
 
 import random
 from utils.utils import git_clone
-from .base import Benchmark, Suite
+from .base import Benchmark, Suite, TracingType
 from utils.result import BenchmarkMetadata, Result
 from utils.utils import run, create_build_path
 from options import options
@@ -88,9 +88,7 @@ def notes(self) -> str:
     def unstable(self) -> str:
         return self.unstable_text
 
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
         random_value = self.value + random.uniform(-1 * (self.diff), self.diff)
         return [
             Result(

devops/scripts/benchmarks/benches/umf.py

Lines changed: 3 additions & 6 deletions
@@ -8,7 +8,7 @@
 import io
 import re
 
-from .base import Benchmark, Suite
+from .base import Benchmark, Suite, TracingType
 from utils.result import Result
 from options import options
 from utils.oneapi import get_oneapi
@@ -137,9 +137,7 @@ def get_names_of_benchmarks_to_be_run(self, command, env_vars):
 
         return all_names
 
-    def run(
-        self, env_vars, run_unitrace: bool = False, run_flamegraph: bool = False
-    ) -> list[Result]:
+    def run(self, env_vars, run_trace: TracingType = TracingType.NONE) -> list[Result]:
        command = [f"{self.benchmark_bin}"]
 
        all_names = self.get_names_of_benchmarks_to_be_run(command, env_vars)
@@ -157,8 +155,7 @@ def run(
            env_vars,
            add_sycl=False,
            ld_library=[self.umf_lib],
-            run_unitrace=run_unitrace,
-            run_flamegraph=run_flamegraph,
+            run_trace=run_trace,
        )
 
        parsed = self.parse_output(result)
