Skip to content

Commit 5323386

Browse files
committed
add SubmitGraph benchmark
... and apply black formatting.
1 parent a884df8 commit 5323386

File tree

5 files changed

+68
-17
lines changed

5 files changed

+68
-17
lines changed

devops/scripts/benchmarks/benches/compute.py

Lines changed: 62 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def setup(self):
2828
self.directory,
2929
"compute-benchmarks-repo",
3030
"https://github.com/intel/compute-benchmarks.git",
31-
"dfdbf2ff9437ee159627cc2cd9159c289da1a7ba",
31+
"b5cc46acf61766ab00da04e85bd4da4f7591eb21",
3232
)
3333
build_path = create_build_path(self.directory, "compute-benchmarks-build")
3434

@@ -87,6 +87,19 @@ def benchmarks(self) -> list[Benchmark]:
8787
UllsKernelSwitch(self, RUNTIMES.LEVEL_ZERO, 8, 200, 0, 0, 1, 1),
8888
]
8989

90+
for in_order_queue in [0, 1]:
91+
for num_kernels in [4, 32]:
92+
for measure_completion_time in [0, 1]:
93+
benches.append(
94+
GraphApiSubmitGraph(
95+
self,
96+
RUNTIMES.SYCL,
97+
in_order_queue,
98+
num_kernels,
99+
measure_completion_time,
100+
)
101+
)
102+
90103
if options.ur is not None:
91104
benches += [
92105
SubmitKernelUR(self, 0, 0),
@@ -536,14 +549,46 @@ def bin_args(self) -> list[str]:
536549
"--immediateAppendCmdList=0",
537550
]
538551

552+
553+
class GraphApiSubmitGraph(ComputeBenchmark):
    """Benchmark wrapping the compute-benchmarks "SubmitGraph" test.

    Runs the ``graph_api_benchmark_<runtime>`` binary to measure the cost of
    submitting a command graph containing ``numKernels`` trivial kernels.
    """

    def __init__(
        self, bench, runtime: RUNTIMES, inOrderQueue, numKernels, measureCompletionTime
    ):
        # Configuration is stored before delegating to the base constructor.
        # NOTE(review): presumably ComputeBenchmark.__init__ ends up reading
        # these attributes (e.g. via name()/bin_args()) — confirm against the
        # base class before reordering.
        self.inOrderQueue = inOrderQueue
        self.numKernels = numKernels
        self.runtime = runtime
        self.measureCompletionTime = measureCompletionTime
        super().__init__(bench, f"graph_api_benchmark_{runtime.value}", "SubmitGraph")

    def explicit_group(self):
        # Variants are grouped by kernel count only, so in-order/out-of-order
        # and completion-time flavors chart side by side.
        return f"SubmitGraph {self.numKernels}"

    def description(self) -> str:
        runtime_label = self.runtime.value.upper()
        return (
            f"Measures {runtime_label} performance when executing {self.numKernels} "
            f"trivial kernels using graphs. Tests overhead and benefits of graph-based execution."
        )

    def name(self):
        # Unique result identifier; encodes every configuration knob.
        return f"graph_api_benchmark_{self.runtime.value} SubmitGraph numKernels:{self.numKernels} ioq {self.inOrderQueue} measureCompletion {self.measureCompletionTime}"

    def bin_args(self) -> list[str]:
        """Command-line arguments passed to the benchmark binary."""
        return [
            "--iterations=10000",
            f"--NumKernels={self.numKernels}",
            f"--MeasureCompletionTime={self.measureCompletionTime}",
            f"--InOrderQueue={self.inOrderQueue}",
            "--Profiling=0",
            "--KernelExecutionTime=1",
        ]
584+
585+
539586
class UllsEmptyKernel(ComputeBenchmark):
540587
def __init__(self, bench, runtime: RUNTIMES, wgc, wgs):
541588
self.wgc = wgc
542589
self.wgs = wgs
543590
self.runtime = runtime
544-
super().__init__(
545-
bench, f"ulls_benchmark_{runtime.value}", "EmptyKernel"
546-
)
591+
super().__init__(bench, f"ulls_benchmark_{runtime.value}", "EmptyKernel")
547592

548593
def explicit_group(self):
549594
return f"EmptyKernel {self.wgc} {self.wgs}"
@@ -561,18 +606,27 @@ def bin_args(self) -> list[str]:
561606
f"--wgc={self.wgs}",
562607
]
563608

609+
564610
class UllsKernelSwitch(ComputeBenchmark):
565-
def __init__(self, bench, runtime: RUNTIMES, count, kernelTime, barrier, hostVisible, ioq, ctrBasedEvents):
611+
def __init__(
612+
self,
613+
bench,
614+
runtime: RUNTIMES,
615+
count,
616+
kernelTime,
617+
barrier,
618+
hostVisible,
619+
ioq,
620+
ctrBasedEvents,
621+
):
566622
self.count = count
567623
self.kernelTime = kernelTime
568624
self.barrier = barrier
569625
self.hostVisible = hostVisible
570626
self.ctrBasedEvents = ctrBasedEvents
571627
self.runtime = runtime
572628
self.ioq = ioq
573-
super().__init__(
574-
bench, f"ulls_benchmark_{runtime.value}", "KernelSwitch"
575-
)
629+
super().__init__(bench, f"ulls_benchmark_{runtime.value}", "KernelSwitch")
576630

577631
def explicit_group(self):
578632
return f"KernelSwitch {self.count} {self.kernelTime}"

devops/scripts/benchmarks/history.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ def create_run(self, name: str, results: list[Result]) -> BenchmarkRun:
8383
github_repo=github_repo,
8484
date=datetime.now(tz=timezone.utc),
8585
results=results,
86-
hostname=socket.gethostname()
86+
hostname=socket.gethostname(),
8787
)
8888

8989
def save(self, save_name, results: list[Result], to_file=True):
@@ -99,11 +99,7 @@ def save(self, save_name, results: list[Result], to_file=True):
9999

100100
# Use formatted timestamp for the filename
101101
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
102-
file_path = Path(
103-
os.path.join(
104-
results_dir, f"{save_name}_{timestamp}.json"
105-
)
106-
)
102+
file_path = Path(os.path.join(results_dir, f"{save_name}_{timestamp}.json"))
107103
with file_path.open("w") as file:
108104
json.dump(serialized, file, indent=4)
109105
print(f"Benchmark results saved to {file_path}")
@@ -134,7 +130,7 @@ def compute_average(self, data: list[BenchmarkRun]):
134130
name=first_run.name,
135131
git_hash="average",
136132
date=first_run.date, # should this be different?
137-
hostname=first_run.hostname
133+
hostname=first_run.hostname,
138134
)
139135

140136
return average_benchmark_run

devops/scripts/benchmarks/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -409,7 +409,7 @@ def validate_and_parse_env_args(env_args):
409409
"--output-dir",
410410
type=str,
411411
help="Location for output files, if --output-html or --output_markdown was specified.",
412-
default=None
412+
default=None,
413413
)
414414
parser.add_argument(
415415
"--dry-run",

devops/scripts/benchmarks/options.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33

44
from presets import presets
55

6+
67
class Compare(Enum):
78
LATEST = "latest"
89
AVERAGE = "average"

devops/scripts/benchmarks/presets.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,9 @@
3030
],
3131
}
3232

33+
3334
def enabled_suites(preset: str) -> list[str]:
3435
try:
3536
return presets[preset]
3637
except KeyError:
3738
raise ValueError(f"Preset '{preset}' not found.")
38-

0 commit comments

Comments (0)