66 changes: 13 additions & 53 deletions devops/scripts/benchmarks/benches/compute.py
@@ -16,6 +16,7 @@
from utils.result import BenchmarkMetadata, Result

from .base import Benchmark, Suite, TracingType
from .compute_metadata import ComputeMetadataGenerator


class RUNTIMES(Enum):
@@ -99,61 +100,20 @@ def setup(self) -> None:
        self.project.build(add_sycl=True)

    def additional_metadata(self) -> dict[str, BenchmarkMetadata]:
        metadata = {
            "SinKernelGraph": BenchmarkMetadata(
                type="group",
                unstable="This benchmark combines both eager and graph execution, and may not be representative of real use cases.",
                tags=["submit", "memory", "proxy", "SYCL", "UR", "L0", "graph"],
            ),
            "FinalizeGraph": BenchmarkMetadata(
                type="group", tags=["finalize", "micro", "SYCL", "graph"]
            ),
        }

        # Add metadata for all SubmitKernel group variants
        submit_kernel_metadata = BenchmarkMetadata(
            type="group",
            notes="Each layer builds on top of the previous layer, adding functionality and overhead.\n"
            "The first layer is the Level Zero API, the second is the Unified Runtime API, and the third is the SYCL API.\n"
            "The UR v2 adapter noticeably reduces UR layer overhead, also improving SYCL performance.\n"
            "Work is ongoing to reduce the overhead of the SYCL API\n",
            tags=["submit", "micro", "SYCL", "UR", "L0"],
            range_min=0.0,
        )
        for order in ["in order", "out of order"]:
            for completion in ["", " with completion"]:
                for events in ["", " using events"]:
                    group_name = f"SubmitKernel {order}{completion}{events} long kernel"
                    metadata[group_name] = copy.deepcopy(submit_kernel_metadata)
                    metadata[group_name].description = (
                        f"Measures CPU time overhead of submitting {order} kernels with longer execution times through different APIs."
                    )
                    # CPU count variants
                    cpu_count_group = f"{group_name}, CPU count"
                    metadata[cpu_count_group] = copy.deepcopy(submit_kernel_metadata)
                    metadata[cpu_count_group].description = (
                        f"Measures CPU instruction count overhead of submitting {order} kernels with longer execution times through different APIs."
                    )

        # Add metadata for all SubmitGraph group variants
        submit_graph_metadata = BenchmarkMetadata(
            type="group", tags=["submit", "micro", "SYCL", "UR", "L0", "graph"]
        )
        for order in ["in order", "out of order"]:
            for completion in ["", " with completion"]:
                for events in ["", " using events"]:
                    for num_kernels in self.submit_graph_num_kernels:
                        for host_tasks in ["", " use host tasks"]:
                            group_name = f"SubmitGraph {order}{completion}{events}{host_tasks}, {num_kernels} kernels"
                            metadata[group_name] = copy.deepcopy(submit_graph_metadata)
                            # CPU count variants
                            cpu_count_group = f"{group_name}, CPU count"
                            metadata[cpu_count_group] = copy.deepcopy(
                                submit_graph_metadata
                            )
        return metadata
        """
        Returns:
            Dictionary mapping group names to their metadata
        """
        # Generate metadata based on actual benchmark instances
        generator = ComputeMetadataGenerator()
        benchmarks = self.benchmarks()
        return generator.generate_metadata_from_benchmarks(benchmarks)

    def benchmarks(self) -> list[Benchmark]:
        """
        Returns:
            List of all possible benchmark instances
        """
        benches = []

        # hand-picked value so that total execution time of the benchmark is
87 changes: 87 additions & 0 deletions devops/scripts/benchmarks/benches/compute_metadata.py
@@ -0,0 +1,87 @@
# Copyright (C) 2025 Intel Corporation
# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
# See LICENSE.TXT
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""
Metadata generator for Compute Benchmarks.

This module provides centralized metadata generation for Compute Benchmark groups,
ensuring consistency between benchmark group membership and group metadata definitions.
"""

from typing import Dict, List

from utils.result import BenchmarkMetadata

from .base import Benchmark


class ComputeMetadataGenerator:
"""
Generates metadata for Compute Benchmark groups.

This class keeps the logic for creating group metadata, ensuring that
all possible benchmark group configurations have corresponding metadata entries.
"""

    def __init__(self):
        # Base metadata for core groups
        self._base_group_metadata = {
            "SubmitKernel": {
                "description": "Measures CPU time overhead of submitting kernels through different APIs.",
                "notes": (
                    "Each layer builds on top of the previous layer, adding functionality and overhead.\n"
                    "The first layer is the Level Zero API, the second is the Unified Runtime API, and the third is the SYCL API.\n"
                    "The UR v2 adapter noticeably reduces UR layer overhead, also improving SYCL performance.\n"
                    "Work is ongoing to reduce the overhead of the SYCL API\n"
                ),
                "tags": ["submit", "micro", "SYCL", "UR", "L0"],
                "range_min": 0.0,
            },
            "SinKernelGraph": {
                "unstable": "This benchmark combines both eager and graph execution, and may not be representative of real use cases.",
                "tags": ["submit", "memory", "proxy", "SYCL", "UR", "L0", "graph"],
            },
            "SubmitGraph": {"tags": ["submit", "micro", "SYCL", "UR", "L0", "graph"]},
            "FinalizeGraph": {"tags": ["finalize", "micro", "SYCL", "graph"]},
        }

    def generate_metadata_from_benchmarks(
        self, benchmarks: List[Benchmark]
    ) -> Dict[str, BenchmarkMetadata]:
        """
        Generate group metadata based on actual benchmark configurations.

        Args:
            benchmarks: List of benchmark instances to analyze

        Returns:
            Dictionary mapping group names to their metadata
        """
        metadata = {}
        # Discover all group names from actual benchmarks
        for benchmark in benchmarks:
            if hasattr(benchmark, "explicit_group") and callable(
                benchmark.explicit_group
            ):
                group_name = benchmark.explicit_group()
                if group_name:
                    self._generate_metadata(metadata, group_name)

        return metadata

    def _generate_metadata(
        self, metadata: Dict[str, BenchmarkMetadata], group_name: str
    ):
        base_metadata = self._base_group_metadata.get(group_name.split()[0], {})
        metadata[group_name] = BenchmarkMetadata(
            type="group",
            description=base_metadata.get("description"),
            notes=base_metadata.get("notes"),
            unstable=base_metadata.get("unstable"),
            tags=base_metadata.get("tags", []),
            range_min=base_metadata.get("range_min"),
            range_max=base_metadata.get("range_max"),
            explicit_group=group_name,
        )
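
Taken together, the generator derives each group's metadata from whatever explicit_group() the benchmark instances report, falling back to the base entry keyed by the first word of the group name. A minimal usage sketch, assuming the benchmarks scripts directory is on the import path; the stub class and group name below are illustrative assumptions, not code from this PR:

# Illustrative only: _StubBenchmark and its group name are hypothetical.
from benches.compute_metadata import ComputeMetadataGenerator

class _StubBenchmark:
    def explicit_group(self) -> str:
        return "SubmitKernel in order long kernel"

generator = ComputeMetadataGenerator()
metadata = generator.generate_metadata_from_benchmarks([_StubBenchmark()])
entry = metadata["SubmitKernel in order long kernel"]
# The entry picks up tags, notes, and range_min from the "SubmitKernel" base
# entry, because _generate_metadata() keys _base_group_metadata by
# group_name.split()[0].
assert "submit" in entry.tags and entry.range_min == 0.0

Because group names are discovered from the benchmark instances themselves, new variants (for example the ", CPU count" groups) no longer need to be enumerated by hand in additional_metadata().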
4 changes: 2 additions & 2 deletions devops/scripts/benchmarks/utils/result.py
@@ -75,8 +75,8 @@ class BenchmarkMetadata:
    notes: str = None
    unstable: str = None
    tags: list[str] = field(default_factory=list)
    range_min: float = None
    range_max: float = None
    range_min: float | None = None
    range_max: float | None = None
    display_name: str = None
    explicit_group: str = None
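
The switch to explicit optional annotations matches how compute_metadata.py populates these fields: base_metadata.get("range_min") returns None for groups without a defined range. A simplified stand-in dataclass to illustrate (not the real BenchmarkMetadata, and assuming Python 3.10+ for the X | None syntax):

from dataclasses import dataclass

# Simplified stand-in; the real BenchmarkMetadata has more fields.
@dataclass
class _Meta:
    range_min: float | None = None  # explicitly optional, matching the new annotation
    range_max: float | None = None

print(_Meta(range_min=0.0))  # _Meta(range_min=0.0, range_max=None)
print(_Meta())               # _Meta(range_min=None, range_max=None)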
