74 changes: 61 additions & 13 deletions devops/scripts/benchmarks/benches/base.py
@@ -7,13 +7,24 @@
import shutil
import subprocess
from pathlib import Path
from enum import Enum
from utils.result import BenchmarkMetadata, BenchmarkTag, Result
from options import options
from utils.utils import download, run
from abc import ABC, abstractmethod
from utils.unitrace import get_unitrace
from utils.flamegraph import get_flamegraph
from utils.logger import log


class TracingType(Enum):
"""Enumeration of available tracing types."""

NONE = ""
UNITRACE = "unitrace"
FLAMEGRAPH = "flamegraph"


benchmark_tags = [
BenchmarkTag("SYCL", "Benchmark uses SYCL runtime"),
BenchmarkTag("UR", "Benchmark uses Unified Runtime API"),
@@ -62,12 +73,17 @@ def enabled(self) -> bool:
By default, it returns True, but can be overridden to disable a benchmark."""
return True

def traceable(self) -> bool:
"""Returns whether this benchmark should be traced by Unitrace.
By default, it returns True, but can be overridden to disable tracing for a benchmark.
def traceable(self, tracing_type: TracingType) -> bool:
"""Returns whether this benchmark should be traced by the specified tracing method.
By default, it returns True for all tracing types, but can be overridden
to disable specific tracing methods for a benchmark.
"""
return True

def tracing_enabled(self, run_trace, force_trace, tr_type: TracingType):
"""Returns whether tracing is enabled for the given type."""
return (self.traceable(tr_type) or force_trace) and run_trace == tr_type

@abstractmethod
def setup(self):
pass
@@ -77,12 +93,18 @@ def teardown(self):
pass

@abstractmethod
def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
def run(
self,
env_vars,
run_trace: TracingType = TracingType.NONE,
force_trace: bool = False,
) -> list[Result]:
"""Execute the benchmark with the given environment variables.

Args:
env_vars: Environment variables to use when running the benchmark.
run_unitrace: Whether to run benchmark under Unitrace.
run_trace: The type of tracing to run (NONE, UNITRACE, or FLAMEGRAPH).
force_trace: If True, ignore the traceable() method and force tracing.

Returns:
A list of Result objects with the benchmark results.
@@ -111,8 +133,9 @@ def run_bench(
ld_library=[],
add_sycl=True,
use_stdout=True,
run_unitrace=False,
extra_unitrace_opt=None,
run_trace: TracingType = TracingType.NONE,
extra_trace_opt=None,
force_trace: bool = False,
):
env_vars = env_vars.copy()
if options.ur is not None:
@@ -125,15 +148,26 @@
ld_libraries = options.extra_ld_libraries.copy()
ld_libraries.extend(ld_library)

if self.traceable() and run_unitrace:
if extra_unitrace_opt is None:
extra_unitrace_opt = []
unitrace_output = None
if self.tracing_enabled(run_trace, force_trace, TracingType.UNITRACE):
if extra_trace_opt is None:
extra_trace_opt = []
unitrace_output, command = get_unitrace().setup(
self.name(), command, extra_unitrace_opt
self.name(), command, extra_trace_opt
)
log.debug(f"Unitrace output: {unitrace_output}")
log.debug(f"Unitrace command: {' '.join(command)}")

# FlameGraph run: collect perf data alongside the benchmark if requested
perf_data_file = None
if self.tracing_enabled(run_trace, force_trace, TracingType.FLAMEGRAPH):
perf_data_file, command = get_flamegraph().setup(
self.name(), self.get_suite_name(), command
)
log.debug(f"FlameGraph perf data: {perf_data_file}")
log.debug(f"FlameGraph command: {' '.join(command)}")

try:
result = run(
command=command,
@@ -143,13 +177,27 @@
ld_library=ld_libraries,
)
except subprocess.CalledProcessError:
if run_unitrace:
if run_trace == TracingType.UNITRACE and unitrace_output:
get_unitrace().cleanup(options.benchmark_cwd, unitrace_output)
if run_trace == TracingType.FLAMEGRAPH and perf_data_file:
get_flamegraph().cleanup(perf_data_file)
raise

if self.traceable() and run_unitrace:
if (
self.tracing_enabled(run_trace, force_trace, TracingType.UNITRACE)
and unitrace_output
):
get_unitrace().handle_output(unitrace_output)

if (
self.tracing_enabled(run_trace, force_trace, TracingType.FLAMEGRAPH)
and perf_data_file
):
svg_file = get_flamegraph().handle_output(
self.name(), perf_data_file, self.get_suite_name()
)
log.info(f"FlameGraph generated: {svg_file}")

if use_stdout:
return result.stdout.decode()
else:
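For reference, the sketch below shows how a benchmark subclass might interact with the new interface: overriding traceable() to opt out of a single tracing backend while still honoring force_trace through run_bench(). This is a minimal illustration assuming the Benchmark, TracingType, and Result definitions from this diff; MyBenchmark, its command line, and _parse_output() are hypothetical and not part of the change.

from .base import Benchmark, TracingType
from utils.result import Result


class MyBenchmark(Benchmark):
    """Hypothetical benchmark, used only to illustrate the new tracing API."""

    def name(self) -> str:
        return "my_benchmark"

    def traceable(self, tracing_type: TracingType) -> bool:
        # Allow Unitrace, but opt out of FlameGraph for this benchmark.
        # run_bench() can still trace it when force_trace=True.
        return tracing_type != TracingType.FLAMEGRAPH

    def setup(self):
        pass

    def teardown(self):
        pass

    def run(
        self,
        env_vars,
        run_trace: TracingType = TracingType.NONE,
        force_trace: bool = False,
    ) -> list[Result]:
        # run_bench() enables a tracer only when
        # (traceable(type) or force_trace) and run_trace == type.
        output = self.run_bench(
            ["./my_bench", "--iters", "100"],  # hypothetical command
            env_vars,
            run_trace=run_trace,
            force_trace=force_trace,
        )
        return self._parse_output(output)

    def _parse_output(self, output: str) -> list[Result]:
        # Hypothetical parser; a real benchmark would build Result objects here.
        return []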
20 changes: 16 additions & 4 deletions devops/scripts/benchmarks/benches/benchdnn.py
@@ -6,7 +6,7 @@

from pathlib import Path

from .base import Suite, Benchmark
from .base import Suite, Benchmark, TracingType
from options import options
from utils.utils import git_clone, run, create_build_path
from utils.result import Result
@@ -132,7 +132,18 @@ def setup(self):
if not self.bench_bin.exists():
raise FileNotFoundError(f"Benchmark binary not found: {self.bench_bin}")

def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
def run(
self,
env_vars,
run_trace: TracingType = TracingType.NONE,
force_trace: bool = False,
) -> list[Result]:
# Determine extra trace options based on tracing type
if run_trace == TracingType.UNITRACE:
extra_trace_opt = ["--chrome-dnn-logging"]
else:
extra_trace_opt = None

command = [
str(self.bench_bin),
*self.bench_args.split(),
@@ -151,8 +162,9 @@ def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
add_sycl=True,
ld_library=ld_library,
use_stdout=True,
run_unitrace=run_unitrace,
extra_unitrace_opt=["--chrome-dnn-logging"],
run_trace=run_trace,
extra_trace_opt=extra_trace_opt,
force_trace=force_trace,
)
result_value = self._extract_time(output)

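As a rough sketch of the caller side, this is one way a runner script could map a command-line flag onto the new run() signature. Only TracingType and the run(env_vars, run_trace, force_trace) signature come from this diff; the argparse flag names, the import path, and the benchmarks list are assumptions for illustration.

import argparse

from benches.base import TracingType  # assumed import path within the benchmarks scripts

parser = argparse.ArgumentParser()
parser.add_argument(
    "--trace",
    choices=[t.value for t in TracingType],  # "", "unitrace", "flamegraph"
    default=TracingType.NONE.value,
    help="Tracing backend to run benchmarks under (empty string disables tracing).",
)
parser.add_argument(
    "--force-trace",
    action="store_true",
    help="Trace even benchmarks whose traceable() opts out of the selected backend.",
)
args = parser.parse_args()

# Enum lookup by value; TracingType("") maps to TracingType.NONE.
run_trace = TracingType(args.trace)

benchmarks = []  # hypothetical: populated with Benchmark instances by the suite
for bench in benchmarks:
    results = bench.run(
        env_vars={},
        run_trace=run_trace,
        force_trace=args.force_trace,
    )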