Skip to content

Commit 7ed55aa

Browse files
pbalcer authored and ianayl committed
simplify presets, remove suites if all set
1 parent 0a7e76b commit 7ed55aa

File tree

4 files changed

+91
-104
lines changed

4 files changed

+91
-104
lines changed

devops/scripts/benchmarks/html/scripts.js

Lines changed: 28 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
// Core state
77
let activeRuns = new Set(defaultCompareNames);
88
let chartInstances = new Map();
9+
let suiteNames = new Set();
910
let timeseriesData, barChartsData, allRunNames;
1011

1112
// DOM Elements
@@ -114,14 +115,12 @@ function createChart(data, containerId, type) {
114115

115116
const chartConfig = {
116117
type: type === 'time' ? 'line' : 'bar',
117-
data: type === 'time' ?
118-
{
119-
datasets: createTimeseriesDatasets(data)
120-
} :
121-
{
122-
labels: data.labels,
123-
datasets: data.datasets
124-
},
118+
data: type === 'time' ? {
119+
datasets: createTimeseriesDatasets(data)
120+
} : {
121+
labels: data.labels,
122+
datasets: data.datasets
123+
},
125124
options: options
126125
};
127126

@@ -221,10 +220,12 @@ function createChartContainer(data, canvasId) {
221220
summary.appendChild(downloadButton);
222221
details.appendChild(summary);
223222

223+
latestRunsLookup = createLatestRunsLookup(benchmarkRuns);
224+
224225
// Create and append extra info
225226
const extraInfo = document.createElement('div');
226227
extraInfo.className = 'extra-info';
227-
extraInfo.innerHTML = generateExtraInfo(data);
228+
extraInfo.innerHTML = generateExtraInfo(latestRunsLookup, data);
228229
details.appendChild(extraInfo);
229230

230231
container.appendChild(details);
@@ -252,9 +253,8 @@ function createLatestRunsLookup(benchmarkRuns) {
252253

253254
return latestRunsMap;
254255
}
255-
const latestRunsLookup = createLatestRunsLookup(benchmarkRuns);
256256

257-
function generateExtraInfo(data) {
257+
function generateExtraInfo(latestRunsLookup, data) {
258258
const labels = data.datasets ? data.datasets.map(dataset => dataset.label) : [data.label];
259259

260260
return labels.map(label => {
@@ -283,7 +283,7 @@ function downloadChart(canvasId, label) {
283283
const chart = chartInstances.get(canvasId);
284284
if (chart) {
285285
const link = document.createElement('a');
286-
link.href = chart.toBase64Image('image/jpeg', 1)
286+
link.href = chart.toBase64Image('image/png', 1)
287287
link.download = `${label}.png`;
288288
link.click();
289289
}
@@ -307,7 +307,7 @@ function updateURL() {
307307
url.searchParams.delete('regex');
308308
}
309309

310-
if (activeSuites.length > 0) {
310+
if (activeSuites.length > 0 && activeSuites.length != suiteNames.size) {
311311
url.searchParams.set('suites', activeSuites.join(','));
312312
} else {
313313
url.searchParams.delete('suites');
@@ -445,6 +445,12 @@ function setupRunSelector() {
445445
function setupSuiteFilters() {
446446
suiteFiltersContainer = document.getElementById('suite-filters');
447447

448+
benchmarkRuns.forEach(run => {
449+
run.results.forEach(result => {
450+
suiteNames.add(result.suite);
451+
});
452+
});
453+
448454
suiteNames.forEach(suite => {
449455
const label = document.createElement('label');
450456
const checkbox = document.createElement('input');
@@ -530,16 +536,18 @@ function loadData() {
530536
const loadingIndicator = document.getElementById('loading-indicator');
531537
loadingIndicator.style.display = 'block'; // Show loading indicator
532538

533-
if (config.remoteDataUrl && config.remoteDataUrl !== '') {
539+
if (typeof remoteDataUrl !== 'undefined' && remoteDataUrl !== '') {
534540
// Fetch data from remote URL
535-
fetch(config.remoteDataUrl)
536-
.then(response => response.text())
537-
.then(scriptContent => {
538-
// Evaluate the script content
539-
eval(scriptContent);
541+
fetch(remoteDataUrl)
542+
.then(response => response.json())
543+
.then(data => {
544+
benchmarkRuns = data;
540545
initializeCharts();
541546
})
542-
.catch(error => console.error('Error fetching remote data:', error))
547+
.catch(error => {
548+
console.error('Error fetching remote data:', error);
549+
loadingIndicator.textContent = 'Fetching remote data failed.';
550+
})
543551
.finally(() => {
544552
loadingIndicator.style.display = 'none'; // Hide loading indicator
545553
});

devops/scripts/benchmarks/main.py

Lines changed: 27 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from history import BenchmarkHistory
1818
from utils.utils import prepare_workdir
1919
from utils.compute_runtime import *
20-
from presets import preset_get_by_name, presets
20+
from presets import enabled_suites, presets
2121

2222
import argparse
2323
import re
@@ -164,6 +164,9 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
164164
failures = {}
165165

166166
for s in suites:
167+
if s.name() not in enabled_suites(options.preset):
168+
continue
169+
167170
suite_benchmarks = s.benchmarks()
168171
if filter:
169172
suite_benchmarks = [
@@ -183,14 +186,13 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
183186
print(f"{type(s).__name__} setup complete.")
184187
benchmarks += suite_benchmarks
185188

186-
for b in benchmarks:
187-
print(b.name())
188-
189189
for benchmark in benchmarks:
190190
try:
191-
print(f"Setting up {benchmark.name()}... ")
191+
if options.verbose:
192+
print(f"Setting up {benchmark.name()}... ")
192193
benchmark.setup()
193-
print(f"{benchmark.name()} setup complete.")
194+
if options.verbose:
195+
print(f"{benchmark.name()} setup complete.")
194196

195197
except Exception as e:
196198
if options.exit_on_failure:
@@ -240,7 +242,10 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
240242
if not options.dry_run:
241243
chart_data = {this_name: results}
242244

243-
history = BenchmarkHistory(directory)
245+
results_dir = directory
246+
if options.custom_results_dir:
247+
results_dir = Path(options.custom_results_dir)
248+
history = BenchmarkHistory(results_dir)
244249
# limit how many files we load.
245250
# should this be configurable?
246251
history.load(1000)
@@ -280,8 +285,6 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
280285
if options.output_html:
281286
generate_html(history.runs, compare_names)
282287

283-
print(f"See {os.getcwd()}/html/index.html for the results.")
284-
285288

286289
def validate_and_parse_env_args(env_args):
287290
env_vars = {}
@@ -363,12 +366,6 @@ def validate_and_parse_env_args(env_args):
363366
help="Regex pattern to filter benchmarks by name.",
364367
default=None,
365368
)
366-
parser.add_argument(
367-
"--epsilon",
368-
type=float,
369-
help="Threshold to consider change of performance significant",
370-
default=options.epsilon,
371-
)
372369
parser.add_argument(
373370
"--verbose", help="Print output of all the commands.", action="store_true"
374371
)
@@ -395,7 +392,11 @@ def validate_and_parse_env_args(env_args):
395392
help="Specify whether markdown output should fit the content size limit for request validation",
396393
)
397394
parser.add_argument(
398-
"--output-html", help="Create HTML output", action="store_true", default=False
395+
"--output-html",
396+
help="Create HTML output. Local output is for direct local viewing of the html file, remote is for server deployment.",
397+
nargs="?",
398+
const=options.output_html,
399+
choices=["local", "remote"],
399400
)
400401
parser.add_argument(
401402
"--dry-run",
@@ -442,9 +443,15 @@ def validate_and_parse_env_args(env_args):
442443
parser.add_argument(
443444
"--preset",
444445
type=str,
445-
choices=[p.name() for p in presets],
446+
choices=[p for p in presets.keys()],
446447
help="Benchmark preset to run.",
447-
default=options.preset.name(),
448+
default=options.preset,
449+
)
450+
parser.add_argument(
451+
"--results-dir",
452+
type=str,
453+
help="Specify a custom results directory",
454+
default=options.custom_results_dir,
448455
)
449456

450457
args = parser.parse_args()
@@ -457,7 +464,6 @@ def validate_and_parse_env_args(env_args):
457464
options.sycl = args.sycl
458465
options.iterations = args.iterations
459466
options.timeout = args.timeout
460-
options.epsilon = args.epsilon
461467
options.ur = args.ur
462468
options.ur_adapter = args.adapter
463469
options.exit_on_failure = args.exit_on_failure
@@ -472,7 +478,8 @@ def validate_and_parse_env_args(env_args):
472478
options.current_run_name = args.relative_perf
473479
options.cudnn_directory = args.cudnn_directory
474480
options.cublas_directory = args.cublas_directory
475-
options.preset = preset_get_by_name(args.preset)
481+
options.preset = args.preset
482+
options.custom_results_dir = args.results_dir
476483

477484
if args.build_igc and args.compute_runtime is None:
478485
parser.error("--build-igc requires --compute-runtime to be set")

devops/scripts/benchmarks/options.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from dataclasses import dataclass, field
22
from enum import Enum
3-
from presets import Preset, presets
43

4+
from presets import presets
55

66
class Compare(Enum):
77
LATEST = "latest"
@@ -30,19 +30,18 @@ class Options:
3030
compare: Compare = Compare.LATEST
3131
compare_max: int = 10 # average/median over how many results
3232
output_markdown: MarkdownSize = MarkdownSize.SHORT
33-
output_html: bool = False
33+
output_html: str = "local"
3434
dry_run: bool = False
35-
# these two should probably be merged into one setting
3635
stddev_threshold: float = 0.02
37-
epsilon: float = 0.02
3836
iterations_stddev: int = 5
3937
build_compute_runtime: bool = False
4038
extra_ld_libraries: list[str] = field(default_factory=list)
4139
extra_env_vars: dict = field(default_factory=dict)
4240
compute_runtime_tag: str = "25.05.32567.18"
4341
build_igc: bool = False
4442
current_run_name: str = "This PR"
45-
preset: Preset = presets[0]
43+
preset: str = "Full"
44+
custom_results_dir = None
4645

4746

4847
options = Options()

devops/scripts/benchmarks/presets.py

Lines changed: 32 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -3,63 +3,36 @@
33
# See LICENSE.TXT
44
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
55

6-
from typing import List, Type
6+
# Mapping from preset name to the list of benchmark suite names it enables.
# Keys are the values accepted by the --preset command-line option.
presets: dict[str, list[str]] = {
    "Full": [
        "Compute Benchmarks",
        "llama.cpp bench",
        "SYCL-Bench",
        "Velocity Bench",
        "UMF",
    ],
    "SYCL": [
        "Compute Benchmarks",
        "llama.cpp bench",
        "SYCL-Bench",
        "Velocity Bench",
    ],
    "Minimal": [
        "Compute Benchmarks",
    ],
    "Normal": [
        "Compute Benchmarks",
        "llama.cpp bench",
        "Velocity Bench",
    ],
    "Test": [
        "Test Suite",
    ],
}


def enabled_suites(preset: str) -> list[str]:
    """Return the benchmark suite names enabled by *preset*.

    Args:
        preset: Name of a preset; must be a key of ``presets``.

    Raises:
        ValueError: If *preset* is not a known preset name.
    """
    try:
        return presets[preset]
    except KeyError:
        # Suppress the KeyError context so callers see a single, clean
        # ValueError instead of a chained "During handling of..." traceback.
        raise ValueError(f"Preset '{preset}' not found.") from None
738

8-
class Preset:
    """Abstract base for a named selection of benchmark suites."""

    def description(self) -> str:
        """Human-readable summary of what this preset covers."""
        raise NotImplementedError

    def name(self) -> str:
        """Preset name; derived from the concrete class name."""
        return type(self).__name__

    def suites(self) -> List[str]:
        """Names of the benchmark suites this preset enables."""
        raise NotImplementedError


class Full(Preset):
    """Every benchmark suite the framework knows about."""

    def description(self) -> str:
        return "All available benchmarks."

    def suites(self) -> List[str]:
        enabled = [
            "Compute Benchmarks",
            "llama.cpp bench",
            "SYCL-Bench",
            "Velocity Bench",
            "UMF",
        ]
        return enabled


class SYCL(Preset):
    """The SYCL-related subset of the full suite list."""

    def description(self) -> str:
        return "All available benchmarks related to SYCL."

    def suites(self) -> List[str]:
        enabled = [
            "Compute Benchmarks",
            "llama.cpp bench",
            "SYCL-Bench",
            "Velocity Bench",
        ]
        return enabled


class Minimal(Preset):
    """Quick microbenchmark-only preset."""

    def description(self) -> str:
        return "Short microbenchmarks."

    def suites(self) -> List[str]:
        return ["Compute Benchmarks"]


class Normal(Preset):
    """Balanced preset mixing microbenchmarks with applications."""

    def description(self) -> str:
        return "Comprehensive mix of microbenchmarks and real applications."

    def suites(self) -> List[str]:
        return ["Compute Benchmarks", "llama.cpp bench", "Velocity Bench"]


class Test(Preset):
    """No-op suite used to exercise the benchmarking framework itself."""

    def description(self) -> str:
        return "Noop benchmarks for framework testing."

    def suites(self) -> List[str]:
        return ["Test Suite"]


# Registry of all selectable presets, in display order.
presets = [Full(), SYCL(), Minimal(), Normal(), Test()]


def preset_get_by_name(name: str) -> Preset:
    """Look up a preset by name, case-insensitively.

    Raises:
        ValueError: If no preset matches *name*.
    """
    wanted = name.upper()
    for candidate in presets:
        if candidate.name().upper() == wanted:
            return candidate
    raise ValueError(f"Preset '{name}' not found.")

0 commit comments

Comments
 (0)