Skip to content

Commit e4ebc65

Browse files
author
akolic
committed
Fix how AnalysisReportJsonFileRule looks for the JSON file when diagnostics mode is enabled and the benchmark is bundle-based; disable the 'agent' stage for 'micronaut-pegasus' by overriding the 'stages' method, so that the problematic 'agent' stage is not executed even if it is requested.
1 parent ad42202 commit e4ebc65

File tree

3 files changed

+44
-28
lines changed

3 files changed

+44
-28
lines changed

sdk/mx.sdk/mx_sdk_benchmark.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1191,13 +1191,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
11911191
"micronaut-similarity": {},
11921192
"micronaut-pegasus": {},
11931193
"quarkus-hello-world": {},
1194-
"quarkus-tika-odt": {
1195-
"barista-bench-name": "quarkus-tika",
1196-
},
1197-
"quarkus-tika-pdf": {
1198-
"barista-bench-name": "quarkus-tika",
1199-
"workload": "pdf-workload.barista.json",
1200-
},
1194+
"quarkus-tika": {},
12011195
"spring-hello-world": {},
12021196
"spring-petclinic": {},
12031197
},
@@ -1262,7 +1256,7 @@ def subgroup(self):
12621256

12631257
def benchmarkList(self, bmSuiteArgs):
    """Return the runnable benchmarks: the complete list minus known-broken entries.

    :param bmSuiteArgs: benchmark suite arguments, forwarded to ``completeBenchmarkList``.
    :return: list of benchmark names that can actually be run on the JVM.
    """
    exclude = []
    # Barista currently does not support running 'micronaut-pegasus' on the JVM - running it results in a crash (GR-59793)
    exclude.append("micronaut-pegasus")
    # Idiomatic membership test: 'b not in exclude' rather than 'not b in exclude' (PEP 8).
    return [b for b in self.completeBenchmarkList(bmSuiteArgs) if b not in exclude]
12681262

substratevm/mx.substratevm/mx_substratevm_benchmark.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
import re
3232
from glob import glob
3333
from pathlib import Path
34+
from typing import List
3435

3536
import mx
3637
import mx_benchmark
@@ -287,13 +288,16 @@ def benchmarkName(self):
287288
return self.context.benchmark
288289

289290
def benchmarkList(self, bmSuiteArgs):
    """Return every benchmark from the complete list.

    No benchmarks are excluded at the moment; the exclusion set is kept as an
    explicit hook so future exclusions slot in without restructuring.
    """
    excluded = set()
    return [bench for bench in self.completeBenchmarkList(bmSuiteArgs) if bench not in excluded]
293+
294+
def stages(self, bm_suite_args: List[str]) -> List[mx_sdk_benchmark.Stage]:
    """Compute the stages to execute, dropping the unsupported 'agent' stage for 'micronaut-pegasus'."""
    stages = super().stages(bm_suite_args)
    if self.context.benchmark == "micronaut-pegasus":
        # The 'agent' stage is not supported, as currently we cannot run micronaut-pegasus on the JVM (GR-59793)
        if mx_sdk_benchmark.Stage.AGENT in stages:
            stages.remove(mx_sdk_benchmark.Stage.AGENT)
            mx.warn(f"Skipping the 'agent' stage as it is not supported for the 'micronaut-pegasus' benchmark. The stages that will be executed are: {[stage.value for stage in stages]}")
    return stages
297301

298302
def application_nib(self):
299303
if self.benchmarkName() not in self._application_nibs:

vm/mx.vm/mx_vm_benchmark.py

Lines changed: 31 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,7 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc
181181

182182
# Path to the X.nib bundle file --bundle-apply is specified
183183
bundle_apply_path = self.get_bundle_path_if_present()
184+
self.is_bundle_based = bundle_apply_path is not None
184185
# Path to the X.output directory if a bundle is created
185186
# In that case, files generated by Native Image are generated in that folder structure
186187
bundle_create_path = self.get_bundle_create_path_if_present()
@@ -197,13 +198,13 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc
197198

198199
if bundle_create_path:
199200
self.bundle_output_path = bundle_create_path
200-
elif bundle_apply_path:
201+
elif self.is_bundle_based:
201202
bundle_dir = bundle_apply_path.parent
202203
bundle_name = bundle_apply_path.name
203204
assert bundle_name.endswith(BUNDLE_EXTENSION), bundle_name
204205
self.bundle_output_path = bundle_dir / f"{bundle_name[:-len(BUNDLE_EXTENSION)]}.output"
205206

206-
if not bundle_apply_path:
207+
if not self.is_bundle_based:
207208
base_image_build_args += self.classpath_arguments
208209
base_image_build_args += self.modulepath_arguments
209210
base_image_build_args += self.executable
@@ -258,7 +259,7 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc
258259
# Inform the StagesInfo object about removed stages
259260
bm_suite.stages_info.setup(removed_stages)
260261

261-
bundle_args = [f'--bundle-apply={bundle_apply_path}'] if bundle_apply_path else []
262+
bundle_args = [f'--bundle-apply={bundle_apply_path}'] if self.is_bundle_based else []
262263
# benchmarks are allowed to use experimental options
263264
# the bundle might also inject experimental options, but they will be appropriately locked/unlocked.
264265
self.base_image_build_args = [os.path.join(vm.home(), 'bin', 'native-image')] + svm_experimental_options(base_image_build_args) + bundle_args
@@ -972,7 +973,7 @@ def image_build_general_rules(self, benchmarks):
972973

973974
def image_build_analysis_rules(self, benchmarks):
974975
return [
975-
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
976+
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
976977
"bench-suite": self.config.benchmark_suite_name,
977978
"benchmark": benchmarks[0],
978979
"metric.name": "analysis-stats",
@@ -983,7 +984,7 @@ def image_build_analysis_rules(self, benchmarks):
983984
"metric.iteration": 0,
984985
"metric.object": "call-edges",
985986
}, ['total_call_edges']),
986-
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
987+
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
987988
"bench-suite": self.config.benchmark_suite_name,
988989
"benchmark": benchmarks[0],
989990
"metric.name": "analysis-stats",
@@ -994,7 +995,7 @@ def image_build_analysis_rules(self, benchmarks):
994995
"metric.iteration": 0,
995996
"metric.object": "reachable-types",
996997
}, ['total_reachable_types']),
997-
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
998+
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
998999
"bench-suite": self.config.benchmark_suite_name,
9991000
"benchmark": benchmarks[0],
10001001
"metric.name": "analysis-stats",
@@ -1005,7 +1006,7 @@ def image_build_analysis_rules(self, benchmarks):
10051006
"metric.iteration": 0,
10061007
"metric.object": "reachable-methods",
10071008
}, ['total_reachable_methods']),
1008-
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
1009+
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
10091010
"bench-suite": self.config.benchmark_suite_name,
10101011
"benchmark": benchmarks[0],
10111012
"metric.name": "analysis-stats",
@@ -1016,7 +1017,7 @@ def image_build_analysis_rules(self, benchmarks):
10161017
"metric.iteration": 0,
10171018
"metric.object": "reachable-fields",
10181019
}, ['total_reachable_fields']),
1019-
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
1020+
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
10201021
"bench-suite": self.config.benchmark_suite_name,
10211022
"benchmark": benchmarks[0],
10221023
"metric.name": "analysis-stats",
@@ -1427,20 +1428,37 @@ class AnalysisReportJsonFileRule(mx_benchmark.JsonStdOutFileRule):
14271428
final path of the ``reports`` directory, instead.
14281429
"""
14291430

1430-
def __init__(self, report_directory, bundle_output_dir, is_diagnostics_mode, is_bundle_based, replacement, keys):
    """Create a rule that locates the analysis-results JSON file referenced in the image-build output.

    :param report_directory: directory where Native Image places build reports.
    :param bundle_output_dir: the X.output directory of a bundle build (may be unused when not bundle-based).
    :param is_diagnostics_mode: whether diagnostics mode is enabled for this build.
    :param is_bundle_based: whether the benchmark build is bundle-based.
    :param replacement: datapoint template forwarded to the parent rule.
    :param keys: JSON keys forwarded to the parent rule.
    """
    super().__init__(r"^# Printing analysis results stats to: (?P<path>\S+?)$", "path", replacement, keys)
    self.report_directory = report_directory
    self.bundle_output_dir = bundle_output_dir
    self.is_diagnostics_mode = is_diagnostics_mode
    self.is_bundle_based = is_bundle_based
1437+
1438+
def get_diagnostics_dir_name(self, json_file_path) -> str:
    """Extracts the name of the diagnostics directory, the directory containing the JSON file, from the absolute path of the JSON file.

    Note: the return annotation is ``str`` (not ``Path``) because ``PurePath.name``
    yields a plain string; callers join it onto a ``Path`` with the ``/`` operator.
    """
    return Path(json_file_path).parent.name
1441+
1442+
def get_base_search_dir(self, json_file_path) -> Path:
    """Returns the absolute path to the directory where we expect to find the JSON file containing analysis results stats.

    DEVELOPER NOTE:
    Unfortunately, the analysis results JSON file ends up in different locations depending on:
    - whether the diagnostics mode is enabled (the results end up inside the diagnostics directory)
    - whether the benchmark is bundle based (the diagnostics directory ends up in the "other" subdirectory of the bundle output directory)
    """
    if not self.is_diagnostics_mode:
        return self.report_directory
    diagnostics_dir = self.get_diagnostics_dir_name(json_file_path)
    if self.is_bundle_based:
        return self.bundle_output_dir / "other" / diagnostics_dir
    return self.report_directory / diagnostics_dir
14341455

14351456
def getJsonFiles(self, text):
14361457
json_files = super().getJsonFiles(text)
14371458
found_json_files = []
14381459
for json_file_path in json_files:
14391460
json_file_name = os.path.basename(json_file_path)
1440-
base_search_dir = self.report_directory
1441-
if self.is_diagnostics_mode:
1442-
base_search_dir = os.path.join(base_search_dir, os.path.basename(os.path.dirname(json_file_path)))
1443-
expected_json_file_path = os.path.join(base_search_dir, json_file_name)
1461+
expected_json_file_path = os.path.join(self.get_base_search_dir(json_file_path), json_file_name)
14441462
if exists(expected_json_file_path):
14451463
found_json_files.append(expected_json_file_path)
14461464
else:

0 commit comments

Comments
 (0)