Skip to content

Commit bd34399

Browse files
Fix Bug: do not skip measurements checks
1 parent c461e67 commit bd34399

File tree

3 files changed

+16
-13
lines changed

3 files changed

+16
-13
lines changed

tools/submission/submission_checker/checks/base.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,16 @@ def run_checks(self):
2323
valid = True
2424
errors = []
2525
for check in self.checks:
26-
v = self.execute(check)
27-
valid &= v
28-
if not valid:
29-
return False
26+
try:
27+
v = self.execute(check)
28+
valid &= v
29+
except BaseException:
30+
valid &= False
31+
self.log.error(
32+
"Execution occurred in running check %s. Running %s in %s",
33+
self.path,
34+
check.__name__,
35+
self.__class__.__name__)
3036
return valid
3137

3238
def execute(self, check):

tools/submission/submission_checker/checks/performance_check.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -232,9 +232,8 @@ def network_check(self):
232232
is_valid = expected_state_by_division[self.division] is is_network_system
233233
if not is_valid:
234234
self.log.error(
235-
f"{
236-
self.path} incorrect network mode(={is_network_system}) for division '{
237-
self.division}'"
235+
f"{self.path} incorrect network mode(={is_network_system}) "
236+
f"for division {self.division}"
238237
)
239238
return False
240239

@@ -298,7 +297,7 @@ def inferred_check(self):
298297
("singlestream", "offline")
299298
]
300299
if (self.scenario.lower(), self.scenario_fixed.lower()
301-
) not in list_inferred:
300+
) not in list_inferred:
302301
self.log.error(
303302
"Result for scenario %s can not be inferred from %s for: %s",
304303
self.scenario_fixed,
@@ -362,12 +361,12 @@ def get_inferred_result(self, res):
362361
res = qps_wo_loadgen_overhead
363362

364363
if (self.scenario_fixed in ["Offline"]
365-
) and self.scenario in ["MultiStream"]:
364+
) and self.scenario in ["MultiStream"]:
366365
inferred = True
367366
res = samples_per_query * S_TO_MS / (latency_mean / MS_TO_NS)
368367

369368
if (self.scenario_fixed in ["MultiStream"]
370-
) and self.scenario in ["SingleStream"]:
369+
) and self.scenario in ["SingleStream"]:
371370
inferred = True
372371
# samples_per_query does not match with the one reported in the logs
373372
# when inferring MultiStream from SingleStream
@@ -384,6 +383,6 @@ def get_inferred_result(self, res):
384383
else:
385384
res = (latency_99_percentile * samples_per_query) / MS_TO_NS
386385
if (self.scenario_fixed in ["Interactive"]
387-
) and self.scenario not in ["Server"]:
386+
) and self.scenario not in ["Server"]:
388387
is_valid = False
389388
return res, is_valid

tools/submission/submission_checker/main.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -155,8 +155,6 @@ def main():
155155
log, logs.loader_data["system_path"], config, logs)
156156
measurements_checks = MeasurementsCheck(
157157
log, logs.loader_data["measurements_path"], config, logs)
158-
measurements_checks = ComplianceCheck(
159-
log, logs.loader_data["compliance_path"], config, logs)
160158
power_checks = PowerCheck(
161159
log, logs.loader_data["power_dir_path"], config, logs)
162160
# Run checks

0 commit comments

Comments (0)