Skip to content

Commit 393bcc1

Browse files
Format files
1 parent 2da1fb6 commit 393bcc1

File tree

2 files changed

+19
-14
lines changed

2 files changed

+19
-14
lines changed

tools/submission/submission_checker/checks/accuracy_check.py

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,11 @@
55
import re
66
import os
77

8+
89
class AccuracyCheck(BaseCheck):
9-
def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
10+
def __init__(
11+
self, log, path, config: Config, submission_logs: SubmissionLogs
12+
):
1013
super().__init__(log, path)
1114
self.name = "accuracy checks"
1215
self.submission_logs = submission_logs
@@ -15,9 +18,12 @@ def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
1518
self.accuracy_json = self. submission_logs.accuracy_json
1619
self.config = config
1720
self.model = self.submission_logs.loader_data.get("benchmark", "")
18-
self.model_mapping = self.submission_logs.loader_data.get("model_mapping", {})
19-
self.model = self.config.get_mlperf_model(self.model, self.model_mapping)
20-
self.scenario_fixed = self.submission_logs.loader_data.get("scenario", "")
21+
self.model_mapping = self.submission_logs.loader_data.get(
22+
"model_mapping", {})
23+
self.model = self.config.get_mlperf_model(
24+
self.model, self.model_mapping)
25+
self.scenario_fixed = self.submission_logs.loader_data.get(
26+
"scenario", "")
2127
self.scenario = self.mlperf_log["effective_scenario"]
2228
self.division = self.submission_logs.loader_data.get("division", "")
2329
self.setup_checks()
@@ -97,7 +103,7 @@ def accuracy_result_check(self):
97103
if self.division.lower() == "open":
98104
return True
99105
return is_valid
100-
106+
101107
def accuracy_json_check(self):
102108
if not os.path.exists(self.accuracy_json):
103109
self.log.error("%s is missing", self.accuracy_json)
@@ -107,7 +113,7 @@ def accuracy_json_check(self):
107113
self.log.error("%s is not truncated", self.accuracy_json)
108114
return False
109115
return True
110-
116+
111117
def loadgen_errors_check(self):
112118
if self.mlperf_log.has_error():
113119
if self.config.ignore_uncommited:
@@ -125,7 +131,7 @@ def loadgen_errors_check(self):
125131
)
126132
return False
127133
return True
128-
134+
129135
def dataset_check(self):
130136
if self.config.skip_dataset_size_check:
131137
self.log.info(
@@ -139,4 +145,4 @@ def dataset_check(self):
139145
"%s accurcy run does not cover all dataset, accuracy samples: %s, dataset size: %s", self.path, qsl_total_count, expected_qsl_total_count
140146
)
141147
return False
142-
return True
148+
return True

tools/submission/submission_checker/configuration/configuration.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,9 @@ def __init__(
3737
self.skip_calibration_check = skip_calibration_check
3838
self.skip_dataset_size_check = skip_dataset_size_check
3939
self.load_config(version)
40-
4140

4241
def load_config(self, version):
43-
# TODO: Load values from
42+
# TODO: Load values from
4443
self.models = self.base["models"]
4544
self.seeds = self.base["seeds"]
4645
if self.base.get("test05_seeds"):
@@ -120,7 +119,7 @@ def get_accuracy_target(self, model):
120119

121120
def get_accuracy_upper_limit(self, model):
122121
return self.accuracy_upper_limit.get(model, None)
123-
122+
124123
def get_accuracy_values(self, model):
125124
patterns = []
126125
acc_targets = []
@@ -167,7 +166,7 @@ def get_min_query_count(self, model, scenario):
167166
if model not in self.min_queries:
168167
raise ValueError("model not known: " + model)
169168
return self.min_queries[model].get(scenario)
170-
169+
171170
def get_dataset_size(self, model):
172171
model = self.get_mlperf_model(model)
173172
if model not in self.dataset_size:
@@ -207,7 +206,7 @@ def requires_equal_issue(self, model, division):
207206
]
208207
and self.version in ["v4.1"]
209208
)
210-
209+
211210
def get_llm_models(self):
212211
return [
213212
"llama2-70b-99",
@@ -217,6 +216,6 @@ def get_llm_models(self):
217216
"mixtral-8x7b",
218217
"llama3.1-405b",
219218
"llama3.1-8b",
220-
"llama3.1-8b-edge",
219+
"llama3.1-8b-edge",
221220
"deepseek-r1"
222221
]

0 commit comments

Comments (0)