diff --git a/tools/submission/submission_checker/constants.py b/tools/submission/submission_checker/constants.py index 5c915df03c..faa9967716 100644 --- a/tools/submission/submission_checker/constants.py +++ b/tools/submission/submission_checker/constants.py @@ -1143,6 +1143,12 @@ "MultiStream": "early_stopping_latency_ms", "Server": "result_completed_samples_per_sec", }, + "v6.0": { + "Offline": "result_samples_per_second", + "SingleStream": "early_stopping_latency_ss", + "MultiStream": "early_stopping_latency_ms", + "Server": "result_completed_samples_per_sec", + }, } RESULT_FIELD_BENCHMARK_OVERWRITE = { diff --git a/tools/submission/submission_checker/main.py b/tools/submission/submission_checker/main.py index bfd5c0ac30..34dc4061f7 100644 --- a/tools/submission/submission_checker/main.py +++ b/tools/submission/submission_checker/main.py @@ -1,8 +1,11 @@ import argparse import logging import os -import sys -sys.path.append(os.path.dirname(os.path.dirname(__file__))) + +if __name__ == "__main__" and __package__ is None: + import sys + sys.path.append(os.path.dirname(os.path.dirname(__file__))) + __package__ = "submission_checker" from .constants import MODEL_CONFIG from .configuration.configuration import Config @@ -109,15 +112,15 @@ def main(): args.version, args.extra_model_benchmark_map, ignore_uncommited=args.submission_exceptions, - skip_compliance = args.skip_power_check, - skip_power_check = args.skip_power_check, - skip_meaningful_fields_emptiness_check = args.skip_meaningful_fields_emptiness_check, - skip_check_power_measure_files = args.skip_check_power_measure_files, - skip_empty_files_check = args.skip_empty_files_check, - skip_extra_files_in_root_check = args.skip_extra_files_in_root_check, - skip_extra_accuracy_files_check = args.skip_extra_accuracy_files_check, - skip_all_systems_have_results_check = args.skip_all_systems_have_results_check, - skip_calibration_check = args.skip_calibration_check, + skip_compliance=args.skip_compliance, + 
skip_power_check=args.skip_power_check, + skip_meaningful_fields_emptiness_check=args.skip_meaningful_fields_emptiness_check, + skip_check_power_measure_files=args.skip_check_power_measure_files, + skip_empty_files_check=args.skip_empty_files_check, + skip_extra_files_in_root_check=args.skip_extra_files_in_root_check, + skip_extra_accuracy_files_check=args.skip_extra_accuracy_files_check, + skip_all_systems_have_results_check=args.skip_all_systems_have_results_check, + skip_calibration_check=args.skip_calibration_check, ) if args.scenarios_to_skip: @@ -138,12 +141,18 @@ def main(): # Main loop over all the submissions for logs in loader.load(): # Initialize check classes - performance_checks = PerformanceCheck(log, logs.loader_data["perf_path"], config, logs) - accuracy_checks = AccuracyCheck(log, logs.loader_data["acc_path"], config, logs) - system_checks = SystemCheck(log, logs.loader_data["system_path"], config, logs) - measurements_checks = MeasurementsCheck(log, logs.loader_data["measurements_path"], config, logs) - measurements_checks = ComplianceCheck(log, logs.loader_data["compliance_path"], config, logs) - power_checks = PowerCheck(log, logs.loader_data["power_dir_path"], config, logs) + performance_checks = PerformanceCheck( + log, logs.loader_data["perf_path"], config, logs) + accuracy_checks = AccuracyCheck( + log, logs.loader_data["acc_path"], config, logs) + system_checks = SystemCheck( + log, logs.loader_data["system_path"], config, logs) + measurements_checks = MeasurementsCheck( + log, logs.loader_data["measurements_path"], config, logs) + measurements_checks = ComplianceCheck( + log, logs.loader_data["compliance_path"], config, logs) + power_checks = PowerCheck( + log, logs.loader_data["power_dir_path"], config, logs) # Run checks valid = True valid &= performance_checks() @@ -154,19 +163,26 @@ def main(): # Add results to summary if valid: # Results dictionary - results[logs.loader_data.get("perf_path")] = 
logs.loader_data.get("performance_metric") + results[logs.loader_data.get("perf_path")] = logs.loader_data.get( + "performance_metric") # System dictionary system_id = logs.loader_data.get("system") if os.path.exists(logs.loader_data.get("power_dir_path", "")): - if system_id in systems[logs.loader_data.get("division")]["power"]: - systems[logs.loader_data.get("division")]["power"][system_id] += 1 + if system_id in systems[logs.loader_data.get( + "division")]["power"]: + systems[logs.loader_data.get( + "division")]["power"][system_id] += 1 else: - systems[logs.loader_data.get("division")]["power"][system_id] = 1 + systems[logs.loader_data.get( + "division")]["power"][system_id] = 1 else: - if system_id in systems[logs.loader_data.get("division")]["non_power"]: - systems[logs.loader_data.get("division")]["non_power"][system_id] += 1 + if system_id in systems[logs.loader_data.get( + "division")]["non_power"]: + systems[logs.loader_data.get( + "division")]["non_power"][system_id] += 1 else: - systems[logs.loader_data.get("division")]["non_power"][system_id] = 1 + systems[logs.loader_data.get( + "division")]["non_power"][system_id] = 1 # CSV exporter exporter.add_result(logs) else: @@ -186,7 +202,6 @@ def main(): if v is None: log.error("NoResults %s", k) - closed_systems = systems.get("closed", {}) open_systems = systems.get("open", {}) network_systems = systems.get("network", {}) diff --git a/tools/submission/submission_checker/results.py b/tools/submission/submission_checker/results.py index 46d36f8ebe..dc5bf681ec 100644 --- a/tools/submission/submission_checker/results.py +++ b/tools/submission/submission_checker/results.py @@ -89,7 +89,7 @@ def add_result(self, submission_logs: SubmissionLogs): def export_row(self, row: dict): - values = [str(row.get(key, "")) for key in self.head] + values = ['"' + str(row.get(key, "")).replace('"', '""') + '"' for key in self.head] csv_row = ",".join(values) + "\n" with open(self.csv_path, "+a") as csv: csv.write(csv_row) @@ -100,4 +100,4 @@ def 
export(self): with open(self.csv_path, "w") as csv: csv.write(csv_header) for row in self.rows: - self.export_row(row) \ No newline at end of file + self.export_row(row)