17 | 17 | from unittest.mock import Mock, MagicMock
18 | 18 |
19 | 19 | from openvino.tools.accuracy_checker.evaluators import ModelEvaluator
20 |    | -
   | 20 | +from openvino.tools.accuracy_checker.evaluators.model_evaluator import get_config_metrics
21 | 21 |
22 | 22 | class TestModelEvaluator:
23 | 23 |     def setup_method(self):
@@ -143,6 +143,58 @@ def test_process_dataset_with_loading_predictions_and_with_dataset_processors(se
143 | 143 |         assert not self.postprocessor.process_dataset.called
144 | 144 |         assert self.postprocessor.full_process.called
145 | 145 |
    | 146 | +    def test_model_evaluator_get_config_metrics(self, mocker):
    | 147 | +        dataset_config = {
    | 148 | +            'metrics': [{'type': 'accuracy', 'top_k': 1, 'reference': 0.78}],
    | 149 | +            'subset_metrics': [{'subset_size': '20%',
    | 150 | +                                'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.65}]}]
    | 151 | +        }
    | 152 | +        metric = {'type': 'accuracy', 'top_k': 1, 'reference': 0.78}
    | 153 | +        selected_metric = get_config_metrics(dataset_config)[0]
    | 154 | +
    | 155 | +        assert metric['reference'] == selected_metric['reference']
    | 156 | +        assert metric['top_k'] == selected_metric['top_k']
    | 157 | +
    | 158 | +    def test_model_evaluator_get_config_metrics_is_first_subset_metrics(self, mocker):
    | 159 | +        dataset_config_sub_evaluation = {'sub_evaluation': 'True',
    | 160 | +                                         'metrics': [{'type': 'accuracy', 'top_k': 1, 'reference': 0.78}],
    | 161 | +                                         'subset_metrics': [
    | 162 | +                                             {'subset_size': '10%', 'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.65}]},
    | 163 | +                                             {'subset_size': '20%', 'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.72}]}]
    | 164 | +                                         }
    | 165 | +        subset_metric = {'type': 'accuracy', 'top_k': 5, 'reference': 0.65}
    | 166 | +        selected_metric = get_config_metrics(dataset_config_sub_evaluation)[0]
    | 167 | +
    | 168 | +        assert subset_metric['reference'] == selected_metric['reference']
    | 169 | +        assert subset_metric['top_k'] == selected_metric['top_k']
    | 170 | +
    | 171 | +    def test_model_evaluator_get_config_metrics_with_subsample_size_from_subset_metrics(self, mocker):
    | 172 | +        dataset_config_sub_evaluation = {'sub_evaluation': 'True', 'subsample_size': '20%',
    | 173 | +                                         'metrics': [{'type': 'accuracy', 'top_k': 1, 'reference': 0.78}],
    | 174 | +                                         'subset_metrics': [
    | 175 | +                                             {'subset_size': '10%', 'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.65}]},
    | 176 | +                                             {'subset_size': '20%', 'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.72}]}]
    | 177 | +                                         }
    | 178 | +        subset_metric = {'type': 'accuracy', 'top_k': 5, 'reference': 0.72}
    | 179 | +        selected_metric = get_config_metrics(dataset_config_sub_evaluation)[0]
    | 180 | +
    | 181 | +        assert subset_metric['reference'] == selected_metric['reference']
    | 182 | +        assert subset_metric['top_k'] == selected_metric['top_k']
    | 183 | +
    | 184 | +
    | 185 | +    def test_model_evaluator_get_config_metrics_from_subset_metrics(self, mocker):
    | 186 | +        dataset_config_sub_evaluation = {'sub_evaluation': 'True',
    | 187 | +                                         'metrics': [{'type': 'accuracy', 'top_k': 1, 'reference': 0.78}],
    | 188 | +                                         'subset_metrics': [{'subset_size': '20%',
    | 189 | +                                                             'metrics': [{'type': 'accuracy', 'top_k': 5, 'reference': 0.65}]}]
    | 190 | +                                         }
    | 191 | +        subset_metric = {'type': 'accuracy', 'top_k': 5, 'reference': 0.65}
    | 192 | +        selected_metric = get_config_metrics(dataset_config_sub_evaluation)[0]
    | 193 | +
    | 194 | +        assert subset_metric['reference'] == selected_metric['reference']
    | 195 | +        assert subset_metric['top_k'] == selected_metric['top_k']
    | 196 | +
    | 197 | +
146 | 198 |
147 | 199 | class TestModelEvaluatorAsync:
148 | 200 |     def setup_method(self):
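Note: the new tests pin down how get_config_metrics chooses a metric list: without 'sub_evaluation' the top-level 'metrics' are returned; with 'sub_evaluation' and a 'subset_metrics' list, the entry whose 'subset_size' matches 'subsample_size' wins, and the first entry is used when no 'subsample_size' is given. The sketch below only restates that selection rule as implied by the tests; it is an assumption for illustration, not the shipped implementation in model_evaluator.py, and the name get_config_metrics_sketch is hypothetical.

# Hypothetical sketch of the selection rule the tests above exercise.
def get_config_metrics_sketch(dataset_config):
    # Without sub_evaluation (or without subset_metrics), use the top-level metrics as-is.
    if not dataset_config.get('sub_evaluation') or 'subset_metrics' not in dataset_config:
        return dataset_config['metrics']
    subset_metrics = dataset_config['subset_metrics']
    subsample_size = dataset_config.get('subsample_size')
    if subsample_size:
        # Prefer the subset whose 'subset_size' matches the configured 'subsample_size'.
        for subset in subset_metrics:
            if subset.get('subset_size') == subsample_size:
                return subset['metrics']
    # No (matching) subsample_size: fall back to the first subset entry.
    return subset_metrics[0]['metrics']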