
Commit 2aed296

AC: update profiling (#2894)
1 parent 59ff55d commit 2aed296

16 files changed, +276 −105 lines changed

tools/accuracy_checker/accuracy_checker/evaluators/model_evaluator.py

Lines changed: 6 additions & 1 deletion
@@ -366,11 +366,16 @@ def _process_batch_results(
             self.adapter.output_blob = self.adapter.output_blob or self.launcher.output_blob
             batch_predictions = self.adapter.process(batch_predictions, batch_identifiers, batch_meta)

+        copy_annotations, copy_predictions = None, None
+        if self.metric_executor.profiler is not None and self.metric_executor.profiler.required_postprocessing:
+            copy_annotations, copy_predictions = copy.deepcopy(batch_annotations), copy.deepcopy(batch_predictions)
+            copy_annotations, copy_predictions = self.postprocessor.deprocess_batch(
+                copy_annotations, copy_predictions, batch_meta)
         annotations, predictions = self.postprocessor.process_batch(
             batch_annotations, batch_predictions, batch_meta
         )
         _, profile_result = self.metric_executor.update_metrics_on_batch(
-            batch_input_ids, annotations, predictions, enable_profiling
+            batch_input_ids, annotations, predictions, enable_profiling, copy_annotations, copy_predictions
         )
         if output_callback:
             callback_kwargs = {'profiling_result': profile_result} if enable_profiling else {}
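
The hunk above adds a profiling-only path: when the attached profiler reports required_postprocessing, the evaluator deep-copies the raw batch, maps the copies back to original-image space via deprocess_batch, and hands them to update_metrics_on_batch next to the normally post-processed batch. A minimal, self-contained sketch of this control flow; ToyPostprocessor, ToyProfiler and prepare_profiling_copies are illustrative stand-ins, not accuracy_checker classes:

import copy

class ToyPostprocessor:
    def process_batch(self, annotations, predictions, meta):
        return annotations, predictions          # normally clips/rescales to network input space

    def deprocess_batch(self, annotations, predictions, meta):
        return annotations, predictions          # normally maps results back to original image coordinates

class ToyProfiler:
    required_postprocessing = True               # mirrors profiler.required_postprocessing

def prepare_profiling_copies(profiler, postprocessor, batch_annotations, batch_predictions, batch_meta):
    copy_annotations, copy_predictions = None, None
    if profiler is not None and profiler.required_postprocessing:
        # keep an untouched copy for the profiler before post-processing mutates the batch
        copy_annotations = copy.deepcopy(batch_annotations)
        copy_predictions = copy.deepcopy(batch_predictions)
        copy_annotations, copy_predictions = postprocessor.deprocess_batch(
            copy_annotations, copy_predictions, batch_meta)
    annotations, predictions = postprocessor.process_batch(
        batch_annotations, batch_predictions, batch_meta)
    return annotations, predictions, copy_annotations, copy_predictions

The deep copy matters because post-processing may mutate annotations and predictions in place; without it the profiler would only ever see already-transformed data.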

tools/accuracy_checker/accuracy_checker/metrics/coco_metrics.py

Lines changed: 42 additions & 15 deletions
@@ -69,6 +69,7 @@ def parameters(cls):
     def configure(self):
         self.max_detections = self.get_value_from_config('max_detections')
         threshold = process_threshold(self.get_value_from_config('threshold'))
+        self.config.pop('threshold', None)
         self.thresholds = get_or_parse_value(threshold, COCO_THRESHOLDS)
         if not self.dataset:
             raise ConfigError('coco metrics require dataset metadata providing in dataset_meta'
@@ -120,13 +121,19 @@ def _update_label_stat_for_non_matched_classes(self, labels_stat, predictions):
         matched_classes = set(labels_stat)
         background = self.dataset.metadata.get('background_label')
         prediction_classes = np.unique([pred.labels for pred in predictions])
+        get_polygon = hasattr(predictions[0], 'to_polygon')
         for pc in prediction_classes:
             if pc == background or pc in matched_classes:
                 continue
-            prediction_boxes, _, _ = _prepare_prediction_boxes(
-                pc, predictions, True
-            )
-            conf = prediction_boxes[:, 0]
+            if not get_polygon:
+                prediction_boxes, _, _ = _prepare_prediction_boxes(
+                    pc, predictions, True
+                )
+                conf = prediction_boxes[:, 0] if not get_polygon else []
+                prediction_boxes = prediction_boxes[:, 1:]
+            else:
+                prediction_boxes = [p.to_polygon().get(pc, []) for p in predictions][0]
+                conf = [p.scores[p.labels == pc] for p in predictions][0]
             label_report = {
                 'precision': [],
                 'recall': [],
@@ -135,7 +142,7 @@ def _update_label_stat_for_non_matched_classes(self, labels_stat, predictions):
                 'scores': conf,
                 'matched': defaultdict(list),
                 'gt': [],
-                'dt': prediction_boxes[:, 1:],
+                'dt': prediction_boxes,
                 'prediction_matches': 0,
                 'annotation_matches': 0,
                 'iou': []
@@ -413,8 +420,8 @@ def evaluate(self, annotations, predictions):
         return recalls


-class MSCOCOSegmAveragePrecision(MSCOCOAveragePrecision):
-    __provider__ = 'coco_segm_precision'
+class MSCOCOSegmBase(MSCOCOBaseMetric):
+    __provider__ = 'coco_segm'

     annotation_types = (CoCoInstanceSegmentationAnnotation,)
     prediction_types = (CoCoInstanceSegmentationPrediction,)
@@ -424,17 +431,37 @@ def configure(self):
         if isinstance(maskUtils, UnsupportedPackage):
             maskUtils.raise_error(self.__provider__)

+    def update(self, annotation, prediction):
+        compute_iou, create_boxes = select_specific_parameters(annotation)
+        per_class_results = []
+        profile_boxes = self.profiler is not None
+        if profile_boxes:
+            annotation_polygons = annotation.to_polygon()
+            prediction_polygons = prediction.to_polygon()

-class MSCOCOSegmRecall(MSCOCORecall):
-    __provider__ = 'coco_segm_recall'
+        for label_id, label in enumerate(self.labels):

-    annotation_types = (CoCoInstanceSegmentationAnnotation,)
-    prediction_types = (CoCoInstanceSegmentationPrediction,)
+            detections, scores, dt_difficult = prepare_predictions(prediction, label, self.max_detections)
+            ground_truth, gt_difficult, iscrowd, boxes, areas = prepare_annotations(annotation, label, create_boxes)
+            iou = compute_iou(ground_truth, detections, annotation_boxes=boxes, annotation_areas=areas, iscrowd=iscrowd)
+            eval_result = evaluate_image(
+                ground_truth if not profile_boxes else annotation_polygons.get(label, []),
+                gt_difficult, iscrowd, detections if not profile_boxes else prediction_polygons.get(label, []),
+                dt_difficult, scores, iou, self.thresholds,
+                profile_boxes
+            )
+            self.matching_results[label_id].append(eval_result)
+            per_class_results.append(eval_result)

-    def configure(self):
-        super().configure()
-        if isinstance(maskUtils, UnsupportedPackage):
-            maskUtils.raise_error(self.__provider__)
+        return per_class_results
+
+
+class MSCOCOSegmAveragePrecision(MSCOCOAveragePrecision, MSCOCOSegmBase):
+    __provider__ = 'coco_segm_precision'
+
+
+class MSCOCOSegmRecall(MSCOCORecall, MSCOCOSegmBase):
+    __provider__ = 'coco_segm_recall'


 @singledispatch
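
Here the shared mask-aware update() lives on the new MSCOCOSegmBase, while MSCOCOSegmAveragePrecision and MSCOCOSegmRecall now inherit from both their box counterpart and the segmentation base. A small, generic illustration of how Python's MRO makes that combination work, assuming the box-metric classes do not override update() themselves (the class names below are illustrative, not the real ones):

class Base:
    def update(self, annotation, prediction):
        return 'box update'

class BoxAP(Base):
    def evaluate(self):
        return 'AP over matching results'

class SegmBase(Base):
    def update(self, annotation, prediction):
        return 'mask-aware update'

class SegmAP(BoxAP, SegmBase):
    pass

# MRO: SegmAP -> BoxAP -> SegmBase -> Base, so update() is taken from SegmBase
# while evaluate() is still taken from BoxAP.
assert SegmAP().update(None, None) == 'mask-aware update'
assert SegmAP().evaluate() == 'AP over matching results'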

tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py

Lines changed: 6 additions & 28 deletions
@@ -34,8 +34,8 @@
 from ..logging import print_info
 from ..config import BaseField, BoolField, ConfigError
 from ..utils import get_or_parse_value, UnsupportedPackage
-from .metric import FullDatasetEvaluationMetric, PerImageEvaluationMetric, Metric
-from .coco_metrics import COCO_THRESHOLDS, process_threshold, compute_precision_recall
+from .metric import FullDatasetEvaluationMetric, Metric, PerImageEvaluationMetric
+from .coco_metrics import COCO_THRESHOLDS, process_threshold

 try:
     from pycocotools.coco import COCO
@@ -79,6 +79,7 @@ def parameters(cls):

     def configure(self):
         threshold = process_threshold(self.get_value_from_config('threshold'))
+        self.config.pop('threshold', None)
         self.threshold = get_or_parse_value(threshold, COCO_THRESHOLDS)
         self.box_side_delta = int(self.get_value_from_config('include_boundaries'))
         if not self.dataset.metadata:
@@ -99,7 +100,7 @@ def configure(self):
     def set_profiler(self, profiler):
         self.profiler = profiler
         self.profiling_helper = Metric.provide(
-            self.__provider__.replace('_orig', ''), {}, self.dataset, self.name, profiler=profiler
+            self.__provider__.replace('_orig', ''), {'max_detections': 100}, self.dataset, self.name, profiler=profiler
         )

     def update(self, annotation, prediction):
@@ -472,7 +473,7 @@ def evaluate(self, annotations, predictions):
         pass


-class MSCOCOorigAveragePrecision(MSCOCOorigBaseMetric):
+class MSCOCOorigAveragePrecision(MSCOCOorigBaseMetric, PerImageEvaluationMetric):
     __provider__ = 'coco_orig_precision'

     def evaluate(self, annotations, predictions):
@@ -490,34 +491,12 @@ class MSCOCOOrigSegmAveragePrecision(MSCOCOorigAveragePrecision, PerImageEvaluat

     def update(self, annotation, prediction):
         if self.profiler:
-            per_class_matching = {}
-            for _, label in enumerate(self.labels):
-                detections, scores, dt_difficult = self._prepare_predictions(prediction, label)
-                ground_truth, gt_difficult, iscrowd = self._prepare_annotations(annotation, label)
-                if not ground_truth.size:
-                    continue
-                iou = self._compute_iou(ground_truth, detections, iscrowd)
-                eval_result = self._evaluate_image(
-                    ground_truth, gt_difficult, iscrowd, detections, dt_difficult, scores, iou, self.threshold,
-                    True
-                )
-                eval_result['gt'] = annotation.to_polygon()[label]
-                eval_result['dt'] = annotation.to_polygon()[label]
-                per_class_matching[label] = eval_result
-            per_class_result = {k: compute_precision_recall(
-                self.threshold, [v])[0] for k, v in per_class_matching.items()
-            }
-            for label in per_class_matching:
-                per_class_matching[label]['result'] = per_class_result[label]
-            self.profiler.update(
-                annotation.identifier, per_class_matching, self.name, np.nanmean(list(per_class_result.values()))
-            )
+            self.profiling_helper.update(annotation, prediction)

     @staticmethod
     def _compute_iou(gt, dets, iscrowd):
         return iou_calc(list(dets), list(gt), iscrowd)

-
     @staticmethod
     def _prepare_predictions(prediction, label):
         if prediction.size == 0:
@@ -552,7 +531,6 @@ def _prepare_annotations(annotation, label):

         return ann[order], difficult_label[order], iscrowd_label[order]

-
     @staticmethod
     def _iou_type_data_to_coco(data_to_store, data, box_side_delta):
         encoded_masks = data.mask
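
The pattern in set_profiler/update above is delegation: the *_orig metric keeps a non-orig counterpart built with Metric.provide purely for profiling and forwards per-image update() calls to it instead of re-implementing the matching loop. A minimal sketch of that shape, assuming HelperMetric and OrigMetric as illustrative names rather than accuracy_checker classes:

class HelperMetric:
    def __init__(self, config, profiler):
        self.max_detections = config.get('max_detections', 100)
        self.profiler = profiler

    def update(self, annotation, prediction):
        # per-image matching and profiler.update(...) would happen here
        return []

class OrigMetric:
    profiler = None

    def set_profiler(self, profiler):
        self.profiler = profiler
        # mirrors Metric.provide(self.__provider__.replace('_orig', ''),
        #                        {'max_detections': 100}, dataset, name, profiler=profiler)
        self.profiling_helper = HelperMetric({'max_detections': 100}, profiler)

    def update(self, annotation, prediction):
        if self.profiler:
            self.profiling_helper.update(annotation, prediction)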

tools/accuracy_checker/accuracy_checker/metrics/detection.py

Lines changed: 16 additions & 16 deletions
@@ -302,18 +302,18 @@ def _calculate_map(self, annotations, predictions, profile_boxes=False, return_l
         labels_stat = self.per_class_detection_statistics(annotations, predictions, valid_labels, profile_boxes)

         average_precisions = []
-        for label in labels_stat:
-            label_precision = labels_stat[label]['precision']
-            label_recall = labels_stat[label]['recall']
+        for value in labels_stat.values():
+            label_precision = value['precision']
+            label_recall = value['recall']
             label_miss_rate = 1 - label_recall
-            labels_stat[label]['miss_rate'] = label_miss_rate
+            value['miss_rate'] = label_miss_rate
             if label_recall.size:
                 ap = average_precision(label_precision, label_recall, self.integral)
                 average_precisions.append(ap)
             else:
                 average_precisions.append(np.nan)
-            labels_stat[label]['ap'] = average_precisions[-1]
-            labels_stat[label]['result'] = average_precisions[-1]
+            value['ap'] = average_precisions[-1]
+            value['result'] = average_precisions[-1]
         if profile_boxes:
             self._update_label_stat_for_non_matched_classes(labels_stat, predictions)
             self.profiler.update(annotations[0].identifier, labels_stat, self.name, np.nanmean(average_precisions))
@@ -376,16 +376,16 @@ def update(self, annotation, prediction):
             [annotation], [prediction], valid_labels, self.profiler is not None
         )
         miss_rates = []
-        for label in labels_stat:
-            label_miss_rate = 1.0 - labels_stat[label]['recall']
-            label_fppi = labels_stat[label]['fppi']
+        for value in labels_stat.values():
+            label_miss_rate = 1.0 - value['recall']
+            label_fppi = value['fppi']

             position = bisect.bisect_left(label_fppi, self.fppi_level)
             m0 = max(0, position - 1)
             m1 = position if position < len(label_miss_rate) else m0
             miss_rates.append(0.5 * (label_miss_rate[m0] + label_miss_rate[m1]))
             if self.profiler:
-                labels_stat[label]['result'] = miss_rates[-1]
+                value['result'] = miss_rates[-1]
         if self.profiler:
             self.profiler.update(annotation[0].identifier, labels_stat, self.name, np.nanmean(miss_rates))

@@ -397,9 +397,9 @@ def evaluate(self, annotations, predictions):
         labels_stat = self.per_class_detection_statistics(annotations, predictions, valid_labels)

         miss_rates = []
-        for label in labels_stat:
-            label_miss_rate = 1.0 - labels_stat[label]['recall']
-            label_fppi = labels_stat[label]['fppi']
+        for value in labels_stat.values():
+            label_miss_rate = 1.0 - value['recall']
+            label_fppi = value['fppi']

             position = bisect.bisect_left(label_fppi, self.fppi_level)
             m0 = max(0, position - 1)
@@ -437,15 +437,15 @@ def _calculate_recall(self, annotations, predictions, profile_boxes=False):
         labels_stat = self.per_class_detection_statistics(annotations, predictions, valid_labels, profile_boxes)

         recalls = []
-        for label in labels_stat:
-            label_recall = labels_stat[label]['recall']
+        for value in labels_stat.values():
+            label_recall = value['recall']
             if label_recall.size:
                 max_recall = label_recall[-1]
                 recalls.append(max_recall)
             else:
                 recalls.append(np.nan)
             if profile_boxes:
-                labels_stat[label]['result'] = recalls[-1]
+                value['result'] = recalls[-1]
         if profile_boxes:
             self.profiler.update(annotations[0].identifier, labels_stat, self.name, np.nanmean(recalls))

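The loops above switch from indexing labels_stat by key to iterating labels_stat.values(). Both forms mutate the same per-label dictionaries, since the values are mutable; the new form simply drops the repeated labels_stat[label] lookups. A tiny illustration with toy data:

labels_stat = {0: {'recall': 0.5}, 1: {'recall': 0.75}}

for value in labels_stat.values():
    value['miss_rate'] = 1 - value['recall']

assert labels_stat[0]['miss_rate'] == 0.5
assert labels_stat[1]['miss_rate'] == 0.25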

tools/accuracy_checker/accuracy_checker/metrics/metric_executor.py

Lines changed: 24 additions & 5 deletions
@@ -82,7 +82,8 @@ def update_metrics_on_object(self, annotation, prediction):

         return metric_results

-    def update_metrics_on_batch(self, batch_ids, annotation, prediction, profile=False):
+    def update_metrics_on_batch(self, batch_ids, annotation, prediction,
+                                profile=False, deprocessed_annotation=None, deprocessed_prediction=None):
         """
         Updates metric value corresponding given batch.

@@ -94,7 +95,11 @@ def update_metrics_on_batch(self, batch_ids, annotation, prediction, profile=Fal
         results = OrderedDict()
         profile_results = OrderedDict()

-        for input_id, single_annotation, single_prediction in zip(batch_ids, annotation, prediction):
+        for idx, (input_id, single_annotation, single_prediction) in enumerate(zip(batch_ids, annotation, prediction)):
+            if profile:
+                if deprocessed_annotation is not None and deprocessed_prediction is not None:
+                    self.profiler.update_annotation_and_prediction(
+                        deprocessed_annotation[idx], deprocessed_prediction[idx])
             results[input_id] = self.update_metrics_on_object(single_annotation, single_prediction)
             if profile:
                 profile_results[input_id] = self.profiler.get_last_report()
@@ -115,6 +120,20 @@ def iterate_metrics(self, annotations, predictions):
                 profiling_file=profiling_file
             )

+    def get_metric_result_template(self, ignore_refs):
+        for name, metric_type, functor, reference, abs_threshold, rel_threshold, presenter in self.metrics:
+            profiling_file = None if functor.profiler is None else functor.profiler.report_file
+            yield presenter, EvaluationResult(
+                name=name,
+                metric_type=metric_type,
+                evaluated_value=functor.result_template,
+                reference_value=reference if not ignore_refs else None,
+                abs_threshold=abs_threshold,
+                rel_threshold=rel_threshold,
+                meta=functor.meta,
+                profiling_file=profiling_file
+            )
+
     def register_metric(self, metric_config_entry):
         type_ = 'type'
         identifier = 'name'
@@ -150,9 +169,9 @@ def register_metric(self, metric_config_entry):
             metric_type, metric_config_entry, self.dataset, metric_identifier, state=self.state, **metric_kwargs
         )
         metric_presenter = BasePresenter.provide(metric_config_entry.get(presenter, 'print_scalar'))
-        threshold_v = metric_config_entry.get(threshold)
-        abs_threshold_v = metric_config_entry.get(abs_threshold)
-        reference_v = metric_config_entry.get(reference)
+        threshold_v = metric_fn.config.get(threshold)
+        abs_threshold_v = metric_fn.config.get(abs_threshold)
+        reference_v = metric_fn.config.get(reference)
         if reference_v is not None and not isinstance(reference_v, (int, float, dict)):
             raise ConfigError(
                 'reference value should be represented as number or dictionary with numbers for each submetric'
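
The extended update_metrics_on_batch signature only consumes the deprocessed copies when profiling is enabled; each copy is matched to its batch item by index before the regular per-object update runs. A minimal, self-contained sketch of that loop, with ToyProfiler and the free function standing in for the real MetricsExecutor pieces:

from collections import OrderedDict

class ToyProfiler:
    def update_annotation_and_prediction(self, annotation, prediction):
        self._last = (annotation, prediction)

    def get_last_report(self):
        return getattr(self, '_last', None)

def update_metrics_on_batch(profiler, batch_ids, annotation, prediction,
                            profile=False, deprocessed_annotation=None, deprocessed_prediction=None):
    results, profile_results = OrderedDict(), OrderedDict()
    for idx, (input_id, ann, pred) in enumerate(zip(batch_ids, annotation, prediction)):
        if profile and deprocessed_annotation is not None and deprocessed_prediction is not None:
            # hand the idx-aligned original-space copy to the profiler first
            profiler.update_annotation_and_prediction(
                deprocessed_annotation[idx], deprocessed_prediction[idx])
        results[input_id] = (ann, pred)  # stands in for update_metrics_on_object
        if profile:
            profile_results[input_id] = profiler.get_last_report()
    return results, profile_results

results, profile_results = update_metrics_on_batch(
    ToyProfiler(), [0], ['ann'], ['pred'], True, ['ann_deprocessed'], ['pred_deprocessed'])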

tools/accuracy_checker/accuracy_checker/metrics/metric_profiler/base_profiler.py

Lines changed: 9 additions & 5 deletions
@@ -69,6 +69,7 @@ def __init__(self, dump_iterations=100, report_type='csv', name=None):
         self.storage = OrderedDict()
         self.write_result = self.write_csv_result if report_type == 'csv' else self.write_json_result
         self._last_profile = None
+        self.required_postprocessing = False

     def register_metric(self, metric_name):
         self.fields.append('{}_result'.format(metric_name))
@@ -106,7 +107,7 @@ def write_summary(self, summary):
         out_path = self.out_dir / self.report_file
         new_file = not out_path.exists()
         if not new_file:
-            with open(str(out_path), 'r') as f:
+            with open(str(out_path), 'r', encoding='utf-8') as f:
                 out_dict = json.load(f)
         else:
             out_dict = {
@@ -122,7 +123,7 @@ def write_summary(self, summary):
                 'dataset_meta': self.dataset_meta,
             }
         out_dict.update(summary)
-        with open(str(out_path), 'w') as f:
+        with open(str(out_path), 'w', encoding='utf-8') as f:
             json.dump(out_dict, f)

     def reset(self):
@@ -142,7 +143,7 @@ def write_csv_result(self):
             else:
                 data_to_store.append(value)

-        with open(str(out_path), 'a+', newline='') as f:
+        with open(str(out_path), 'a+', newline='', encoding='utf-8') as f:
             writer = DictWriter(f, fieldnames=self.fields)
             if new_file:
                 writer.writeheader()
@@ -154,7 +155,7 @@ def write_json_result(self):
         out_path = self.out_dir / self.report_file
         new_file = not out_path.exists()
         if not new_file:
-            with open(str(out_path), 'r') as f:
+            with open(str(out_path), 'r', encoding='utf-8') as f:
                 out_dict = json.load(f)
                 out_dict['report'].extend(list(self.storage.values()))
         else:
@@ -170,7 +171,7 @@ def write_json_result(self):
                 'report_type': self.__provider__,
                 'dataset_meta': self.dataset_meta
             }
-        with open(str(out_path), 'w') as f:
+        with open(str(out_path), 'w', encoding='utf-8') as f:
             json.dump(out_dict, f)

         self._reset_storage()
@@ -193,6 +194,9 @@ def set_dataset_meta(self, meta):
     def last_report(self):
         return self._last_profile

+    def update_annotation_and_prediction(self, annotation, prediction):
+        pass
+

 def create_profiler(metric_type, metric_name):
     profiler = None
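
The base profiler now exposes required_postprocessing (default False) and a no-op update_annotation_and_prediction hook, so the evaluator can ask for deprocessed copies only when a profiler actually needs them. A sketch of how a concrete profiler would opt in; PolygonProfiler is an illustrative subclass name, not one from the repository:

class BaseProfiler:
    def __init__(self):
        self.required_postprocessing = False

    def update_annotation_and_prediction(self, annotation, prediction):
        pass  # default: no extra data is needed

class PolygonProfiler(BaseProfiler):
    def __init__(self):
        super().__init__()
        # ask the evaluator for deprocessed (original-coordinate) copies
        self.required_postprocessing = True

    def update_annotation_and_prediction(self, annotation, prediction):
        # stash the copies for use in the next per-image report
        self._deprocessed = (annotation, prediction)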
