@@ -54,7 +54,6 @@
 
 def evaluate_coco(img_path, set_name, image_ids, coco, model, threshold=0.05):
     results = []
-    processed_image_ids = []
 
     regressBoxes = BBoxTransform()
     clipBoxes = ClipBoxes()
@@ -86,8 +85,6 @@ def evaluate_coco(img_path, set_name, image_ids, coco, model, threshold=0.05):
         if not preds:
             continue
 
-        processed_image_ids.append(image_id)
-
         preds = invert_affine(framed_metas, preds)[0]
 
         scores = preds['scores']
@@ -124,8 +121,6 @@ def evaluate_coco(img_path, set_name, image_ids, coco, model, threshold=0.05):
         os.remove(filepath)
     json.dump(results, open(filepath, 'w'), indent=4)
 
-    return processed_image_ids
-
 
 def _eval(coco_gt, image_ids, pred_json_path):
     # load results in COCO evaluation tool
@@ -161,6 +156,6 @@ def _eval(coco_gt, image_ids, pred_json_path):
     if use_float16:
         model.half()
 
-    image_ids = evaluate_coco(VAL_IMGS, SET_NAME, image_ids, coco_gt, model)
+    evaluate_coco(VAL_IMGS, SET_NAME, image_ids, coco_gt, model)
 
     _eval(coco_gt, image_ids, f'{SET_NAME}_bbox_results.json')
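
Since evaluate_coco no longer returns the list of processed image ids, _eval is expected to evaluate against the same image_ids list that was passed in. The body of _eval is not part of this diff, so the following is only a minimal sketch of how that step presumably works with the standard pycocotools API; everything beyond the signature and the "# load results in COCO evaluation tool" comment is an assumption.

from pycocotools.cocoeval import COCOeval


def _eval(coco_gt, image_ids, pred_json_path):
    # load results in COCO evaluation tool
    # (sketch only: the real body is not shown in this diff)
    coco_pred = coco_gt.loadRes(pred_json_path)

    # run bbox evaluation restricted to the image ids that were passed in,
    # which is why the list previously returned by evaluate_coco was redundant
    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()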