
Commit ce4b996

fix(evaluator): compat bug and update requirement (Megvii-BaseDetection#1416)
1 parent: 48fd2a9

6 files changed: +21 -52 lines

requirements.txt

Lines changed: 0 additions & 3 deletions
@@ -3,14 +3,11 @@ numpy
 torch>=1.7
 opencv_python
 loguru
-scikit-image
 tqdm
 torchvision
-Pillow
 thop
 ninja
 tabulate
-tensorboard

 # verified versions
 # pycocotools corresponds to https://github.com/ppwwyyxx/cocoapi

setup.cfg

Lines changed: 2 additions & 2 deletions
@@ -3,8 +3,8 @@ line_length = 100
 multi_line_output = 3
 balanced_wrapping = True
 known_standard_library = setuptools
-known_third_party = tqdm,loguru
-known_data_processing = cv2,numpy,scipy,PIL,matplotlib,scikit_image
+known_third_party = tqdm,loguru,tabulate
+known_data_processing = cv2,numpy,scipy,PIL,matplotlib
 known_datasets = pycocotools
 known_deeplearning = torch,torchvision,caffe2,onnx,apex,timm,thop,torch2trt,tensorrt,openvino,onnxruntime
 known_myself = yolox

yolox/evaluators/coco_evaluator.py

Lines changed: 2 additions & 8 deletions
@@ -114,14 +114,8 @@ def __init__(
         self.per_class_AR = per_class_AR

     def evaluate(
-        self,
-        model,
-        distributed=False,
-        half=False,
-        trt_file=None,
-        decoder=None,
-        test_size=None,
-        return_outputs=False
+        self, model, distributed=False, half=False, trt_file=None,
+        decoder=None, test_size=None, return_outputs=False
     ):
         """
         COCO average precision (AP) Evaluation. Iterate inference on the test dataset

yolox/evaluators/voc_eval.py

Lines changed: 5 additions & 7 deletions
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Code are based on
 # https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
 # Copyright (c) Bharath Hariharan.
@@ -13,7 +12,7 @@


 def parse_rec(filename):
-    """ Parse a PASCAL VOC xml file """
+    """Parse a PASCAL VOC xml file"""
     tree = ET.parse(filename)
     objects = []
     for obj in tree.findall("object"):
@@ -35,7 +34,7 @@ def parse_rec(filename):


 def voc_ap(rec, prec, use_07_metric=False):
-    """ap = voc_ap(rec, prec, [use_07_metric])
+    """
     Compute VOC AP given precision and recall.
     If use_07_metric is true, uses the
     VOC 07 11 point method (default:False).
@@ -92,9 +91,9 @@ def voc_eval(
         for i, imagename in enumerate(imagenames):
             recs[imagename] = parse_rec(annopath.format(imagename))
             if i % 100 == 0:
-                print("Reading annotation for {:d}/{:d}".format(i + 1, len(imagenames)))
+                print(f"Reading annotation for {i + 1}/{len(imagenames)}")
         # save
-        print("Saving cached annotations to {:s}".format(cachefile))
+        print(f"Saving cached annotations to {cachefile}")
         with open(cachefile, "wb") as f:
             pickle.dump(recs, f)
     else:
@@ -155,8 +154,7 @@ def voc_eval(
             # union
             uni = (
                 (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
-                + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
-                - inters
+                + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
             )

             overlaps = inters / uni
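
Note: the reflowed `uni` expression is the denominator of the standard PASCAL VOC IoU, union = area(detection) + area(ground truth) - intersection, with the +1.0 terms reflecting VOC's inclusive pixel-coordinate convention. A minimal NumPy sketch of the computation this hunk touches, using made-up boxes rather than values from the commit:

import numpy as np

# Hypothetical boxes in (x1, y1, x2, y2) form; not taken from this commit.
bb = np.array([10.0, 10.0, 50.0, 50.0])       # one detection
BBGT = np.array([[12.0, 12.0, 48.0, 52.0]])   # ground-truth boxes, shape (N, 4)

# Intersection, clamped at zero when the boxes do not overlap.
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih

# Union, exactly as the reformatted line computes it.
uni = (
    (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
    + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
)
overlaps = inters / uni  # IoU of the detection against every ground-truth box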

yolox/evaluators/voc_evaluator.py

Lines changed: 10 additions & 31 deletions
@@ -21,14 +21,7 @@ class VOCEvaluator:
     VOC AP Evaluation class.
     """

-    def __init__(
-        self,
-        dataloader,
-        img_size,
-        confthre,
-        nmsthre,
-        num_classes,
-    ):
+    def __init__(self, dataloader, img_size, confthre, nmsthre, num_classes):
         """
         Args:
             dataloader (Dataloader): evaluate dataloader.
@@ -46,13 +39,8 @@ def __init__(
         self.num_images = len(dataloader.dataset)

     def evaluate(
-        self,
-        model,
-        distributed=False,
-        half=False,
-        trt_file=None,
-        decoder=None,
-        test_size=None,
+        self, model, distributed=False, half=False, trt_file=None,
+        decoder=None, test_size=None, return_outputs=False,
     ):
         """
         VOC average precision (AP) Evaluation. Iterate inference on the test dataset
@@ -91,9 +79,7 @@ def evaluate(
             model(x)
             model = model_trt

-        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
-            progress_bar(self.dataloader)
-        ):
+        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(progress_bar(self.dataloader)):
             with torch.no_grad():
                 imgs = imgs.type(tensor_type)

@@ -127,13 +113,13 @@ def evaluate(

         eval_results = self.evaluate_prediction(data_list, statistics)
         synchronize()
+        if return_outputs:
+            return eval_results, data_list
         return eval_results

     def convert_to_voc_format(self, outputs, info_imgs, ids):
         predictions = {}
-        for (output, img_h, img_w, img_id) in zip(
-            outputs, info_imgs[0], info_imgs[1], ids
-        ):
+        for output, img_h, img_w, img_id in zip(outputs, info_imgs[0], info_imgs[1], ids):
             if output is None:
                 predictions[int(img_id)] = (None, None, None)
                 continue
@@ -142,9 +128,7 @@ def convert_to_voc_format(self, outputs, info_imgs, ids):
             bboxes = output[:, 0:4]

             # preprocessing: resize
-            scale = min(
-                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
-            )
+            scale = min(self.img_size[0] / float(img_h), self.img_size[1] / float(img_w))
             bboxes /= scale

             cls = output[:, 6]
@@ -175,7 +159,6 @@ def evaluate_prediction(self, data_dict, statistics):
                 )
             ]
         )
-
         info = time_info + "\n"

         all_boxes = [
@@ -196,13 +179,9 @@ def evaluate_prediction(self, data_dict, statistics):
                 c_dets = torch.cat((bboxes, scores.unsqueeze(1)), dim=1)
                 all_boxes[j][img_num] = c_dets[mask_c].numpy()

-            sys.stdout.write(
-                "im_eval: {:d}/{:d} \r".format(img_num + 1, self.num_images)
-            )
+            sys.stdout.write(f"im_eval: {img_num + 1}/{self.num_images} \r")
             sys.stdout.flush()

         with tempfile.TemporaryDirectory() as tempdir:
-            mAP50, mAP70 = self.dataloader.dataset.evaluate_detections(
-                all_boxes, tempdir
-            )
+            mAP50, mAP70 = self.dataloader.dataset.evaluate_detections(all_boxes, tempdir)
             return mAP50, mAP70, info
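
Note: the functional part of this file's change is the new return_outputs keyword, which brings VOCEvaluator.evaluate() in line with COCOEvaluator.evaluate() (the compat bug named in the commit title). A hedged sketch of how calling code can now treat both evaluators uniformly; the wrapper name and comments below are illustrative, not code from this commit:

import torch.nn as nn


def run_eval(evaluator, model: nn.Module, keep_predictions: bool = False):
    """Hypothetical wrapper that works with either COCOEvaluator or VOCEvaluator."""
    if keep_predictions:
        # After this commit both evaluators accept return_outputs and hand back
        # the raw per-image predictions alongside the metric tuple.
        eval_results, outputs = evaluator.evaluate(model, return_outputs=True)
        return eval_results, outputs
    # The old call style still returns only the metrics
    # ((mAP50, mAP70, info) for VOC).
    return evaluator.evaluate(model), None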

yolox/utils/model_utils.py

Lines changed: 2 additions & 1 deletion
@@ -8,7 +8,6 @@

 import torch
 import torch.nn as nn
-from thop import profile

 __all__ = [
     "fuse_conv_and_bn",
@@ -21,6 +20,8 @@


 def get_model_info(model: nn.Module, tsize: Sequence[int]) -> str:
+    from thop import profile
+
     stride = 64
     img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
     flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
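
Note: moving `from thop import profile` into get_model_info() makes thop a lazily imported dependency, so importing yolox.utils no longer fails on installs without thop; an ImportError can only surface when profiling is actually requested. A minimal sketch of the same deferred-import pattern (the function name and input size are placeholders, not code from this repository):

from copy import deepcopy

import torch
import torch.nn as nn


def model_flops_summary(model: nn.Module, img_size: int = 64) -> str:
    # Deferred import: thop is only required if this function is called.
    from thop import profile

    img = torch.zeros((1, 3, img_size, img_size), device=next(model.parameters()).device)
    flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
    return f"Params: {params / 1e6:.2f}M, GFLOPs: {flops / 1e9:.2f}"


# Example usage with a hypothetical model:
# print(model_flops_summary(nn.Sequential(nn.Conv2d(3, 8, 3))))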
