# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a sparsebit post-training-quantized YOLOv5 detection model on the COCO val2017 set.

Usage:
    $ python val.py --model_name yolov5s --checkpoint_path yolov5s.pt --data_path /path/to/coco --qconfig_path qconfig.yaml
"""

import argparse
import os
import sys
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = str(FILE.parents[0] / "yolov5")  # YOLOv5 root directory, expected alongside this script
if ROOT not in sys.path:
    sys.path.append(ROOT)  # add ROOT to sys.path so `models` can be imported
from models import yolov5n, yolov5s
from yolov5.utils.dataloaders import create_dataloader
from yolov5.utils.general import (
    LOGGER,
    TQDM_BAR_FORMAT,
    Profile,
    colorstr,
    non_max_suppression,
    scale_boxes,
    xywh2xyxy,
    xyxy2xywh,
)
from yolov5.utils.metrics import ap_per_class, box_iou

from sparsebit.quantization import QuantModel, parse_qconfig


def set_seed(seed):
    # Seed all RNGs for reproducibility; disabling cuDNN trades speed for determinism
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (
            (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
        )  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, "a") as f:
            f.write(("%g " * len(line)).rstrip() % line + "\n")
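
# Illustrative save_one_txt output line with save_conf=True (values invented):
#   0 0.512 0.431 0.250 0.180 0.94   -> class, x_center, y_center, width, height, conf (normalized)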


def save_one_json(predn, jdict, path, class_map):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    for p, b in zip(predn.tolist(), box.tolist()):
        jdict.append(
            {
                "image_id": image_id,
                "category_id": class_map[int(p[5])],
                "bbox": [round(x, 3) for x in b],
                "score": round(p[4], 5),
            }
        )
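
# Note: jdict accumulates detections in COCO format; upstream YOLOv5 dumps it
# with json.dump and scores it via pycocotools COCOeval. save_one_txt and
# save_one_json are kept here as helpers but are not called from main().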


def process_batch(detections, labels, iouv):
    """
    Return a correct-prediction matrix.

    Arguments:
        detections (array[N, 6]): x1, y1, x2, y2, conf, class
        labels (array[M, 5]): class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]): True/False for each of the 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where(
            (iou >= iouv[i]) & correct_class
        )  # IoU >= threshold and classes match
        if x[0].shape[0]:
            matches = (
                torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1)
                .cpu()
                .numpy()
            )  # [label, detection, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]  # sort by IoU, descending
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one match per detection
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one match per label
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
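
# Minimal usage sketch for process_batch (tensors invented for illustration):
#   iouv = torch.linspace(0.5, 0.95, 10)
#   det = torch.tensor([[10.0, 10.0, 50.0, 50.0, 0.9, 0.0]])  # x1, y1, x2, y2, conf, cls
#   lbl = torch.tensor([[0.0, 12.0, 12.0, 48.0, 48.0]])       # cls, x1, y1, x2, y2
#   process_batch(det, lbl, iouv)  # -> bool tensor of shape [1, 10]; IoU is 0.81 here,
#                                  #    so entries for thresholds 0.5-0.8 are True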


@torch.no_grad()
def main(args):
    set_seed(args.seed)
    if args.model_name == "yolov5n":
        model = yolov5n(checkpoint_path=args.checkpoint_path)
    elif args.model_name == "yolov5s":
        model = yolov5s(checkpoint_path=args.checkpoint_path)
    else:
        raise NotImplementedError(f"unsupported model: {args.model_name}")

    # Wrap the detection network in a sparsebit QuantModel
    qconfig = parse_qconfig(args.qconfig_path)
    qmodel = QuantModel(model.model, config=qconfig)
    model.model = qmodel

    imgsz = 640
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Configure
    model.eval()
    model.to(device)

    cuda = device.type != "cpu"
    nc = 80  # number of classes (COCO)
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # IoU vector for mAP@0.5:0.95
    niou = iouv.numel()
    task = "val"  # path to val images
    dataloader = create_dataloader(
        os.path.join(args.data_path, "val2017.txt"),
        imgsz,
        1,  # batch size
        32,  # max model stride
        False,  # single-class mode
        pad=0.5,
        rect=True,
        workers=args.workers,
        prefix=colorstr(f"{task}: "),
    )[0]

    # Calibration
    calib_loader = create_dataloader(
        os.path.join(args.data_path, "calib2017.txt"),
        imgsz,
        1,  # batch size
        32,  # max model stride
        False,  # single-class mode
        pad=0.5,
        rect=True,
        workers=args.workers,
        prefix=colorstr("calib: "),
    )[0]

    qmodel.prepare_calibration()
    for batch_meta in calib_loader:
        data = batch_meta[0] / 255  # uint8 0-255 to float 0.0-1.0
        with torch.no_grad():
            _ = qmodel(data.to(device, non_blocking=True))
    qmodel.calc_qparams()
    qmodel.set_quant(w_quant=True, a_quant=True)
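    # The model now runs fake-quantized: weights and activations are quantized
    # with the qparams estimated on the calibration set above.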

    seen = 0
    names = (
        model.names if hasattr(model, "names") else model.module.names
    )  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    s = ("%22s" + "%11s" * 6) % (
        "Class",
        "Images",
        "Instances",
        "P",
        "R",
        "mAP50",
        "mAP50-95",
    )
    p, r, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times: preprocess, inference, NMS
    stats, ap = [], []
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for im, targets, paths, shapes in pbar:
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.float()  # uint8 to fp32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            _, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds = model(im)

        # NMS
        targets[:, 2:] *= torch.tensor(
            (width, height, width, height), device=device
        )  # normalized xywh to pixels
        with dt[2]:
            preds = non_max_suppression(
                preds,
                args.conf_thres,
                args.iou_thres,
                labels=[],
                multi_label=True,
                agnostic=False,
                max_det=300,
            )

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            npr = pred.shape[0]  # number of predictions
            shape = shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                stats.append(
                    (correct, *torch.zeros((2, 0), device=device), labels[:, 0])
                )
                continue

            # Predictions
            predn = pred.clone()
            scale_boxes(
                im[si].shape[1:], predn[:, :4], shape, shapes[si][1]
            )  # native-space pred

            # Evaluate
            tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
            scale_boxes(
                im[si].shape[1:], tbox, shape, shapes[si][1]
            )  # native-space labels
            labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
            correct = process_batch(predn, labelsn, iouv)

            stats.append(
                (correct, pred[:, 4], pred[:, 5], labels[:, 0])
            )  # (correct, conf, pcls, tcls)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(
            *stats, plot=False, save_dir="", names=names
        )
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = "%22s" + "%11i" * 2 + "%11.3g" * 4  # print format
    LOGGER.info(pf % ("all", seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(
            f"WARNING ⚠️ no labels found in {task} set, cannot compute metrics without labels"
        )


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name", type=str, default="yolov5n", choices=["yolov5n", "yolov5s"]
    )
    parser.add_argument("--qconfig_path", type=str, default="./qconfig.yaml")
    parser.add_argument("--data_path", type=str, required=True)
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--workers", type=int, default=8, help="dataloader workers")
    parser.add_argument(
        "--conf_thres", type=float, default=0.001, help="confidence threshold"
    )
    parser.add_argument(
        "--iou_thres", type=float, default=0.6, help="NMS IoU threshold"
    )

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    main(args)