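# ONNX backend for the painted PointPillars pipeline on Waymo: a 2D segmentor
# "paints" lidar points with per-class scores, and a PointPillars detector
# (run through ONNX Runtime) then predicts 3D boxes.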
from typing import Optional, List, Union
import os
import torch
import logging
import backend
from collections import namedtuple
from model.painter import Painter
from model.pointpillars_core import PointPillarsPre, PointPillarsPos
import numpy as np
from tools.process import keep_bbox_from_image_range
from waymo import Waymo
import onnxruntime as ort


logging.basicConfig(level=logging.INFO)
log = logging.getLogger("backend-onnx")


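# Move the Waymo calibration matrices (rectification matrix, per-camera
# projection matrices and lidar-to-camera extrinsics) onto the requested
# device as float32 tensors.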
def change_calib_device(calib, cuda):
    device = 'cuda' if cuda else 'cpu'
    result = {}
    result['R0_rect'] = calib['R0_rect'].to(device=device, dtype=torch.float)
    for i in range(5):
        result[f'P{i}'] = calib[f'P{i}'].to(device=device, dtype=torch.float)
        result[f'Tr_velo_to_cam_{i}'] = calib[f'Tr_velo_to_cam_{i}'].to(
            device=device, dtype=torch.float)
    return result


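# Tensors that still require grad must be detached before NumPy conversion.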
def to_numpy(tensor):
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()


class BackendOnnx(backend.Backend):
    def __init__(self, segmentor_path, lidar_detector_path, data_path):
        super().__init__()
        self.segmentor_path = segmentor_path
        self.lidar_detector_path = lidar_detector_path
        # self.segmentation_classes = 18
        self.detection_classes = 3
        self.data_root = data_path
        CLASSES = Waymo.CLASSES
        self.LABEL2CLASSES = {v: k for k, v in CLASSES.items()}

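    # Backend metadata; the PyTorch version stands in for a backend version.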
    def version(self):
        return torch.__version__

    def name(self):
        return "python-SUT"

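    # Build the camera segmentor (via Painter), the PointPillars pre/post
    # stages kept in PyTorch, and the ONNX Runtime session for the pillar
    # detector itself.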
    def load(self):
        PaintArgs = namedtuple(
            'PaintArgs', ['training_path', 'model_path', 'cam_sync'])
        painting_args = PaintArgs(
            os.path.join(self.data_root, 'training'), self.segmentor_path, False)
        self.painter = Painter(painting_args, onnx=True)
        self.segmentor = self.painter.model
        model_pre = PointPillarsPre()
        model_post = PointPillarsPos(self.detection_classes)
        model_pre.eval()
        model_post.eval()
        self.lidar_detector = ort.InferenceSession(self.lidar_detector_path)
        self.model_pre = model_pre
        self.model_post = model_post
        return self

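    # Per-sample pipeline: segment every camera image, paint the lidar points
    # with the class scores, pillarize, run the ONNX detector, post-process,
    # and convert the detections into KITTI-style output lists.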
    def predict(self, inputs):
        dimensions, locations, rotation_y, box2d = [], [], [], []
        class_labels, class_scores, ids = [], [], []
        with torch.inference_mode():
            model_input = inputs[0]
            batched_pts = model_input['pts']
            scores_from_cam = []
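            # Run the 2D segmentor on each camera image and keep the class
            # scores used for painting.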
            for i in range(len(model_input['images'])):
                input_image_name = self.segmentor.get_inputs()[0].name
                input_data = {
                    input_image_name: to_numpy(model_input['images'][i])}
                segmentation_score = self.segmentor.run(None, input_data)
                segmentation_score = [
                    torch.from_numpy(item) for item in segmentation_score]
                scores_from_cam.append(
                    self.painter.get_score(
                        segmentation_score[0].squeeze(0)).cpu())
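            # Paint the lidar points with the per-camera segmentation scores.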
            points = self.painter.augment_lidar_class_scores_both(
                scores_from_cam, batched_pts, model_input['calib_info'])
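            # Pillarize the painted point cloud for the PointPillars detector.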
            pillars, coors_batch, npoints_per_pillar = self.model_pre(
                batched_pts=[points])
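            # Bind the three detector inputs by name, in the order the ONNX
            # graph declares them.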
            input_names = [inp.name for inp in self.lidar_detector.get_inputs()]
            input_data = {
                input_names[0]: to_numpy(pillars),
                input_names[1]: to_numpy(coors_batch),
                input_names[2]: to_numpy(npoints_per_pillar),
            }
            result = self.lidar_detector.run(None, input_data)
            result = [torch.from_numpy(item) for item in result]
            batch_results = self.model_post(result)
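            # Convert each frame's detections into KITTI-style fields.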
            for batch_result in batch_results:
                format_result = {
                    'class': [],
                    'truncated': [],
                    'occluded': [],
                    'alpha': [],
                    'bbox': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'score': [],
                    'idx': -1
                }

                calib_info = model_input['calib_info']
                image_info = model_input['image_info']
                idx = image_info['image_idx']
                format_result['idx'] = idx
                calib_info = change_calib_device(calib_info, False)
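                # Keep only boxes that project into the camera images; the 5
                # matches the five cameras handled in change_calib_device.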
                result_filter = keep_bbox_from_image_range(
                    batch_result, calib_info, 5, image_info, False)

                lidar_bboxes = result_filter['lidar_bboxes']
                labels, scores = result_filter['labels'], result_filter['scores']
                bboxes2d = result_filter['bboxes2d']
                camera_bboxes = result_filter['camera_bboxes']
                for lidar_bbox, label, score, bbox2d, camera_bbox in \
                        zip(lidar_bboxes, labels, scores, bboxes2d, camera_bboxes):
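                    # Truncation and occlusion are not estimated, so both are
                    # reported as 0.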
                    format_result['class'].append(label.item())
                    format_result['truncated'].append(0.0)
                    format_result['occluded'].append(0)
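                    # Observation angle: rotation_y minus the azimuth of the
                    # box centre in camera coordinates.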
                    alpha = camera_bbox[6] - np.arctan2(
                        camera_bbox[0], camera_bbox[2])
                    format_result['alpha'].append(alpha.item())
                    format_result['bbox'].append(bbox2d.tolist())
                    format_result['dimensions'].append(camera_bbox[3:6])
                    format_result['location'].append(camera_bbox[:3])
                    format_result['rotation_y'].append(camera_bbox[6].item())
                    format_result['score'].append(score.item())

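                # Stack the per-box tensors so each frame contributes a single
                # tensor of dimensions and locations.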
                if len(format_result['dimensions']) > 0:
                    format_result['dimensions'] = torch.stack(
                        format_result['dimensions'])
                    format_result['location'] = torch.stack(
                        format_result['location'])
                dimensions.append(format_result['dimensions'])
                locations.append(format_result['location'])
                rotation_y.append(format_result['rotation_y'])
                class_labels.append(format_result['class'])
                class_scores.append(format_result['score'])
                box2d.append(format_result['bbox'])
                ids.append(format_result['idx'])

        return (dimensions, locations, rotation_y, box2d,
                class_labels, class_scores, ids)