diff --git a/deep_sort/detection.py b/deep_sort/detection.py
index c7e1888..d1969d2 100644
--- a/deep_sort/detection.py
+++ b/deep_sort/detection.py
@@ -29,7 +29,7 @@ class Detection(object):
     """
 
     def __init__(self, tlwh, confidence, class_name, feature):
-        self.tlwh = np.asarray(tlwh, dtype=np.float)
+        self.tlwh = np.asarray(tlwh, dtype=np.float64)
         self.confidence = float(confidence)
         self.class_name = class_name
         self.feature = np.asarray(feature, dtype=np.float32)
diff --git a/deep_sort/preprocessing.py b/deep_sort/preprocessing.py
index 1ee8c3b..13c2817 100644
--- a/deep_sort/preprocessing.py
+++ b/deep_sort/preprocessing.py
@@ -38,7 +38,7 @@ def non_max_suppression(boxes, classes, max_bbox_overlap, scores=None):
     if len(boxes) == 0:
         return []
 
-    boxes = boxes.astype(np.float)
+    boxes = boxes.astype(np.float64)
     pick = []
 
     x1 = boxes[:, 0]
diff --git a/tools/generate_detections.py b/tools/generate_detections.py
index 200f16b..6d2f6f4 100644
--- a/tools/generate_detections.py
+++ b/tools/generate_detections.py
@@ -60,7 +60,7 @@ def extract_image_patch(image, bbox, patch_shape):
 
     # convert to top left, bottom right
     bbox[2:] += bbox[:2]
-    bbox = bbox.astype(np.int)
+    bbox = bbox.astype(np.int64)
 
     # clip at image boundaries
     bbox[:2] = np.maximum(0, bbox[:2])
@@ -164,9 +164,9 @@ def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):
         detections_in = np.loadtxt(detection_file, delimiter=',')
         detections_out = []
 
-        frame_indices = detections_in[:, 0].astype(np.int)
-        min_frame_idx = frame_indices.astype(np.int).min()
-        max_frame_idx = frame_indices.astype(np.int).max()
+        frame_indices = detections_in[:, 0].astype(np.int64)
+        min_frame_idx = frame_indices.astype(np.int64).min()
+        max_frame_idx = frame_indices.astype(np.int64).max()
         for frame_idx in range(min_frame_idx, max_frame_idx + 1):
             print("Frame %05d/%05d" % (frame_idx, max_frame_idx))
             mask = frame_indices == frame_idx
diff --git a/tracking_helpers.py b/tracking_helpers.py
index 2ebb89e..908c38e 100644
--- a/tracking_helpers.py
+++ b/tracking_helpers.py
@@ -86,7 +86,7 @@ def extract_image_patch(image, bbox, patch_shape):
 
     # convert to top left, bottom right
     bbox[2:] += bbox[:2]
-    bbox = bbox.astype(np.int)
+    bbox = bbox.astype(int)
 
     # clip at image boundaries
     bbox[:2] = np.maximum(0, bbox[:2])
@@ -190,9 +190,9 @@ def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):
         detections_in = np.loadtxt(detection_file, delimiter=',')
         detections_out = []
 
-        frame_indices = detections_in[:, 0].astype(np.int)
-        min_frame_idx = frame_indices.astype(np.int).min()
-        max_frame_idx = frame_indices.astype(np.int).max()
+        frame_indices = detections_in[:, 0].astype(np.int64)
+        min_frame_idx = frame_indices.astype(np.int64).min()
+        max_frame_idx = frame_indices.astype(np.int64).max()
         for frame_idx in range(min_frame_idx, max_frame_idx + 1):
             print("Frame %05d/%05d" % (frame_idx, max_frame_idx))
             mask = frame_indices == frame_idx
diff --git a/utils/datasets.py b/utils/datasets.py
index 0cdc72c..8704e57 100644
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -415,7 +415,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
                 x[:, 0] = 0
 
         n = len(shapes)  # number of images
-        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
+        bi = np.floor(np.arange(n) / batch_size).astype(np.int64)  # batch index
         nb = bi[-1] + 1  # number of batches
         self.batch = bi  # batch index of image
         self.n = n
@@ -443,7 +443,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
                 elif mini > 1:
                     shapes[i] = [1, 1 / mini]
 
-            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int64) * stride
 
         # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
         self.imgs = [None] * n
@@ -1200,7 +1200,7 @@ def pastein(image, labels, sample_labels, sample_images, sample_masks):
             r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
             temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w]
             m_ind = r_mask > 0
-            if m_ind.astype(np.int).sum() > 60:
+            if m_ind.astype(np.int64).sum() > 60:
                 temp_crop[m_ind] = r_image[m_ind]
                 #print(sample_labels[sel_ind])
                 #print(sample_images[sel_ind].shape)
@@ -1283,7 +1283,7 @@ def extract_boxes(path='../coco/'):  # from utils.datasets import *; extract_box
                     b = x[1:] * [w, h, w, h]  # box
                     # b[2:] = b[2:].max()  # rectangle to square
                     b[2:] = b[2:] * 1.2 + 3  # pad
-                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int64)
 
                     b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                     b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
diff --git a/utils/general.py b/utils/general.py
index b00dc27..60dda01 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -219,7 +219,7 @@ def labels_to_class_weights(labels, nc=80):
         return torch.Tensor()
 
     labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
-    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
+    classes = labels[:, 0].astype(np.int64)  # labels = [class xywh]
     weights = np.bincount(classes, minlength=nc)  # occurrences per class
 
     # Prepend gridpoint count (for uCE training)
@@ -234,7 +234,7 @@ def labels_to_class_weights(labels, nc=80):
 
 def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
     # Produces image weights based on class_weights and image contents
-    class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
+    class_counts = np.array([np.bincount(x[:, 0].astype(np.int64), minlength=nc) for x in labels])
     image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
     # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
     return image_weights
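
Background on the change: np.float and np.int were thin aliases for the
Python builtins float and int. NumPy deprecated them in 1.20 and removed
them in 1.24, so the old spellings now raise AttributeError. A minimal
sketch of the equivalences this patch relies on (array values below are
illustrative only, not taken from the code):

    import numpy as np

    # On NumPy >= 1.24 the removed alias fails outright:
    #   np.asarray([1.5], dtype=np.float)  ->  AttributeError

    # The builtin float maps to float64, so these two calls are identical:
    a = np.asarray([1.5, 2.5], dtype=np.float64)
    b = np.asarray([1.5, 2.5], dtype=float)
    assert a.dtype == b.dtype == np.float64

    # astype(int) picks the platform default integer (historically int32
    # on Windows, int64 on Linux/macOS), while astype(np.int64) is always
    # 64-bit; either is wide enough for the frame indices, batch indices,
    # and box coordinates touched by this patch.
    idx = np.array([3.0, 7.0]).astype(np.int64)
    assert idx.dtype == np.int64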