-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcharuco_compute_extrinsic_transform.py
More file actions
163 lines (122 loc) · 5.87 KB
/
charuco_compute_extrinsic_transform.py
File metadata and controls
163 lines (122 loc) · 5.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
from argparse import ArgumentParser
from dataset.dataset_interface import DatasetInterface
from pathlib import Path
import numpy as np
import cv2
from utils.transformation_utils import image_points_to_camera_points, rs_ci, zv_ci
def __compute_transform_matrix(A, B):
    """Fit the rigid transform (R, t) that maps point set A onto point set B.

    SVD-based least-squares (Kabsch) method, adapted from:
    https://github.com/nghiaho12/rigid_transform_3D/blob/master/rigid_transform_3D.py

    Args:
        A: 3xN array of source points.
        B: 3xN array of destination points, column-aligned with A.

    Returns:
        (R, t): 3x3 rotation matrix and 3x1 translation vector such that
        R @ A + t approximates B in the least-squares sense.

    Raises:
        Exception: if A or B is not shaped 3xN.
    """
    assert A.shape == B.shape

    num_rows, num_cols = A.shape
    if num_rows != 3:
        raise Exception(f"matrix A is not 3xN, it is {num_rows}x{num_cols}")

    num_rows, num_cols = B.shape
    if num_rows != 3:
        raise Exception(f"matrix B is not 3xN, it is {num_rows}x{num_cols}")

    # find mean column wise; keep centroids as 3x1 columns for broadcasting
    centroid_A = np.mean(A, axis=1).reshape(-1, 1)
    centroid_B = np.mean(B, axis=1).reshape(-1, 1)

    # center both point clouds at the origin
    Am = A - centroid_A
    Bm = B - centroid_B

    # cross-covariance matrix between the centered clouds
    H = Am @ np.transpose(Bm)

    # find rotation via SVD of the covariance
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T

    # special reflection case: det(R) == -1 means the SVD produced a
    # reflection instead of a proper rotation — flip the last row of Vt
    if np.linalg.det(R) < 0:
        # fixed: message used to read "det(R) < R", which is not the condition tested
        print("det(R) < 0, reflection detected!, correcting for it ...")
        Vt[2, :] *= -1
        R = Vt.T @ U.T

    # translation follows from the rotated source centroid
    t = -R @ centroid_A + centroid_B

    return R, t
def __extract_common_xyz(rs_rgb, rs_depth, zv_rgb, zv_depth,
                         charuco_dict, charuco_board, common_points_threshold):
    """Detect ChArUco corners seen by BOTH cameras and return their
    (row, col, depth) coordinates as two aligned Nx3 arrays (rs, zv).

    Raises:
        Exception: if marker detection/interpolation fails or fewer than
            `common_points_threshold` corners are shared between the views.
    """
    # convert BGR to Gray
    rs_gray = cv2.cvtColor(rs_rgb, cv2.COLOR_BGR2GRAY)
    zv_gray = cv2.cvtColor(zv_rgb, cv2.COLOR_BGR2GRAY)

    # Find markers corners
    rs_corners, rs_ids, _ = cv2.aruco.detectMarkers(rs_gray, charuco_dict)
    zv_corners, zv_ids, _ = cv2.aruco.detectMarkers(zv_gray, charuco_dict)
    if not rs_corners or not zv_corners:
        raise Exception("No markers detected")

    # find charuco corners # TODO: try with cameraMatrix/distCoeffs
    retA, rs_corners, rs_ids = cv2.aruco.interpolateCornersCharuco(rs_corners, rs_ids, rs_gray, charuco_board)
    retB, zv_corners, zv_ids = cv2.aruco.interpolateCornersCharuco(zv_corners, zv_ids, zv_gray, charuco_board)
    if not retA or not retB:
        raise Exception("Can't interpolate corners")

    # Find common points in both frames (is there a nicer way?)
    rs_obj_points, rs_points = cv2.aruco.getBoardObjectAndImagePoints(charuco_board, rs_corners, rs_ids)
    zv_obj_points, zv_points = cv2.aruco.getBoardObjectAndImagePoints(charuco_board, zv_corners, zv_ids)

    # Map objectPoint -> imagePoint per camera; intersecting the key sets
    # yields the board corners detected in both views
    rs_obj_to_points = {
        tuple(a): tuple(b)
        for a, b in zip(rs_obj_points[:, 0], rs_points[:, 0])
    }
    zv_obj_to_points = {
        tuple(a): tuple(b)
        for a, b in zip(zv_obj_points[:, 0], zv_points[:, 0])
    }
    common = set(rs_obj_to_points.keys()) & set(zv_obj_to_points.keys())
    if len(common) < common_points_threshold:
        raise Exception(
            f"Too few respective points found in images ({len(common)})"
        )

    # aligned image points for each shared corner; [:, [1, 0]] swaps
    # (x, y) image coordinates into (row, col) order for depth lookup
    rs_pts = np.array([rs_obj_to_points[objP] for objP in common])[:, [1, 0]]
    zv_pts = np.array([zv_obj_to_points[objP] for objP in common])[:, [1, 0]]

    # sample the depth images at the corner pixels
    rs_depths = rs_depth[tuple(rs_pts.astype(np.uint16).T)]
    zv_depths = zv_depth[tuple(zv_pts.astype(np.uint16).T)]

    rs_xyz = np.concatenate((rs_pts, rs_depths[:, None]), axis=1)
    zv_xyz = np.concatenate((zv_pts, zv_depths[:, None]), axis=1)

    # remove corners with missing (NaN) depth in either camera
    no_nans = ~np.logical_or(np.isnan(zv_xyz), np.isnan(rs_xyz)).any(axis=1)
    return rs_xyz[no_nans], zv_xyz[no_nans]


def main(args):
    """Compute the extrinsic transform between the two cameras.

    Loads every *.npz capture below args.dir_with_charuco_images, collects
    ChArUco corners visible to both cameras, lifts them to 3D camera
    coordinates and prints the 4x4 homogeneous transform mapping zv camera
    points onto rs camera points.

    Raises:
        Exception: if no image pair yields enough shared corner points.
    """
    # get all charuco images with small charuco board
    files = list(args.dir_with_charuco_images.rglob("*.npz"))
    print(f"files for calibration: {len(files)}")

    charuco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
    # charuco_board = cv2.aruco.CharucoBoard_create(7, 5, 7.5, 5.625, charuco_dict) # large
    charuco_board = cv2.aruco.CharucoBoard_create(5, 3, 6.5, 4.875, charuco_dict) # small (prob. not correct)
    common_points_threshold = 10  # minimum shared corners per image pair

    # Reference: https://stackoverflow.com/questions/64612924/opencv-stereocalibration-of-two-cameras-using-charuco
    rs_imgs_xyz = []
    zv_imgs_xyz = []
    imgs_tuples = [DatasetInterface.load(file)[:4] for file in files]
    for rs_rgb, rs_depth, zv_rgb, zv_depth in imgs_tuples:
        try:
            rs_xyz, zv_xyz = __extract_common_xyz(
                rs_rgb, rs_depth, zv_rgb, zv_depth,
                charuco_dict, charuco_board, common_points_threshold)
        except Exception as e:
            # best-effort: skip image pairs where detection fails
            print(f"Exception: {e}")
            continue
        rs_imgs_xyz.extend(rs_xyz)
        zv_imgs_xyz.extend(zv_xyz)

    if len(rs_imgs_xyz) == 0:
        raise Exception("No image with enough feature points given")

    rs_imgs_xyz = np.array(rs_imgs_xyz)
    zv_imgs_xyz = np.array(zv_imgs_xyz)

    # lift (row, col, depth) image points to 3D camera coordinates using
    # each camera's intrinsics (rs_ci / zv_ci)
    rs_camera_xyz = image_points_to_camera_points(rs_imgs_xyz, rs_ci)
    zv_camera_xyz = image_points_to_camera_points(zv_imgs_xyz, zv_ci)

    # fit rigid transform mapping zv camera points onto rs camera points
    R, t = __compute_transform_matrix(zv_camera_xyz.T, rs_camera_xyz.T)

    trans = np.identity(4)
    trans[:3, :3] = R
    trans[:3, 3] = t[:, 0]

    print(f"""
Computed extrinsic transformation matrix:
{trans}
""")
if __name__ == "__main__":
argparse = ArgumentParser()
argparse.add_argument("dir_with_charuco_images", type=Path,
help="directory where the raw charuco images are located")
main(argparse.parse_args())