diff --git a/README.md b/README.md
index 17f96e3..d842a7f 100644
--- a/README.md
+++ b/README.md
@@ -112,7 +112,7 @@ cd ros2_calib
 
 # Create a virtual environment
 python -m venv .venv
-source ./venv/bin/activate
+source .venv/bin/activate
 
 # Install in development mode
 python -m pip install .
diff --git a/ros2_calib/bag_handler.py b/ros2_calib/bag_handler.py
index 1a92a02..9c22ee5 100644
--- a/ros2_calib/bag_handler.py
+++ b/ros2_calib/bag_handler.py
@@ -501,17 +501,29 @@ def run(self):
         image_topic = self.selected_topics_data["image_topic"]
         pointcloud_topic = self.selected_topics_data["pointcloud_topic"]
         camerainfo_topic = self.selected_topics_data["camerainfo_topic"]
-        raw_messages = read_synchronized_image_cloud(
+        raw_messages = read_all_messages_optimized(
             self.bag_file,
-            image_topic,
-            pointcloud_topic,
-            camerainfo_topic,
             self.topics_to_read,
             self.progress_updated,
-            max_time_diff=self.sync_tolerance,
-            frame_samples=self.frame_samples,
-            ros_version=self.ros_version,
+            self.total_messages,
+            self.frame_samples,
+            self.topic_message_counts,
+            self.ros_version,
         )
+        # NOTE: read_synchronized_image_cloud should be revised: it extracts the first N
+        # synchronized (lidar + camera) frames. Ideally it would collect all synchronized
+        # frames and then keep N equispaced frames, where N is the number of frames selected.
+        # raw_messages = read_synchronized_image_cloud(
+        #     self.bag_file,
+        #     image_topic,
+        #     pointcloud_topic,
+        #     camerainfo_topic,
+        #     self.topics_to_read,
+        #     self.progress_updated,
+        #     max_time_diff=self.sync_tolerance,
+        #     frame_samples=self.frame_samples,
+        #     ros_version=self.ros_version,
+        # )
 
         self.progress_updated.emit(95, "Finalizing data...")
         topic_types = {name: type_str for name, type_str in self.topics_to_read.items()}
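Review note on the hunk above: a minimal sketch of the equispaced sampling the NOTE asks for. `synchronized_pairs` and `sample_equispaced` are illustrative names, not part of the current API.

```python
import numpy as np

def sample_equispaced(synchronized_pairs, n_samples):
    """Keep n_samples pairs spread evenly across the whole recording."""
    if len(synchronized_pairs) <= n_samples:
        return synchronized_pairs
    # Evenly spaced indices from the first to the last synchronized frame
    idx = np.linspace(0, len(synchronized_pairs) - 1, n_samples).round().astype(int)
    return [synchronized_pairs[i] for i in idx]
```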
diff --git a/ros2_calib/calibration.py b/ros2_calib/calibration.py
index 35786c7..2ac0dde 100644
--- a/ros2_calib/calibration.py
+++ b/ros2_calib/calibration.py
@@ -20,23 +20,29 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+import copy
 import cv2
 import numpy as np
 from scipy.optimize import least_squares
 from scipy.spatial.transform import Rotation
 
 
-def objective_function(params, points_3d, points_2d, K):
+def objective_function(params, points_3d, points_2d, K, dist_coeffs, is_fisheye):
     rvec = params[:3]
     tvec = params[3:]
-    points_proj, _ = cv2.projectPoints(points_3d, rvec, tvec, K, None)
+    if is_fisheye and dist_coeffs is not None:
+        points_3d_proc = np.ascontiguousarray(points_3d).reshape(-1, 1, 3)
+        points_proj, _ = cv2.fisheye.projectPoints(points_3d_proc, rvec, tvec, K, dist_coeffs)
+    else:
+        points_proj, _ = cv2.projectPoints(points_3d, rvec, tvec, K, None)
+
     error = (points_proj.reshape(-1, 2) - points_2d).ravel()
     return error
 
 
-def calibrate(
-    correspondences, K, pnp_method=cv2.SOLVEPNP_ITERATIVE, lsq_method="lm", lsq_verbose=2
+def calibrate(correspondences, K, pnp_method=cv2.SOLVEPNP_ITERATIVE, lsq_method="lm", lsq_verbose=2,
+    dist_coeffs=None, is_fisheye=False,
 ):
     print("---" + "-" * 10 + " Starting Calibration " + "-" * 10 + "---")
     if len(correspondences) < 4:
@@ -48,19 +54,28 @@ def calibrate(
 
     initial_params = np.zeros(6)
 
+    K_pnp = copy.deepcopy(K)
     if pnp_method is not None:
         print(f"Running RANSAC on {len(points_2d)} correspondences...")
         try:
+            if is_fisheye and dist_coeffs is not None:
+                # Fisheye PnP with RANSAC is not directly supported in OpenCV
+                points_2d = cv2.fisheye.undistortPoints(
+                    points_2d.reshape(-1, 1, 2), K, dist_coeffs
+                ).reshape(-1, 2)
+                K_pnp = np.eye(3)
+
             # Use solvePnPRansac to get a robust initial estimate and identify inliers
             success, rvec_ransac, tvec_ransac, inliers = cv2.solvePnPRansac(
                 points_3d,
                 points_2d,
-                K,
+                K_pnp,
                 None,
                 iterationsCount=100,
                 reprojectionError=8.0,
                 flags=pnp_method,
             )
+
             if not success:
                 print("RANSAC failed to find a solution.")
                 return np.identity(4)
@@ -89,7 +104,7 @@ def calibrate(
     res = least_squares(
         objective_function,
         initial_params,
-        args=(points_3d, points_2d, K),
+        args=(points_3d, points_2d, K, dist_coeffs, is_fisheye),
         method=lsq_method,
         verbose=lsq_verbose,
     )
@@ -106,7 +121,9 @@ def calibrate(
     print("\n---" + "-" * 10 + " Calibration Finished " + "-" * 10 + "---")
     rpy = Rotation.from_matrix(R_opt).as_euler("xyz", degrees=True)
     print(f"Translation (x, y, z): {tvec_opt[0]:.4f}, {tvec_opt[1]:.4f}, {tvec_opt[2]:.4f}")
-    print(f"Rotation (roll, pitch, yaw): {rpy[0]:.4f}, {rpy[1]:.4f}, {rpy[2]:.4f}")
+    print(f"Rotation in degrees (roll, pitch, yaw): {rpy[0]:.4f}, {rpy[1]:.4f}, {rpy[2]:.4f}")
+    rpy_rad = np.radians(rpy)
+    print(f"Rotation in radians (roll, pitch, yaw): {rpy_rad[0]:.4f}, {rpy_rad[1]:.4f}, {rpy_rad[2]:.4f}")
     print("Final Extrinsic Matrix:")
     print(extrinsics)
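Review note on the fisheye PnP path above: `cv2.solvePnPRansac` has no fisheye variant, so the observations are undistorted into normalized image coordinates and PnP runs with an identity camera matrix. Two things worth double-checking: (1) with `K = I` the `reprojectionError=8.0` threshold is in normalized units rather than pixels, so it is effectively very permissive; (2) `points_2d` is rebound to the normalized coordinates, and if the later `least_squares` refinement reuses that same variable with the fisheye objective (which projects to pixel coordinates), the residuals would mix units. A standalone sketch of the workaround with a focal-length-scaled threshold (hypothetical helper, not the repo API):

```python
import cv2
import numpy as np

def fisheye_pnp_ransac(points_3d, points_2d, K, dist_coeffs):
    """Fisheye PnP via normalized coordinates and an identity camera matrix."""
    normalized = cv2.fisheye.undistortPoints(
        points_2d.reshape(-1, 1, 2).astype(np.float64), K, dist_coeffs
    ).reshape(-1, 2)
    # The image points are normalized here, so scale the pixel threshold by fx
    return cv2.solvePnPRansac(
        points_3d, normalized, np.eye(3), None,
        iterationsCount=100, reprojectionError=8.0 / K[0, 0],
    )
```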
@@ -217,7 +234,8 @@ def solve_rigid_transform_3d(source_points, target_points):
 
 
 def global_dual_lidar_objective(
-    params, master_cam_correspondences, second_cam_correspondences, lidar_lidar_correspondences, K
+    params, master_cam_correspondences, second_cam_correspondences, lidar_lidar_correspondences, K,
+    dist_coeffs, is_fisheye
 ):
     """
     Global objective function for dual LiDAR calibration.
@@ -246,9 +264,16 @@ def global_dual_lidar_objective(
         master_points_3d = np.array([c[1] for c in master_cam_correspondences], dtype=np.float32)
         master_points_2d = np.array([c[0] for c in master_cam_correspondences], dtype=np.float32)
 
-        master_points_proj, _ = cv2.projectPoints(
-            master_points_3d, master_rvec, master_tvec, K, None
-        )
+        if is_fisheye and dist_coeffs is not None:
+            master_points_3d_proc = np.ascontiguousarray(master_points_3d).reshape(-1, 1, 3)
+            master_points_proj, _ = cv2.fisheye.projectPoints(
+                master_points_3d_proc, master_rvec, master_tvec, K, dist_coeffs
+            )
+        else:
+            master_points_proj, _ = cv2.projectPoints(
+                master_points_3d, master_rvec, master_tvec, K, None
+            )
+
         master_errors = (master_points_proj.reshape(-1, 2) - master_points_2d).ravel()
         errors.extend(master_errors)
@@ -257,9 +282,16 @@ def global_dual_lidar_objective(
         second_points_3d = np.array([c[1] for c in second_cam_correspondences], dtype=np.float32)
         second_points_2d = np.array([c[0] for c in second_cam_correspondences], dtype=np.float32)
 
-        second_points_proj, _ = cv2.projectPoints(
-            second_points_3d, second_rvec, second_tvec, K, None
-        )
+        if is_fisheye and dist_coeffs is not None:
+            second_points_3d_proc = np.ascontiguousarray(second_points_3d).reshape(-1, 1, 3)
+            second_points_proj, _ = cv2.fisheye.projectPoints(
+                second_points_3d_proc, second_rvec, second_tvec, K, dist_coeffs
+            )
+        else:
+            second_points_proj, _ = cv2.projectPoints(
+                second_points_3d, second_rvec, second_tvec, K, None
+            )
+
         second_errors = (second_points_proj.reshape(-1, 2) - second_points_2d).ravel()
         errors.extend(second_errors)
@@ -304,6 +336,8 @@ def calibrate_dual_lidar_global(
     initial_second_transform=None,
     lsq_method="lm",
     lsq_verbose=2,
+    dist_coeffs=None,
+    is_fisheye=False,
 ):
     """
     Global dual LiDAR calibration using simultaneous optimization.
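For reference, `global_dual_lidar_objective` optimizes both extrinsics in one flat vector; the layout below is an assumption inferred from the master/second rvec/tvec pairs used above, so treat it as a sketch rather than the repo's exact packing:

```python
import numpy as np
from scipy.spatial.transform import Rotation

# Assumed layout: [master rvec (3), master tvec (3), second rvec (3), second tvec (3)]
def unpack_dual_params(params):
    """Split the flat 12-vector into two 4x4 extrinsic matrices."""
    mats = []
    for i in (0, 6):
        T = np.eye(4)
        T[:3, :3] = Rotation.from_rotvec(params[i:i + 3]).as_matrix()
        T[:3, 3] = params[i + 3:i + 6]
        mats.append(T)
    return mats  # [T_master_to_cam, T_second_to_cam]
```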
@@ -367,6 +401,8 @@ def calibrate_dual_lidar_global(
             second_cam_correspondences,
             lidar_lidar_correspondences,
             K,
+            dist_coeffs,
+            is_fisheye,
         ),
         method=lsq_method,
         verbose=lsq_verbose,
diff --git a/ros2_calib/calibration_widget.py b/ros2_calib/calibration_widget.py
index 6bdd075..09aa2d8 100644
--- a/ros2_calib/calibration_widget.py
+++ b/ros2_calib/calibration_widget.py
@@ -204,6 +204,19 @@ def has_significant_distortion(self):
         threshold = 1e-6
         return np.any(np.abs(dist_coeffs) > threshold)
 
+    def _ensure_fisheye_dist_coeffs(self, dist_coeffs):
+        """Ensure distortion coefficients match fisheye model requirement (exactly 4 coeffs)."""
+        if len(dist_coeffs) > 4:
+            return dist_coeffs[:4]
+        elif len(dist_coeffs) < 4:
+            return np.pad(dist_coeffs, (0, 4 - len(dist_coeffs)), mode="constant")
+        return dist_coeffs
+
+    def _is_fisheye_camera(self):
+        """Check if camera uses fisheye or equidistant distortion model."""
+        model = self.camerainfo_msg.distortion_model.lower()
+        return any(fisheye_type in model for fisheye_type in ("fisheye", "equidistant"))
+
     def toggle_rectification(self, enabled):
         """Toggle image rectification on/off."""
         self.is_rectification_enabled = enabled
@@ -216,13 +229,21 @@ def rectify_image(self, image):
 
         # Get camera matrix and distortion coefficients
         K = np.array(self.camerainfo_msg.k).reshape(3, 3)
-        dist_coeffs = np.array(self.camerainfo_msg.d)
+        dist_coeffs = (
+            np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+        )
 
         # Undistort the image
         try:
-            # Use cv2.undistort with the same camera matrix as newCameraMatrix
-            # This preserves the same image dimensions and focal length
-            rectified_image = cv2.undistort(image, K, dist_coeffs, None, K)
+            if self._is_fisheye_camera():
+                # Use fisheye undistortion
+                dist_coeffs = self._ensure_fisheye_dist_coeffs(dist_coeffs)
+                rectified_image = cv2.fisheye.undistortImage(image, K, dist_coeffs, Knew=K)
+            else:
+                # Use standard undistortion
+                # Use cv2.undistort with the same camera matrix as newCameraMatrix
+                # This preserves the same image dimensions and focal length
+                rectified_image = cv2.undistort(image, K, dist_coeffs, None, K)
             return rectified_image
         except Exception as e:
             print(f"[WARNING] Failed to rectify image: {e}")
@@ -873,13 +894,17 @@ def display_camera_intrinsics(self):
             display_text += coeffs_str + "]"
 
             # Add interpretation of common distortion models
-            if len(dist_coeffs) >= 4:
+            if self._is_fisheye_camera():
                 display_text += f"\nk1={dist_coeffs[0]:.6f}, k2={dist_coeffs[1]:.6f}"
-                display_text += f"\np1={dist_coeffs[2]:.6f}, p2={dist_coeffs[3]:.6f}"
-                if len(dist_coeffs) >= 5:
-                    display_text += f", k3={dist_coeffs[4]:.6f}"
-                if len(dist_coeffs) >= 8:
-                    display_text += f"\nk4={dist_coeffs[5]:.6f}, k5={dist_coeffs[6]:.6f}, k6={dist_coeffs[7]:.6f}"
+                display_text += f"\nk3={dist_coeffs[2]:.6f}, k4={dist_coeffs[3]:.6f}"
+            else:
+                if len(dist_coeffs) >= 4:
+                    display_text += f"\nk1={dist_coeffs[0]:.6f}, k2={dist_coeffs[1]:.6f}"
+                    display_text += f"\np1={dist_coeffs[2]:.6f}, p2={dist_coeffs[3]:.6f}"
+                    if len(dist_coeffs) >= 5:
+                        display_text += f", k3={dist_coeffs[4]:.6f}"
+                    if len(dist_coeffs) >= 8:
+                        display_text += f"\nk4={dist_coeffs[5]:.6f}, k5={dist_coeffs[6]:.6f}, k6={dist_coeffs[7]:.6f}"
         else:
             display_text += "\nDistortion Coeffs: None"
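Context for the helpers introduced above: ROS `CameraInfo.distortion_model` is typically `plumb_bob` or `rational_polynomial` for pinhole cameras and `equidistant` for fisheye lenses, and `cv2.fisheye` expects exactly four coefficients (k1..k4). A standalone check of the truncate/pad behavior, mirroring `_ensure_fisheye_dist_coeffs`:

```python
import numpy as np

def ensure_fisheye_dist_coeffs(d):
    """Truncate or zero-pad a distortion vector to the 4 values cv2.fisheye expects."""
    d = np.asarray(d, dtype=np.float64)
    if len(d) > 4:
        return d[:4]
    if len(d) < 4:
        return np.pad(d, (0, 4 - len(d)), mode="constant")
    return d

print(ensure_fisheye_dist_coeffs([0.1, -0.05, 0.001, -0.002, 0.02]))  # truncated to 4
print(ensure_fisheye_dist_coeffs([0.1, -0.05]))                       # zero-padded to 4
```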
@@ -895,7 +920,13 @@ def project_pointcloud(self, extrinsics=None, re_read_cloud=True):
         if extrinsics is not None:
             self.extrinsics = extrinsics
         self.clear_all_highlighting()
-        if self.point_cloud_item is not None and self.point_cloud_item.scene():
+        try:
+            has_scene = (
+                self.point_cloud_item is not None and self.point_cloud_item.scene() is not None
+            )
+        except RuntimeError:
+            has_scene = False
+        if has_scene:
             self.scene.removeItem(self.point_cloud_item)
             self.point_cloud_item = None
@@ -917,9 +948,21 @@ def project_pointcloud(self, extrinsics=None, re_read_cloud=True):
             return
 
         K = np.array(self.camerainfo_msg.k).reshape(3, 3)
+        dist_coeffs = (
+            np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+        )
         rvec, _ = cv2.Rodrigues(self.extrinsics[:3, :3])
         tvec = self.extrinsics[:3, 3]
-        points_proj_cv, _ = cv2.projectPoints(self.points_xyz, rvec, tvec, K, None)
+
+        if self._is_fisheye_camera():
+            dist_coeffs = self._ensure_fisheye_dist_coeffs(dist_coeffs)
+            points_xyz_proc = np.ascontiguousarray(self.points_xyz).reshape(-1, 1, 3)
+            points_proj_cv, _ = cv2.fisheye.projectPoints(
+                points_xyz_proc, rvec, tvec, K, dist_coeffs
+            )
+        else:
+            points_proj_cv, _ = cv2.projectPoints(self.points_xyz, rvec, tvec, K, None)
+
         points_proj_cv = points_proj_cv.reshape(-1, 2)
         points_cam = (self.extrinsics[:3, :3] @ self.points_xyz.T).T + tvec
         z_cam = points_cam[:, 2]
@@ -976,7 +1019,14 @@ def project_second_pointcloud(self, transform=None):
             self.second_lidar_transform = transform
 
         # Remove existing second point cloud
-        if self.second_point_cloud_item is not None and self.second_point_cloud_item.scene():
+        try:
+            has_scene = (
+                self.second_point_cloud_item is not None
+                and self.second_point_cloud_item.scene() is not None
+            )
+        except RuntimeError:
+            has_scene = False
+        if has_scene:
             self.scene.removeItem(self.second_point_cloud_item)
             self.second_point_cloud_item = None
@@ -1000,9 +1050,21 @@ def project_second_pointcloud(self, transform=None):
         # Project using master LiDAR to camera transform
         K = np.array(self.camerainfo_msg.k).reshape(3, 3)
+        dist_coeffs = (
+            np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+        )
         rvec, _ = cv2.Rodrigues(self.extrinsics[:3, :3])
         tvec = self.extrinsics[:3, 3]
-        points_proj_cv, _ = cv2.projectPoints(transformed_points, rvec, tvec, K, None)
+
+        if self._is_fisheye_camera():
+            dist_coeffs = self._ensure_fisheye_dist_coeffs(dist_coeffs)
+            transformed_points_proc = np.ascontiguousarray(transformed_points).reshape(-1, 1, 3)
+            points_proj_cv, _ = cv2.fisheye.projectPoints(
+                transformed_points_proc, rvec, tvec, K, dist_coeffs
+            )
+        else:
+            points_proj_cv, _ = cv2.projectPoints(transformed_points, rvec, tvec, K, None)
+
         points_proj_cv = points_proj_cv.reshape(-1, 2)
 
         # Transform to camera coordinates to check visibility
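Two notes on the hunks above. The `try/except RuntimeError` guards are needed because PySide/PyQt raises `RuntimeError` when `scene()` is called on an item whose underlying C++ object has already been deleted. And unlike `cv2.projectPoints`, `cv2.fisheye.projectPoints` rejects plain `(N, 3)` input: it wants a contiguous `(N, 1, 3)` (or `(1, N, 3)`) array, hence the `np.ascontiguousarray(...).reshape(-1, 1, 3)` wrapping. A minimal demonstration:

```python
import cv2
import numpy as np

pts = np.random.rand(10, 3) + np.array([0.0, 0.0, 5.0])  # points in front of the camera
rvec = np.zeros((3, 1))
tvec = np.zeros((3, 1))
K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
D = np.zeros(4)  # k1..k4

proj, _ = cv2.fisheye.projectPoints(
    np.ascontiguousarray(pts).reshape(-1, 1, 3), rvec, tvec, K, D
)
print(proj.reshape(-1, 2).shape)  # (10, 2)
```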
@@ -1057,17 +1119,22 @@ def update_corr_list(self):
         # Add master LiDAR to camera correspondences
         for p2d, corr_data in self.correspondences.items():
             p3d = corr_data["3d_mean"]
-            item_text = f"Cam ({p2d[0]:.1f}, {p2d[1]:.1f}) ↔ Master ({p3d[0]:.2f}, {p3d[1]:.2f}, {p3d[2]:.2f})"
+            # Normalize the key to a hashable type for storage in item data
+            p2d_key = tuple(p2d) if isinstance(p2d, (list, np.ndarray)) else p2d
+            item_text = f"Cam ({p2d_key[0]:.1f}, {p2d_key[1]:.1f}) ↔ Master ({p3d[0]:.2f}, {p3d[1]:.2f}, {p3d[2]:.2f})"
             item = QListWidgetItem(item_text)
-            item.setData(Qt.UserRole, ("master_cam", p2d))
+            item.setData(Qt.UserRole, ("master_cam", p2d_key))
             self.corr_list_widget.addItem(item)
 
         # Add LiDAR-to-LiDAR correspondences
         for second_3d, corr_data in self.lidar_to_lidar_correspondences.items():
             master_3d = corr_data["master_3d_mean"]
-            item_text = f"Second ({second_3d[0]:.2f}, {second_3d[1]:.2f}, {second_3d[2]:.2f}) ↔ Master ({master_3d[0]:.2f}, {master_3d[1]:.2f}, {master_3d[2]:.2f})"
+            second_key = (
+                tuple(second_3d) if isinstance(second_3d, (list, np.ndarray)) else second_3d
+            )
+            item_text = f"Second ({second_key[0]:.2f}, {second_key[1]:.2f}, {second_key[2]:.2f}) ↔ Master ({master_3d[0]:.2f}, {master_3d[1]:.2f}, {master_3d[2]:.2f})"
             item = QListWidgetItem(item_text)
-            item.setData(Qt.UserRole, ("lidar_lidar", second_3d))
+            item.setData(Qt.UserRole, ("lidar_lidar", second_key))
             self.corr_list_widget.addItem(item)
 
     def delete_correspondence(self):
@@ -1076,10 +1143,14 @@ def delete_correspondence(self):
         corr_data = current_item.data(Qt.UserRole)
         if corr_data[0] == "master_cam":
             p2d_key = corr_data[1]
+            if isinstance(p2d_key, (list, np.ndarray)):
+                p2d_key = tuple(p2d_key)
             if p2d_key in self.correspondences:
                 del self.correspondences[p2d_key]
         elif corr_data[0] == "lidar_lidar":
             second_3d_key = corr_data[1]
+            if isinstance(second_3d_key, (list, np.ndarray)):
+                second_3d_key = tuple(second_3d_key)
             if second_3d_key in self.lidar_to_lidar_correspondences:
                 del self.lidar_to_lidar_correspondences[second_3d_key]
         self.update_corr_list()
@@ -1098,6 +1169,8 @@ def highlight_from_list(self, current_item, previous_item):
         if corr_data[0] == "master_cam":
             # Highlight master LiDAR to camera correspondence
             p2d_key = corr_data[1]
+            if isinstance(p2d_key, (list, np.ndarray)):
+                p2d_key = tuple(p2d_key)
             corr = self.correspondences.get(p2d_key)
             if not corr:
                 return
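The `tuple(...)` normalization above matters because NumPy arrays are not hashable, so they cannot serve as dict keys or round-trip reliably through Qt item data; the later `isinstance` checks also cover bindings handing the stored key back as a list. A quick illustration:

```python
import numpy as np

p2d = np.array([123.4, 56.7])
key = tuple(p2d)                # hashable
corrs = {key: "corr"}
print(corrs[tuple(np.array([123.4, 56.7]))])  # lookup works from a fresh array
# corrs[p2d] would raise TypeError: unhashable type: 'numpy.ndarray'
```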
@@ -1212,9 +1285,16 @@ def run_calibration(self):
                 master_cam_corr, self.lidar_to_lidar_correspondences, K, pnp_flag, lsq_method
             )
         else:
+            dist_coeffs = (
+                np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+            )
+
             # Single LiDAR calibration
             calib_corr = [(p2d, corr["3d_mean"]) for p2d, corr in self.correspondences.items()]
-            self.extrinsics = calibration.calibrate(calib_corr, K, pnp_flag, lsq_method)
+            self.extrinsics = calibration.calibrate(calib_corr, K, pnp_flag, lsq_method,
+                dist_coeffs=self._ensure_fisheye_dist_coeffs(dist_coeffs),
+                is_fisheye=self._is_fisheye_camera(),
+            )
 
         self.progress_bar.setVisible(False)
         self.project_pointcloud()
diff --git a/ros2_calib/dual_calibration_widget.py b/ros2_calib/dual_calibration_widget.py
index ce55cd3..2e00f1d 100644
--- a/ros2_calib/dual_calibration_widget.py
+++ b/ros2_calib/dual_calibration_widget.py
@@ -547,6 +547,21 @@ def display_camera_intrinsics(self):
 
         self.intrinsics_display.setPlainText(display_text)
 
+    def _ensure_fisheye_dist_coeffs(self, dist_coeffs):
+        """Ensure distortion coefficients match fisheye model requirement (exactly 4 coeffs)."""
+        if len(dist_coeffs) > 4:
+            return dist_coeffs[:4].astype(np.float32)
+        elif len(dist_coeffs) < 4:
+            return np.pad(dist_coeffs, (0, 4 - len(dist_coeffs)), mode="constant").astype(
+                np.float32
+            )
+        return dist_coeffs.astype(np.float32)
+
+    def _is_fisheye_camera(self):
+        """Check if camera uses fisheye or equidistant distortion model."""
+        model = self.camerainfo_msg.distortion_model.lower()
+        return any(fisheye_type in model for fisheye_type in ("fisheye", "equidistant"))
+
     def project_master_pointcloud(self, re_read_cloud=True):
         """Project master point cloud to image."""
         if not self.master_visible:
@@ -578,9 +593,20 @@ def project_master_pointcloud(self, re_read_cloud=True):
 
         # Project points
         K = np.array(self.camerainfo_msg.k).reshape(3, 3)
+        dist_coeffs = (
+            np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+        )
         rvec, _ = cv2.Rodrigues(self.master_extrinsics[:3, :3])
         tvec = self.master_extrinsics[:3, 3]
-        points_proj_cv, _ = cv2.projectPoints(self.master_points_xyz, rvec, tvec, K, None)
+
+        if self._is_fisheye_camera():
+            dist_coeffs = self._ensure_fisheye_dist_coeffs(dist_coeffs)
+            master_points_xyz_proc = np.ascontiguousarray(self.master_points_xyz).reshape(-1, 1, 3)
+            points_proj_cv, _ = cv2.fisheye.projectPoints(
+                master_points_xyz_proc, rvec, tvec, K, dist_coeffs
+            )
+        else:
+            points_proj_cv, _ = cv2.projectPoints(self.master_points_xyz, rvec, tvec, K, None)
         points_proj_cv = points_proj_cv.reshape(-1, 2)
 
         # Filter visible points
@@ -657,9 +683,20 @@ def project_second_pointcloud(self, re_read_cloud=True):
 
         # Project points using second LiDAR's transformation to camera
         K = np.array(self.camerainfo_msg.k).reshape(3, 3)
+        dist_coeffs = (
+            np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+        )
         rvec, _ = cv2.Rodrigues(self.second_extrinsics[:3, :3])
         tvec = self.second_extrinsics[:3, 3]
-        points_proj_cv, _ = cv2.projectPoints(self.second_points_xyz, rvec, tvec, K, None)
+
+        if self._is_fisheye_camera():
+            dist_coeffs = self._ensure_fisheye_dist_coeffs(dist_coeffs)
+            second_points_xyz_proc = np.ascontiguousarray(self.second_points_xyz).reshape(-1, 1, 3)
+            points_proj_cv, _ = cv2.fisheye.projectPoints(
+                second_points_xyz_proc, rvec, tvec, K, dist_coeffs
+            )
+        else:
+            points_proj_cv, _ = cv2.projectPoints(self.second_points_xyz, rvec, tvec, K, None)
         points_proj_cv = points_proj_cv.reshape(-1, 2)
 
         # Filter visible points
@@ -1402,6 +1439,9 @@ def run_calibration(self):
         try:
             lsq_method = self.lsq_method_combo.currentText()
             K = np.array(self.camerainfo_msg.k).reshape(3, 3)
+            dist_coeffs = (
+                np.array(self.camerainfo_msg.d) if hasattr(self.camerainfo_msg, "d") else np.zeros(7)
+            )
 
             # Prepare master LiDAR to camera correspondences
             master_cam_corr = [
@@ -1427,6 +1467,8 @@ def run_calibration(self):
                     self.master_extrinsics,  # Use current transforms as initial guess
                     self.second_extrinsics,
                     lsq_method,
+                    dist_coeffs=self._ensure_fisheye_dist_coeffs(dist_coeffs),
+                    is_fisheye=self._is_fisheye_camera(),
                 )
             )
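A hypothetical end-to-end check of the new fisheye path, assuming the module is importable as `ros2_calib.calibration`: synthetic 3D points are projected through a known fisheye pose, and `calibrate()` should recover that pose up to numerical noise.

```python
import cv2
import numpy as np
from ros2_calib import calibration  # import path assumed

rng = np.random.default_rng(0)
K = np.array([[600.0, 0.0, 640.0], [0.0, 600.0, 360.0], [0.0, 0.0, 1.0]])
D = np.array([0.05, -0.01, 0.002, 0.0])
rvec_gt = np.array([[0.05], [-0.10], [0.02]])
tvec_gt = np.array([[0.10], [-0.20], [0.30]])

# Synthetic points a few meters in front of the camera
pts3d = rng.uniform(-2.0, 2.0, (30, 3)) + np.array([0.0, 0.0, 6.0])
pts2d, _ = cv2.fisheye.projectPoints(pts3d.reshape(-1, 1, 3), rvec_gt, tvec_gt, K, D)

corr = list(zip(pts2d.reshape(-1, 2), pts3d.astype(np.float32)))
T = calibration.calibrate(corr, K, dist_coeffs=D, is_fisheye=True)
print(T)  # expect rotation/translation matching (rvec_gt, tvec_gt)
```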