diff --git a/example_boards/aruco_boards/A2_aruco_8x6_mar_len_05_sep_len_0125_DICT6X6_1000.svg b/example_boards/aruco_boards/A2_aruco_8x6_mar_len_05_sep_len_0125_DICT6X6_1000.svg
new file mode 100644
index 0000000..c320196
--- /dev/null
+++ b/example_boards/aruco_boards/A2_aruco_8x6_mar_len_05_sep_len_0125_DICT6X6_1000.svg
@@ -0,0 +1,710 @@
+
\ No newline at end of file
diff --git a/example_boards/aruco_boards/A3_aruco_6x4_mar_len_05_sep_len_0125_DICT6X6_1000.svg b/example_boards/aruco_boards/A3_aruco_6x4_mar_len_05_sep_len_0125_DICT6X6_1000.svg
new file mode 100644
index 0000000..307b47d
--- /dev/null
+++ b/example_boards/aruco_boards/A3_aruco_6x4_mar_len_05_sep_len_0125_DICT6X6_1000.svg
@@ -0,0 +1,352 @@
+
\ No newline at end of file
diff --git a/example_boards/aruco_boards/A4_aruco_4x3_mar_len_05_sep_len_0125_DICT6X6_1000.svg b/example_boards/aruco_boards/A4_aruco_4x3_mar_len_05_sep_len_0125_DICT6X6_1000.svg
new file mode 100644
index 0000000..ee7b923
--- /dev/null
+++ b/example_boards/aruco_boards/A4_aruco_4x3_mar_len_05_sep_len_0125_DICT6X6_1000.svg
@@ -0,0 +1,174 @@
+
\ No newline at end of file
diff --git a/example_boards/charuco_boards/A2/A2_charuco_4x3_mar_len_006_sq_len_012_DICT6X6_1000.svg b/example_boards/charuco_boards/A2/A2_charuco_4x3_mar_len_006_sq_len_012_DICT6X6_1000.svg
new file mode 100644
index 0000000..4805bc1
--- /dev/null
+++ b/example_boards/charuco_boards/A2/A2_charuco_4x3_mar_len_006_sq_len_012_DICT6X6_1000.svg
@@ -0,0 +1,97 @@
+
\ No newline at end of file
diff --git a/example_boards/charuco_boards/A2/A2_charuco_6x4_mar_len_004_sq_len_008_DICT6X6_1000.svg b/example_boards/charuco_boards/A2/A2_charuco_6x4_mar_len_004_sq_len_008_DICT6X6_1000.svg
new file mode 100644
index 0000000..9a6d118
--- /dev/null
+++ b/example_boards/charuco_boards/A2/A2_charuco_6x4_mar_len_004_sq_len_008_DICT6X6_1000.svg
@@ -0,0 +1,186 @@
+
\ No newline at end of file
diff --git a/example_boards/charuco_boards/A2/A2_charuco_8x6_mar_len_003_sq_len_006_DICT6X6_1000.svg b/example_boards/charuco_boards/A2/A2_charuco_8x6_mar_len_003_sq_len_006_DICT6X6_1000.svg
new file mode 100644
index 0000000..8eb5c9c
--- /dev/null
+++ b/example_boards/charuco_boards/A2/A2_charuco_8x6_mar_len_003_sq_len_006_DICT6X6_1000.svg
@@ -0,0 +1,376 @@
+
\ No newline at end of file
diff --git a/example_boards/charuco_boards/A3/A3_charuco_6x4_mar_len_003_sq_len_006_DICT6X6_1000.svg b/example_boards/charuco_boards/A3/A3_charuco_6x4_mar_len_003_sq_len_006_DICT6X6_1000.svg
new file mode 100644
index 0000000..cec0fbe
--- /dev/null
+++ b/example_boards/charuco_boards/A3/A3_charuco_6x4_mar_len_003_sq_len_006_DICT6X6_1000.svg
@@ -0,0 +1,186 @@
+
\ No newline at end of file
diff --git a/example_boards/charuco_boards/A4/A4_charuco_4x3_mar_len_004_sq_len_008_DICT6X6_1000.svg b/example_boards/charuco_boards/A4/A4_charuco_4x3_mar_len_004_sq_len_008_DICT6X6_1000.svg
new file mode 100644
index 0000000..75559a3
--- /dev/null
+++ b/example_boards/charuco_boards/A4/A4_charuco_4x3_mar_len_004_sq_len_008_DICT6X6_1000.svg
@@ -0,0 +1,97 @@
+
\ No newline at end of file
diff --git a/extrinsic_calibrator/package.xml b/extrinsic_calibrator/package.xml
index 6263224..26e3d4a 100644
--- a/extrinsic_calibrator/package.xml
+++ b/extrinsic_calibrator/package.xml
@@ -25,5 +25,4 @@
ament_cmake
-
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/extrinsic_calibrator_core/config/aruco_parameters.yaml b/extrinsic_calibrator_core/config/aruco_parameters.yaml
index fef39ca..c03ea54 100644
--- a/extrinsic_calibrator_core/config/aruco_parameters.yaml
+++ b/extrinsic_calibrator_core/config/aruco_parameters.yaml
@@ -2,6 +2,28 @@ aruco_params:
aruco_dict:
type: string
default_value: "DICT_6X6_250"
+ description: "Aruco dictionary to use. nXn_value. n - how many squares per marker. VALUE - how many pixels per marker. See - https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html"
marker_length:
type: double
- default_value: 0.26
\ No newline at end of file
+ default_value: 0.26
+ description: "Length of one side of the marker in meters."
+ board_mode:
+ type: string
+ default_value: "charuco_board"
+ description: "Board generation mode. Options: single_markers, aruco_board, charuco_board"
+ board_rows:
+ type: int
+ default_value: 5
+ description: "Number of marker rows in the aruco board."
+ board_cols:
+ type: int
+ default_value: 5
+ description: "Number of marker columns in the aruco board."
+ marker_separation_length:
+ type: double
+ default_value: 0.04
+ description: "Separation between markers in the aruco board (meters). See 'b' on the image there - https://github.com/ethz-asl/kalibr/wiki/calibration-targets"
+ square_length:
+ type: double
+ default_value: 0.2
+ description: "Length of one side of the chess square in the charuco board (meters)."
\ No newline at end of file
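
A quick way to sanity-check the dictionary naming above (the first number is the bit grid per marker, the second the dictionary size) is to query OpenCV directly; the snippet below is illustrative and not part of the package:

```python
import cv2

# DICT_6X6_1000: every marker encodes a 6x6 bit grid, and the dictionary holds 1000 distinct markers
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_1000)
print(dictionary.markerSize)          # 6    -> bits per marker side
print(dictionary.bytesList.shape[0])  # 1000 -> number of markers in the dictionary
```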
diff --git a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/aruco_generator_class.py b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/aruco_generator_class.py
index 20086a9..bf4ad0e 100644
--- a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/aruco_generator_class.py
+++ b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/aruco_generator_class.py
@@ -55,7 +55,7 @@
# Custom parameters
from extrinsic_calibrator_core.python_aruco_parameters import aruco_params
-from extrinsic_calibrator_core.src.extrinsic_calibrator_class import ArucoParams
+from extrinsic_calibrator_core.src.detectors.aruco_params import ArucoMarkerParams
@@ -66,7 +66,7 @@ def __init__(self):
aruco_params_listener = aruco_params.ParamListener(self)
imported_aruco_params = aruco_params_listener.get_params()
- self.real_aruco_params = ArucoParams(self,imported_aruco_params)
+ self.real_aruco_params = ArucoMarkerParams(self,imported_aruco_params)
# Parameters for marker generation
self.declare_parameter('marker_size', 200)
@@ -98,7 +98,7 @@ def __init__(self):
rclpy.shutdown()
- def generate_marker(self, marker_id, marker_size, output_directory, aruco_params:ArucoParams):
+ def generate_marker(self, marker_id, marker_size, output_directory, aruco_params:ArucoMarkerParams):
# Generate the marker image
marker_image = cv2.aruco.generateImageMarker(aruco_params.aruco_dict, marker_id, marker_size)
@@ -111,4 +111,4 @@ def generate_marker(self, marker_id, marker_size, output_directory, aruco_params
# self.get_logger().info(f'Marker with ID {marker_id} generated and saved to {output_path}')
return True
-
+
\ No newline at end of file
diff --git a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/__init__.py b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/aruco_params.py b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/aruco_params.py
new file mode 100644
index 0000000..cd3e174
--- /dev/null
+++ b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/aruco_params.py
@@ -0,0 +1,63 @@
+import cv2
+
+from rclpy.node import Node
+
+class ArucoMarkerParams():
+ def __init__(self, node:Node, aruco_params):
+
+ if hasattr(cv2.aruco, aruco_params.aruco_dict):
+ self.aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, aruco_params.aruco_dict))
+ else:
+ node.get_logger().error(f"cv2.aruco doesn't have a dictionary with the name '{aruco_params.aruco_dict}'")
+
+ self.marker_length = aruco_params.marker_length
+
+ node.get_logger().info(
+ f"{self.__class__.__name__} set:\n"
+ f" Dictionary: {aruco_params.aruco_dict}\n"
+ f" Marker Length: {self.marker_length}\n"
+ )
+
+class ArucoBoardParams():
+ def __init__(self, node:Node, aruco_params):
+
+ if hasattr(cv2.aruco, aruco_params.aruco_dict):
+ self.aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, aruco_params.aruco_dict))
+ else:
+ node.get_logger().error(f"cv2.aruco doesn't have a dictionary with the name '{aruco_params.aruco_dict}'")
+
+ self.marker_length = aruco_params.marker_length
+ self.board_rows = aruco_params.board_rows
+ self.board_cols = aruco_params.board_cols
+ self.marker_separation_length = aruco_params.marker_separation_length
+
+ node.get_logger().info(
+ f"{self.__class__.__name__} set:\n"
+ f" Dictionary: {aruco_params.aruco_dict}\n"
+ f" Marker Length: {self.marker_length}\n"
+ f" Board Rows: {self.board_rows}\n"
+ f" Board Cols: {self.board_cols}\n"
+ f" Marker Separation: {self.marker_separation_length}\n"
+ )
+
+
+class ChArUcoBoardParams():
+ def __init__(self, node:Node, aruco_params):
+
+ if hasattr(cv2.aruco, aruco_params.aruco_dict):
+ self.aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, aruco_params.aruco_dict))
+ else:
+ node.get_logger().error(f"cv2.aruco doesn't have a dictionary with the name '{aruco_params.aruco_dict}'")
+
+ self.marker_length = aruco_params.marker_length
+ self.square_length = aruco_params.square_length
+ self.board_rows = aruco_params.board_rows
+ self.board_cols = aruco_params.board_cols
+
+ node.get_logger().info(
+ f"[{self.__class__.__name__}] set:\n"
+ f" Dictionary: {aruco_params.aruco_dict}\n"
+ f" Marker Length: {self.marker_length}\n"
+ f" Board Rows: {self.board_rows}\n"
+ f" Board Cols: {self.board_cols}\n"
+ )
\ No newline at end of file
diff --git a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/camera.py b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/camera.py
new file mode 100644
index 0000000..9c17680
--- /dev/null
+++ b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/detectors/camera.py
@@ -0,0 +1,332 @@
+import cv2
+import numpy as np
+from cv_bridge import CvBridge
+from scipy.spatial.transform import Rotation as R
+from abc import ABC, abstractmethod
+
+from collections import deque
+
+import tf2_ros
+from rclpy.node import Node
+from sensor_msgs.msg import Image, CameraInfo
+from std_msgs.msg import Float32
+
+from extrinsic_calibrator_core.src.detectors.aruco_params import ArucoMarkerParams, ArucoBoardParams, ChArUcoBoardParams
+
+class CameraBase(ABC):
+ def __init__(self, node:Node, camera_name:str, camera_id:int, image_topic:str, camera_info_topic:str, bridge:CvBridge, broadcaster:tf2_ros.TransformBroadcaster):
+
+ self.node = node
+ self.camera_name = camera_name
+ self.camera_id = camera_id
+ self.image_topic = "/robotic_platform/" + image_topic
+ self.camera_info_topic = camera_info_topic
+ self.bridge = bridge
+ self.tf_broadcaster = broadcaster
+
+ self.camera_matrix = None
+ self.dist_coeffs = None
+
+ self.camera_info_sub = self.node.create_subscription(CameraInfo, camera_info_topic, self.camera_info_callback, 1)
+ self.image_sub = self.node.create_subscription(Image, image_topic, self.image_callback, 1)
+ self.cv2_image_publisher = self.node.create_publisher(Image, f"{image_topic}/detected_markers", 10)
+
+ self.marker_transforms = {}
+ self.reliable_marker_transforms = {}
+ self.marker_pose_variations = {}
+
+ self.pose_variation_timer = self.node.create_timer(0.5, self.pose_variation_timer_callback)
+ self.pose_variation_pub = self.node.create_publisher(Float32, f'{self.node.get_name()}/{self.camera_name}/pose_variation', 10)
+
+ def pose_variation_timer_callback(self):
+ if self.marker_pose_variations:
+ msg = Float32()
+ msg.data = np.mean(list(self.marker_pose_variations.values()))
+ self.pose_variation_pub.publish(msg)
+
+ def camera_info_callback(self, msg : CameraInfo):
+ if self.camera_matrix is None:
+ self.camera_matrix = np.array(msg.k).reshape((3, 3))
+ self.dist_coeffs = np.array(msg.d)
+ self.node.get_logger().info(f"[Camera {self.camera_name}] Intrinsic parameters received.")
+
+ @abstractmethod
+ def image_callback(self, msg : Image):
+ pass
+
+ def log_initialization(self):
+ self.node.get_logger().info(f"Camera {self.camera_name} created.")
+ self.node.get_logger().info(f"Camera {self.camera_name} subscribed to image topic: {self.image_topic}.")
+ self.node.get_logger().info(f"Camera {self.camera_name} subscribed to camera_info topic: {self.camera_info_topic}.")
+
+ def check_precision(self, marker_id : int, transform : np.ndarray):
+ if self.is_precise(transform):
+ self.node.get_logger().info(f"[Camera {self.camera_name}] Marker {marker_id} is reliable")
+ # add the last transform of the array in the dictionary as reliable marker transform
+ self.reliable_marker_transforms[marker_id] = transform[-1]
+ self.marker_pose_variations[marker_id] = self.calculate_pose_variation(*self.calculate_position_rotation_range(transform))
+
+ def calculate_position_rotation_range(self, transforms : np.ndarray):
+ positions = np.array([t[:3, 3] for t in transforms])
+ position_range = np.ptp(positions, axis=0)
+
+ rotations = [R.from_matrix(t[:3, :3]) for t in transforms]
+ quat = np.array([r.as_quat() for r in rotations])
+ angles = []
+ for i in range(len(quat)):
+ for j in range(i+1, len(quat)):
+ dot = np.abs(np.dot(quat[i], quat[j]))
+ angle = 2 * np.arccos(np.clip(dot, -1.0, 1.0))
+ angles.append(angle)
+ rotation_range = np.max(angles)
+
+ return position_range, rotation_range
+
+ def calculate_pose_variation(self, position_range : np.ndarray, rotation_range : float) -> float:
+ pose_variation = np.mean(position_range) + np.degrees(rotation_range)
+
+ return pose_variation
+
+ def is_precise(self, transforms : np.ndarray):
+ if all(transform is not None for transform in transforms):
+ position_range, rotation_range = self.calculate_position_rotation_range(transforms)
+
+ self.node.get_logger().debug(f"[{self.camera_name}] Position range: {np.mean(position_range)}, Rotation range (degrees): {np.degrees(rotation_range)}")
+
+ return np.all(position_range < 0.01) and np.all(rotation_range < np.radians(1))
+ else:
+ return False
+
+
+ def are_all_transforms_precise(self):
+ self.node.get_logger().info(f"Camera {self.camera_name}: Reliable markers: {list(self.reliable_marker_transforms.keys())}")
+ self.node.get_logger().info(f"Camera {self.camera_name}: Not Reliable markers: {list(self.marker_transforms.keys())}")
+ if len(self.reliable_marker_transforms) > 0 and len(self.marker_transforms) == 0:
+ self.node.get_logger().info(f"Camera {self.camera_name}: All markers are reliable")
+ return True
+ else:
+ return False
+
+
+class CameraAruco(CameraBase):
+ def __init__(self, node:Node, camera_name:str, camera_id:int, image_topic:str, camera_info_topic:str, bridge:CvBridge, broadcaster:tf2_ros.TransformBroadcaster, aruco_params : ArucoMarkerParams):
+ super().__init__(node, camera_name, camera_id, image_topic, camera_info_topic, bridge, broadcaster)
+
+ self.aruco_dict = aruco_params.aruco_dict
+ self.marker_length = aruco_params.marker_length
+
+ self.parameters = cv2.aruco.DetectorParameters()
+ self.detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.parameters)
+
+ super().log_initialization()
+
+ # This method is unchanged from the original extrinsic_calibrator
+ def image_callback(self, msg : Image):
+ if self.camera_matrix is None or self.dist_coeffs is None:
+ self.node.get_logger().warn(f"Camera {self.camera_name} parameters not yet received.")
+ return
+
+ cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
+
+ # For ArUco detection, you can use the filtered_image directly
+ corners, ids, _ = self.detector.detectMarkers(cv_image)
+ detected_ids = set()
+ if ids is not None:
+ for i, id in enumerate(ids):
+ marker_id = id[0]
+ detected_ids.add(marker_id)
+
+ if marker_id not in self.marker_transforms and marker_id not in self.reliable_marker_transforms:
+ self.marker_transforms[marker_id] = deque(maxlen=30)
+
+ objPoints = np.array([ [-self.marker_length/2, self.marker_length/2, 0],
+ [self.marker_length/2, self.marker_length/2, 0],
+ [self.marker_length/2, -self.marker_length/2, 0],
+ [-self.marker_length/2,-self.marker_length/2, 0]], dtype=np.float32)
+
+ success, rvec, tvec = cv2.solvePnP(objPoints, corners[i], self.camera_matrix, self.dist_coeffs)
+ if success:
+ rot_matrix, _ = cv2.Rodrigues(rvec)
+ translation_matrix = np.eye(4)
+ translation_matrix[:3, :3] = rot_matrix
+ translation_matrix[:3, 3] = tvec.flatten()
+
+ # Draw the transform
+ cv2.aruco.drawDetectedMarkers(cv_image, corners, ids)
+ cv2.drawFrameAxes(cv_image, self.camera_matrix, self.dist_coeffs, rvec, tvec, self.marker_length/2)
+ ros_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
+ self.cv2_image_publisher.publish(ros_image)
+
+ # Filter out the already reliable markers
+ if marker_id in self.reliable_marker_transforms:
+ continue
+ else:
+ self.marker_transforms[marker_id].append(translation_matrix)
+
+ # Add None for markers not detected in this frame
+ for marker_id in self.marker_transforms:
+ if marker_id not in detected_ids:
+ # Restart the precision of the marker if not seen
+ # self.marker_transforms[marker_id].append(None)
+ pass
+
+ # iterate through each marker of the marker_transforms dictionary
+ for marker_id, transforms in self.marker_transforms.items():
+ if len(transforms) == 30:
+ self.check_precision(marker_id, transforms)
+
+ # delete all the transforms from the marker_transforms dictionary
+ for marker_id, transform in self.reliable_marker_transforms.items():
+ if marker_id in self.marker_transforms:
+ del self.marker_transforms[marker_id]
+
+class CameraArucoBoard(CameraBase):
+ def __init__(self, node:Node, camera_name:str, camera_id:int, image_topic:str, camera_info_topic:str, bridge:CvBridge, broadcaster:tf2_ros.TransformBroadcaster, aruco_params : ArucoBoardParams):
+ super().__init__(node, camera_name, camera_id, image_topic, camera_info_topic, bridge, broadcaster)
+
+ self.board = cv2.aruco.GridBoard(
+ (aruco_params.board_cols, aruco_params.board_rows),
+ aruco_params.marker_length,
+ aruco_params.marker_separation_length,
+ aruco_params.aruco_dict,
+ )
+
+ self.detector_parameters = cv2.aruco.DetectorParameters()
+ self.detector = cv2.aruco.ArucoDetector(aruco_params.aruco_dict, self.detector_parameters)
+
+ node.get_logger().info(
+ f" Camera {camera_name} Created board:\n"
+ f" Dictionary: {self.board.getDictionary().markerSize}X{self.board.getDictionary().markerSize}\n"
+ f" Marker Length: {self.board.getMarkerLength()}\n"
+ f" Board Size: {self.board.getGridSize()}\n"
+ f" Ids: {self.board.getIds()}\n"
+ f" Marker Separation: {self.board.getMarkerSeparation()}\n"
+ )
+
+ super().log_initialization()
+
+ def image_callback(self, msg : Image):
+
+ if self.camera_matrix is None or self.dist_coeffs is None:
+ self.node.get_logger().warn(f"Camera {self.camera_name} parameters not yet received.")
+ return
+
+ cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
+
+ corners, ids, _ = self.detector.detectMarkers(cv_image)
+
+ # If detection is successful
+ if ids is not None and len(corners) > 0 and len(ids) > 0:
+
+ # Visualize detected markers
+ cv2.aruco.drawDetectedMarkers(cv_image, corners, ids)
+
+ # Estimate the board pose
+ success, rvec, tvec = cv2.aruco.estimatePoseBoard(
+ corners, ids, self.board, self.camera_matrix, self.dist_coeffs, None, None
+ )
+
+ # If pose estimation is successful
+ if success > 0:
+ # Visualize board pose
+ cv2.drawFrameAxes(cv_image, self.camera_matrix, self.dist_coeffs, rvec, tvec, self.board.getMarkerLength() * 2)
+
+ if 0 not in self.marker_transforms and 0 not in self.reliable_marker_transforms:
+ self.marker_transforms[0] = deque(maxlen=30)
+
+ if 0 not in self.reliable_marker_transforms:
+ rot_matrix, _ = cv2.Rodrigues(rvec)
+ translation_matrix = np.eye(4)
+ translation_matrix[:3, :3] = rot_matrix
+ translation_matrix[:3, 3] = tvec.flatten()
+ self.marker_transforms[0].append(translation_matrix)
+
+ # iterate through each marker of the marker_transforms dictionary
+ for marker_id, transforms in self.marker_transforms.items():
+ if len(transforms) == 30:
+ self.check_precision(marker_id, transforms)
+
+ # delete all the transforms from the marker_transforms dictionary
+ for marker_id, _ in self.reliable_marker_transforms.items():
+ if marker_id in self.marker_transforms:
+ del self.marker_transforms[marker_id]
+
+ ros_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
+ self.cv2_image_publisher.publish(ros_image)
+
+class CameraChArUcoBoard(CameraBase):
+ def __init__(self, node:Node, camera_name:str, camera_id:int, image_topic:str, camera_info_topic:str, bridge:CvBridge, broadcaster:tf2_ros.TransformBroadcaster, aruco_params : ChArUcoBoardParams):
+ super().__init__(node, camera_name, camera_id, image_topic, camera_info_topic, bridge, broadcaster)
+
+ self.board = cv2.aruco.CharucoBoard(
+ (aruco_params.board_cols, aruco_params.board_rows),
+ aruco_params.square_length,
+ aruco_params.marker_length,
+ aruco_params.aruco_dict,
+ )
+
+ self.charuco_parameters = cv2.aruco.CharucoParameters()
+ self.detector_parameters = cv2.aruco.DetectorParameters()
+ self.detector = cv2.aruco.CharucoDetector(self.board, self.charuco_parameters, self.detector_parameters)
+
+ node.get_logger().info(
+ f" Camera {camera_name} Created board:\n"
+ f" Dictionary: {self.board.getDictionary().markerSize}X{self.board.getDictionary().markerSize}\n"
+ f" Marker Length: {self.board.getMarkerLength()}\n"
+ f" Square Length: {self.board.getSquareLength()}\n"
+ f" Chessboard Size: {self.board.getChessboardSize()}\n"
+ f" Ids: {self.board.getIds()}\n"
+ )
+
+ super().log_initialization()
+
+ def image_callback(self, msg : Image):
+
+ if self.camera_matrix is None or self.dist_coeffs is None:
+ self.node.get_logger().warn(f"Camera {self.camera_name} parameters not yet received.")
+ return
+
+ cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
+
+ charuco_corners, charuco_ids, marker_corners, marker_ids = self.detector.detectBoard(cv_image)
+
+ # If detection is successful
+ if (marker_ids is not None and len(marker_ids) > 0) and (charuco_ids is not None and len(charuco_ids) > 0):
+
+ # Visualize detected markers and chess square corners
+ cv2.aruco.drawDetectedMarkers(cv_image, marker_corners, marker_ids)
+ cv2.aruco.drawDetectedCornersCharuco(cv_image, charuco_corners, charuco_ids, (255, 0, 0))
+
+ # Estimate the board pose
+ success, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
+ charuco_corners, charuco_ids, self.board, self.camera_matrix, self.dist_coeffs, None, None
+ )
+
+ # If pose estimation is successful
+ if success > 0:
+ # Visualize board pose
+ cv2.drawFrameAxes(cv_image, self.camera_matrix, self.dist_coeffs, rvec, tvec, self.board.getSquareLength() * 2)
+
+ # '0' marker == board pose
+ if 0 not in self.marker_transforms and 0 not in self.reliable_marker_transforms:
+ self.marker_transforms[0] = deque(maxlen=30)
+
+ if 0 not in self.reliable_marker_transforms:
+ rot_matrix, _ = cv2.Rodrigues(rvec)
+ translation_matrix = np.eye(4)
+ translation_matrix[:3, :3] = rot_matrix
+ translation_matrix[:3, 3] = tvec.flatten()
+ self.marker_transforms[0].append(translation_matrix)
+
+ # iterate through each marker of the marker_transforms dictionary
+ for marker_id, transforms in self.marker_transforms.items():
+ if len(transforms) == 30:
+ self.check_precision(marker_id, transforms)
+
+ # delete all the transforms from the marker_transforms dictionary
+ for marker_id, _ in self.reliable_marker_transforms.items():
+ if marker_id in self.marker_transforms:
+ del self.marker_transforms[marker_id]
+
+ ros_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
+ self.cv2_image_publisher.publish(ros_image)
\ No newline at end of file
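
For context on the reliability gate in `CameraBase`: a marker (or board) pose becomes "reliable" once a window of 30 consecutive detections varies by less than 1 cm in every translation component and less than 1 degree between any two rotations, and the published `pose_variation` score is the mean translation range plus the rotation range in degrees. The sketch below reproduces that computation on synthetic data; the `make_pose` helper and the jitter magnitudes are illustrative assumptions, not part of the package:

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

def make_pose(xyz, yaw_deg):
    """Illustrative helper: build a 4x4 homogeneous transform from a translation and a yaw angle."""
    T = np.eye(4)
    T[:3, :3] = R.from_euler("z", yaw_deg, degrees=True).as_matrix()
    T[:3, 3] = xyz
    return T

# 30 noisy observations of the same pose (~1 mm translation and ~0.1 deg rotation jitter)
rng = np.random.default_rng(0)
transforms = [make_pose(np.array([1.0, 0.5, 2.0]) + 0.001 * rng.standard_normal(3),
                        30.0 + 0.1 * rng.standard_normal())
              for _ in range(30)]

# Same quantities as CameraBase: per-axis translation range and largest pairwise rotation angle
positions = np.array([t[:3, 3] for t in transforms])
position_range = np.ptp(positions, axis=0)
quats = np.array([R.from_matrix(t[:3, :3]).as_quat() for t in transforms])
angles = [2 * np.arccos(np.clip(abs(np.dot(quats[i], quats[j])), -1.0, 1.0))
          for i in range(len(quats)) for j in range(i + 1, len(quats))]
rotation_range = max(angles)

is_precise = bool(np.all(position_range < 0.01) and rotation_range < np.radians(1))
pose_variation = float(np.mean(position_range) + np.degrees(rotation_range))
print(is_precise, pose_variation)  # with jitter this small the gate should pass
```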
diff --git a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/extrinsic_calibrator_class.py b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/extrinsic_calibrator_class.py
index 6427a9b..e228f21 100644
--- a/extrinsic_calibrator_core/extrinsic_calibrator_core/src/extrinsic_calibrator_class.py
+++ b/extrinsic_calibrator_core/extrinsic_calibrator_core/src/extrinsic_calibrator_class.py
@@ -49,7 +49,6 @@
from collections import deque
# Well-known libraries
-import cv2
import numpy as np
from prettytable import PrettyTable
from scipy.spatial.transform import Rotation as R
@@ -58,7 +57,6 @@
from cv_bridge import CvBridge
from geometry_msgs.msg import TransformStamped
from rclpy.node import Node
-from sensor_msgs.msg import CameraInfo, Image
import tf2_ros
import tf_transformations
@@ -66,6 +64,11 @@
from extrinsic_calibrator_core.python_aruco_parameters import aruco_params
from extrinsic_calibrator_core.python_camera_topics_parameters import cameras_params
+# Camera-detectors
+from extrinsic_calibrator_core.src.detectors.camera import CameraAruco, CameraArucoBoard, CameraChArUcoBoard, CameraBase
+
+# aruco params
+from extrinsic_calibrator_core.src.detectors.aruco_params import ArucoMarkerParams, ArucoBoardParams, ChArUcoBoardParams
class ExtrinsicCalibrator(Node):
def __init__(self):
@@ -77,15 +80,24 @@ def __init__(self):
# OpenCV bridge for converting ROS Image to OpenCV image
self.bridge = CvBridge()
+ # Import ArUco parameters
aruco_params_listener = aruco_params.ParamListener(self)
imported_aruco_params = aruco_params_listener.get_params()
- self.real_aruco_params = ArucoParams(self,imported_aruco_params)
+ if imported_aruco_params.board_mode == "single_markers":
+ self.real_aruco_params = ArucoMarkerParams(self, imported_aruco_params)
+ elif imported_aruco_params.board_mode == "aruco_board":
+ self.real_aruco_params = ArucoBoardParams(self, imported_aruco_params)
+ elif imported_aruco_params.board_mode == "charuco_board":
+ self.real_aruco_params = ChArUcoBoardParams(self, imported_aruco_params)
+
+ # Import Cameras parameters
cameras_param_listener = cameras_params.ParamListener(self)
self.imported_cameras_params = cameras_param_listener.get_params()
camera_names = list(self.imported_cameras_params.camera_names)
-
+ self.get_logger().info(f"Received camera names from rp_camera: {camera_names}")
+
# construct the cameras
self.array_of_cameras = []
for idx, cam_name in enumerate(camera_names):
@@ -98,7 +110,17 @@ def __init__(self):
if not image_topic or not camera_info_topic:
self.get_logger().error(f"Skipping camera '{cam_name}' due to missing 'image_topic' or 'camera_info_topic' parameter.")
else:
- self.array_of_cameras.append(Camera(self, cam_name, idx, image_topic, camera_info_topic, self.bridge, self.tf_broadcaster, self.real_aruco_params))
+ mode = imported_aruco_params.board_mode
+ if mode == "single_markers":
+ camera = CameraAruco(self, cam_name, idx, image_topic, camera_info_topic, self.bridge, self.tf_broadcaster, self.real_aruco_params)
+ elif mode == "aruco_board":
+ camera = CameraArucoBoard(self, cam_name, idx, image_topic, camera_info_topic, self.bridge, self.tf_broadcaster, self.real_aruco_params)
+ elif mode == "charuco_board":
+ camera = CameraChArUcoBoard(self, cam_name, idx, image_topic, camera_info_topic, self.bridge, self.tf_broadcaster, self.real_aruco_params)
+ else:
+ raise ValueError(f"Unknown board mode: {mode}. Supported modes are 'single_markers', 'aruco_board', and 'charuco_board'.")
+
+ self.array_of_cameras.append(camera)
# periodically check if all cameras are calibrated
self.timer = self.create_timer(2.0, self.check_camera_transforms_callback)
@@ -108,7 +130,6 @@ def check_camera_transforms_callback(self):
if all([camera.are_all_transforms_precise() for camera in self.array_of_cameras]):
self.get_logger().info(f"All marker transforms gathered successfully")
for camera in self.array_of_cameras:
- camera:Camera
self.get_logger().info(f"Camera {camera.camera_name} has received all marker transforms")
camera.image_sub.destroy()
camera.camera_info_sub.destroy()
@@ -117,7 +138,7 @@ def check_camera_transforms_callback(self):
return True
else:
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
if camera.camera_matrix is None or camera.dist_coeffs is None:
self.get_logger().warn(f"Camera {camera.camera_name} parameters not yet received. Is the camera_info topic correct?")
self.get_logger().warn(f"Not all marker transforms gathered successfully")
@@ -209,7 +230,7 @@ def find_central_marker(self):
# For each cell in the table, provide two tables, one indicating how many other markers are seen byt the same camera, and another indicating how many cameras sees one individual camera
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
for marker_id in range(self.largest_marker + 1):
camera_counter = 0
marker_counter = 0
@@ -225,7 +246,7 @@ def find_central_marker(self):
# The total result will be the addition of the two tables
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
for marker_id in range(self.largest_marker + 1):
if self.is_marker_visible_from_camera_table[marker_id][camera.camera_id]:
self.scores_table[marker_id][camera.camera_id] = cameras_table[marker_id][camera.camera_id] + markers_table[marker_id][camera.camera_id]
@@ -249,7 +270,7 @@ def find_random_max_index(self, scores_table):
# Loop over the table to find the max value and its indices
for marker_id in range(self.largest_marker + 1):
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
if scores_table[marker_id][camera.camera_id] is not None:
value = scores_table[marker_id][camera.camera_id]
if value > max_value:
@@ -270,7 +291,7 @@ def generate_transform_between_markers_table(self):
# Create one table connected markers by a single camera
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
camera.can_camera_connect_two_markers_table = [[False for _ in range(self.largest_marker + 1)] for _ in range(self.largest_marker + 1)]
for origin_marker_id in range(self.largest_marker + 1):
for destination_marker_id in range(self.largest_marker + 1):
@@ -281,7 +302,7 @@ def generate_transform_between_markers_table(self):
# Merge the previous table into a huge table to check which markers can be connected at all
self.does_transform_exist_between_markers_table = [[False for _ in range(self.largest_marker + 1)] for _ in range(self.largest_marker + 1)]
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
for origin_marker_id in range(self.largest_marker + 1):
for destination_marker_id in range(self.largest_marker + 1):
if camera.can_camera_connect_two_markers_table[origin_marker_id][destination_marker_id] == True:
@@ -416,7 +437,7 @@ def return_the_cameras_between_markers_in_path(self,path):
for marker_id_index in range(len(path_being_evaluated)-1):
if type(path_being_evaluated[marker_id_index]) is int and type(path_being_evaluated[marker_id_index + 1]) is not str:
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
if self.is_marker_visible_from_camera_table[path_being_evaluated[marker_id_index]][camera.camera_id] and self.is_marker_visible_from_camera_table[path_being_evaluated[marker_id_index + 1]][camera.camera_id]:
new_path = copy.deepcopy(path_being_evaluated)
new_path.insert(marker_id_index + 1, camera.camera_name)
@@ -465,7 +486,7 @@ def compose_marker_to_marker_transform(self, paths):
destination_marker_id = path[element_index + 2]
for camera in self.array_of_cameras:
if camera.camera_name == camera_name:
- camera:Camera
+ camera:CameraBase
if camera.can_camera_connect_two_markers_table[origin_marker_id][destination_marker_id]:
transform = np.dot(np.linalg.inv(camera.reliable_marker_transforms[origin_marker_id]), camera.reliable_marker_transforms[destination_marker_id])
path_transform = np.dot(path_transform, transform)
@@ -490,7 +511,7 @@ def generate_camera_to_marker_transform_table(self):
# Create a table with the transforms between the cameras and the markers
self.camera_to_marker_transform_table = [[None for _ in range(len(self.array_of_cameras))] for _ in range(self.largest_marker + 1)]
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
for marker_id in range(self.largest_marker + 1):
if self.is_marker_visible_from_camera_table[marker_id][camera.camera_id]:
self.camera_to_marker_transform_table[marker_id][camera.camera_id] = camera.reliable_marker_transforms[marker_id]
@@ -504,7 +525,7 @@ def generate_world_to_cameras_transform_table(self):
# Create a table with the transforms between "map" and the cameras
self.map_to_cameras_transform_table = [None for _ in range(len(self.array_of_cameras))]
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
# check if the self.does_transform_exist_between_markers_table[camera_id]: has any True marker
for marker_id in range(self.largest_marker + 1):
if self.is_marker_visible_from_camera_table[marker_id][camera.camera_id]:
@@ -555,7 +576,7 @@ def broadcast_cameras_and_markers_to_world(self):
t.header.stamp = self.get_clock().now().to_msg()
t.header.frame_id = "marker_0"
- t.child_frame_id = "world"
+ t.child_frame_id = "map"
translation = tf_transformations.translation_from_matrix(origin_transform)
quaternion = tf_transformations.quaternion_from_matrix(origin_transform)
@@ -597,11 +618,11 @@ def broadcast_cameras_and_markers_to_world(self):
# Add all the camera transforms
for camera in self.array_of_cameras:
- camera:Camera
+ camera:CameraBase
if self.map_to_cameras_transform_table[camera.camera_id] is not None:
t = TransformStamped()
t.header.stamp = self.get_clock().now().to_msg()
- t.header.frame_id = "world"
+ t.header.frame_id = "map"
t.child_frame_id = camera.camera_name
transform = self.map_to_cameras_transform_table[camera.camera_id]
@@ -645,151 +666,4 @@ def display_marker_to_marker_table(self, title, table_data):
else:
for marker_id, row in enumerate(table_data):
table.add_row([marker_id] + [cell for cell in row])
- self.get_logger().info(f"{title}\n" + table.get_string())
-
-
-
-class ArucoParams():
- def __init__(self, node:Node, aruco_params):
- if hasattr(cv2.aruco, aruco_params.aruco_dict):
- self.aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, aruco_params.aruco_dict))
- else:
- node.get_logger().error(f"cv2.aruco doesn't have a dictionary with the name '{aruco_params.aruco_dict}'")
- self.marker_length = aruco_params.marker_length
-
-
-
-class Camera():
- def __init__(self, node:Node, camera_name:str, camera_id:int, image_topic:str, camera_info_topic:str, bridge:CvBridge, broadcaster:tf2_ros.TransformBroadcaster, aruco_params:ArucoParams):
-
- self.node = node
- self.camera_name = camera_name
- self.camera_id = camera_id
- self.image_topic = image_topic
- self.camera_info_topic = camera_info_topic
- self.bridge = bridge
- self.tf_broadcaster = broadcaster
-
- self.node.get_logger().info(f"Camera {self.camera_name} created.")
-
- self.camera_matrix = None
- self.dist_coeffs = None
-
- # Define Aruco marker properties
- self.aruco_dict = aruco_params.aruco_dict
- self.parameters = cv2.aruco.DetectorParameters()
- self.detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.parameters)
- self.marker_length = aruco_params.marker_length # length of the marker side in meters (adjust as needed)
-
- # Subscribe to the camera image topic and camera info
- self.image_sub = self.node.create_subscription(Image, image_topic, self.image_callback, 1)
- self.camera_info_sub = self.node.create_subscription(CameraInfo, camera_info_topic, self.camera_info_callback, 1)
- self.cv2_image_publisher = self.node.create_publisher(Image, f"{image_topic}/detected_markers", 10)
-
- self.marker_transforms = {}
- self.reliable_marker_transforms = {}
-
-
- def camera_info_callback(self, msg):
- if self.camera_matrix is None:
- self.camera_matrix = np.array(msg.k).reshape((3, 3))
- self.dist_coeffs = np.array(msg.d)
- self.node.get_logger().info(f"Camera {self.camera_name} parameters received.")
-
-
- def image_callback(self, msg):
- if self.camera_matrix is None or self.dist_coeffs is None:
- self.node.get_logger().warn(f"Camera {self.camera_name} parameters not yet received.")
- return
-
- cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
-
- # For ArUco detection, you can use the filtered_image directly
- corners, ids, rejected_img_points = self.detector.detectMarkers(cv_image)
- detected_ids = set()
- if ids is not None:
- for i, id in enumerate(ids):
- marker_id = id[0]
- detected_ids.add(marker_id)
-
- if marker_id not in self.marker_transforms and marker_id not in self.reliable_marker_transforms:
- self.marker_transforms[marker_id] = deque(maxlen=30)
-
- objPoints = np.array([ [-self.marker_length/2, self.marker_length/2, 0],
- [self.marker_length/2, self.marker_length/2, 0],
- [self.marker_length/2, -self.marker_length/2, 0],
- [-self.marker_length/2,-self.marker_length/2, 0]], dtype=np.float32)
-
- success, rvec, tvec = cv2.solvePnP(objPoints, corners[i], self.camera_matrix, self.dist_coeffs)
- if success:
- rot_matrix, _ = cv2.Rodrigues(rvec)
- translation_matrix = np.eye(4)
- translation_matrix[:3, :3] = rot_matrix
- translation_matrix[:3, 3] = tvec.flatten()
-
- # Draw the transform
- cv2.aruco.drawDetectedMarkers(cv_image, corners, ids)
- cv2.drawFrameAxes(cv_image, self.camera_matrix, self.dist_coeffs, rvec, tvec, self.marker_length/2)
- ros_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
- self.cv2_image_publisher.publish(ros_image)
-
- # Filter out the already reliable markers
- if marker_id in self.reliable_marker_transforms:
- continue
- else:
- self.marker_transforms[marker_id].append(translation_matrix)
-
- # Add None for markers not detected in this frame
- for marker_id in self.marker_transforms:
- if marker_id not in detected_ids:
- # Restart the precision of the marker if not seen
- # self.marker_transforms[marker_id].append(None)
- pass
-
- # iterate through each marker of the marker_transforms dictionary
- for marker_id, transforms in self.marker_transforms.items():
- if len(transforms) == 30:
- self.check_precision(marker_id, transforms)
-
- # delete all the transforms from the marker_transforms dictionary
- for marker_id, transform in self.reliable_marker_transforms.items():
- if marker_id in self.marker_transforms:
- del self.marker_transforms[marker_id]
-
-
- def check_precision(self, marker_id, transform):
- if self.is_precise(transform):
- self.node.get_logger().info(f"Camera {self.camera_name}: Marker {marker_id} is reliable")
- # add the last transform of the array in the dictionary as reliable marker transform
- self.reliable_marker_transforms[marker_id] = transform[-1]
-
-
- def is_precise(self, transforms):
- if all(transform is not None for transform in transforms):
- positions = np.array([t[:3, 3] for t in transforms])
- position_range = np.ptp(positions, axis=0)
-
- rotations = [R.from_matrix(t[:3, :3]) for t in transforms]
- quat = np.array([r.as_quat() for r in rotations])
- angles = []
- for i in range(len(quat)):
- for j in range(i+1, len(quat)):
- dot = np.abs(np.dot(quat[i], quat[j]))
- angle = 2 * np.arccos(np.clip(dot, -1.0, 1.0))
- angles.append(angle)
- rotation_range = np.max(angles)
-
- return np.all(position_range < 0.01) and np.all(rotation_range < np.radians(1))
- else:
- return False
-
-
- def are_all_transforms_precise(self):
- if len(self.reliable_marker_transforms) > 0 and len(self.marker_transforms) == 0:
- self.node.get_logger().info(f"Camera {self.camera_name}: All markers are reliable")
- return True
- else:
- for marker_id, transform in self.marker_transforms.items():
- if marker_id not in self.reliable_marker_transforms.keys():
- self.node.get_logger().warn(f"Camera {self.camera_name}: Marker {marker_id} is not reliable, yet")
- return False
\ No newline at end of file
+ self.get_logger().info(f"{title}\n" + table.get_string())
\ No newline at end of file
diff --git a/extrinsic_calibrator_core/requirments.txt b/extrinsic_calibrator_core/requirments.txt
index facc113..c9b072c 100644
--- a/extrinsic_calibrator_core/requirments.txt
+++ b/extrinsic_calibrator_core/requirments.txt
@@ -1,6 +1,6 @@
joblib==1.5.2
-numpy==2.2.6
-opencv-python==4.12.0.88
+numpy==1.26.4
+opencv-contrib-python==4.12.0.88
prettytable==3.16.0
scikit-learn==1.7.1
scipy==1.15.3
diff --git a/generate_board.py b/generate_board.py
new file mode 100644
index 0000000..a91408a
--- /dev/null
+++ b/generate_board.py
@@ -0,0 +1,149 @@
+import cv2
+import numpy as np
+import argparse
+from pathlib import Path
+
+def save_svg(img, output_path: str, board_width_m: float, board_height_m: float) -> None:
+ if len(img.shape) == 3:
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ else:
+ gray = img
+
+ _, bw = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
+ h, w = bw.shape
+
+ width_mm = board_width_m * 1000.0
+ height_mm = board_height_m * 1000.0
+
+ active = {}
+ rectangles = []
+
+ for y in range(h):
+ runs = []
+ row = bw[y]
+ x = 0
+ while x < w:
+ if row[x] == 0:
+ start = x
+ while x < w and row[x] == 0:
+ x += 1
+ runs.append((start, x - start))
+ else:
+ x += 1
+
+ current_keys = set(runs)
+ to_remove = []
+ for key, (sy, height) in active.items():
+ if key not in current_keys:
+ rx, rw = key
+ rectangles.append((rx, sy, rw, height))
+ to_remove.append(key)
+ for key in to_remove:
+ active.pop(key)
+
+ for run in runs:
+ if run in active:
+ sy, height = active[run]
+ active[run] = (sy, height + 1)
+ else:
+ active[run] = (y, 1)
+
+ for key, (sy, height) in active.items():
+ rx, rw = key
+ rectangles.append((rx, sy, rw, height))
+
+ # Write the merged rectangles out as SVG, sized in millimetres with the pixel grid as user units
+ lines = [
+     f'<svg xmlns="http://www.w3.org/2000/svg" width="{width_mm}mm" height="{height_mm}mm" viewBox="0 0 {w} {h}">',
+     f'<rect x="0" y="0" width="{w}" height="{h}" fill="white"/>',
+ ]
+ for rx, ry, rw, rh in rectangles:
+     lines.append(f'<rect x="{rx}" y="{ry}" width="{rw}" height="{rh}" fill="black"/>')
+ lines.append('</svg>')
+ Path(output_path).write_text("\n".join(lines), encoding="utf-8")
+
+def generate_aruco_board(board_type, output_path, board_rows, board_cols, square_length, marker_separation_length, marker_length, aruco_dict, pixels_per_meter):
+ aruco_dictionary = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, aruco_dict))
+
+ marker_px = int(round(marker_length * pixels_per_meter))
+ sep_px = int(round(marker_separation_length * pixels_per_meter))
+
+ if board_type == "aruco_board":
+ board = cv2.aruco.GridBoard(
+ (board_cols, board_rows),
+ marker_length,
+ marker_separation_length,
+ aruco_dictionary,
+ )
+
+ board_width_m = board_cols * marker_length + (board_cols - 1) * marker_separation_length
+ board_height_m = board_rows * marker_length + (board_rows - 1) * marker_separation_length
+
+ inner_w_px = board_cols * marker_px + (board_cols - 1) * sep_px
+ inner_h_px = board_rows * marker_px + (board_rows - 1) * sep_px
+
+ elif board_type == "charuco_board":
+ board = cv2.aruco.CharucoBoard(
+ (board_cols, board_rows),
+ square_length,
+ marker_length,
+ aruco_dictionary,
+ )
+
+ board_width_m = board_cols * square_length
+ board_height_m = board_rows * square_length
+
+ square_px = int(round(square_length * pixels_per_meter))
+ inner_w_px = board_cols * square_px
+ inner_h_px = board_rows * square_px
+
+ else:
+ raise ValueError(f"Wrong board type: {board_type}. Options: aruco_board, charuco_board")
+
+ margin_m = 0.2 * marker_length
+ margin_px = int(round(margin_m * pixels_per_meter))
+
+ img_inner = board.generateImage((inner_w_px, inner_h_px), marginSize=0, borderBits=1)
+
+ # Add margins manually: the marginSize argument of board.generateImage adds smaller margins vertically than horizontally
+ final_h = inner_h_px + 2 * margin_px
+ final_w = inner_w_px + 2 * margin_px
+ canvas = np.full((final_h, final_w), 255, dtype=img_inner.dtype)
+ canvas[margin_px:margin_px + inner_h_px, margin_px:margin_px + inner_w_px] = img_inner
+
+ total_width_m = board_width_m + 2 * margin_m
+ total_height_m = board_height_m + 2 * margin_m
+
+ if output_path.lower().endswith('.svg'):
+ save_svg(canvas, output_path, total_width_m, total_height_m)
+ else:
+ cv2.imwrite(output_path, canvas)
+
+ print(f"Aruco board saved to {output_path}")
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Generate an ArUco board board image.")
+ parser.add_argument("--board_type", type=str,default="charuco_board", help="Board type. Options: aruco_board, charuco_board")
+ parser.add_argument("--output", type=str, default="board.svg", help="Output image file path (.svg or raster like .png)")
+ parser.add_argument("--board_rows", type=int, default=5, help="Number of squares in Y direction")
+ parser.add_argument("--board_cols", type=int, default=5, help="Number of squares in X direction")
+ parser.add_argument("--square_length", type=float, default=0.2, help="Square length in meters")
+ parser.add_argument("--marker_separation_length", type=float, default=0.0125, help="Distance between markers in meters")
+ parser.add_argument("--marker_length", type=float, default=0.1, help="Marker length in meters")
+ parser.add_argument("--aruco_dict", type=str, default="DICT_6X6_1000", help="ArUco dictionary name")
+ parser.add_argument("--ppm", type=int, default=11811, help="Pixels per meter")
+ args = parser.parse_args()
+
+ generate_aruco_board(
+ args.board_type,
+ args.output,
+ args.board_rows,
+ args.board_cols,
+ args.square_length,
+ args.marker_separation_length,
+ args.marker_length,
+ args.aruco_dict,
+ args.ppm
+ )
\ No newline at end of file
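
As a usage sketch, the committed A3 ChArUco example could be regenerated by calling the generator directly (equivalent to running `python3 generate_board.py` with the matching flags). The output path is illustrative, and the `6x4` in the example file names is assumed to mean 6 columns by 4 rows:

```python
from generate_board import generate_aruco_board

# Assumed mapping of the example file name to parameters:
# A3_charuco_6x4_mar_len_003_sq_len_006_DICT6X6_1000.svg
generate_aruco_board(
    board_type="charuco_board",
    output_path="A3_charuco_6x4_mar_len_003_sq_len_006_DICT6X6_1000.svg",
    board_rows=4,
    board_cols=6,
    square_length=0.06,            # chess square side in meters
    marker_separation_length=0.0,  # only used for aruco_board targets
    marker_length=0.03,            # marker side in meters
    aruco_dict="DICT_6X6_1000",
    pixels_per_meter=11811,        # roughly 300 dpi
)
```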