diff --git a/.gitignore b/.gitignore index 7c67213b..0f642014 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,35 @@ trees/debug.xml ada_feeding_perception/test/food_img/ ada_feeding_perception/test/output/ ada_feeding_action_select/data + +# Debug/scratch tree +trees/debug.xml + +# =============================================================== +# Python Artifacts (Consolidated and Added) +# =============================================================== +__pycache__/ +*.pyc +.ada_venv/ +venv/ +.venv/ +env/ + +# =============================================================== +# Project-Specific Data, Plots, and Benchmark Outputs (Added) +# =============================================================== +# Ignore generated data, plots, and saved trajectories +ada_feeding/data/ + +# Ignore all benchmark and analysis output directories +ada_feeding/scripts/benchmark_analysis_plots/ +ada_feeding/scripts/e2e_benchmark_output/ +ada_feeding/scripts/holistic_benchmark_output/ +ada_feeding/scripts/manifold_data/ +ada_feeding/scripts/visualization_output/ + +# Ignore specific generated files in the scripts directory +ada_feeding/scripts/camera_poses.json + +# Ignore screenshots directory at the root +screenshots/ diff --git a/ada_feeding/CMakeLists.txt b/ada_feeding/CMakeLists.txt index 8c603ed3..d5ff44e9 100644 --- a/ada_feeding/CMakeLists.txt +++ b/ada_feeding/CMakeLists.txt @@ -37,6 +37,9 @@ install(PROGRAMS scripts/dummy_ft_sensor.py scripts/joint_state_latency.py scripts/planner_benchmark.py + scripts/planner_benchmark_analysis.py + scripts/orientation_constrained_planner_benchmark.py + scripts/visualize_trajectory.py scripts/robot_state_service.py scripts/save_image.py DESTINATION lib/${PROJECT_NAME} diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py b/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py index 4ab55a19..151e6b78 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py @@ -10,3 +10,7 @@ ComputeActionConstraints, ComputeActionTwist, ) +from .rotate_local_approach_poses import ( + RotateLocalApproachPoses, +) +from .conditionally_rotate_food_frame import ConditionallyRotateFoodFrame diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py index 5c12b8c3..fd7eee02 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py @@ -5,12 +5,12 @@ """ This module defines behaviors that take an AcquisitionSchema.msg object (or return from AcquisitionSelect.srv) and computes the outputs -needed to send MoveIt2Plan calls. +needed to send MoveIt2Plan calls and execute primitive actions. """ # Standard imports from copy import deepcopy -from typing import Union, Optional +from typing import Union, Optional, List # Third-party imports from geometry_msgs.msg import ( @@ -45,7 +45,7 @@ class ComputeActionConstraints(BlackboardBehavior): """ Checks AcquisitionSelect response, implements stochastic policy choice, and then decomposes into individual - BlackboardKey objects for MoveIt2 Behaviors. + BlackboardKey objects for MoveIt2 Behaviors and Primitive Actions. 
Also sets static TF from food -> approach frame """ @@ -93,6 +93,11 @@ def blackboard_outputs( ext_thresh: Optional[BlackboardKey], # SetParameters.Request action: Optional[BlackboardKey], # AcquisitionSchema.msg action_index: Optional[BlackboardKey], # int + post_move_into_primitive_name: Optional[BlackboardKey], + post_move_into_primitive_params: Optional[BlackboardKey], + post_acquisition_primitive_name: Optional[BlackboardKey], + post_acquisition_primitive_params: Optional[BlackboardKey], + should_align_to_base: Optional[BlackboardKey], ) -> None: """ Blackboard Outputs @@ -106,6 +111,10 @@ def blackboard_outputs( grasp_thresh: SetParameters request to set thresholds in grasp ext_thresh: SetParameters request to set thresholds for extraction action: AcquisitionSchema object to use in later computations + post_move_into_primitive_name: The name of the primitive to run after MoveInto. + post_move_into_primitive_params: The parameters for that primitive. + post_acquisition_primitive_name: The name of the primitive to run after acquisition. + post_acquisition_primitive_params: The parameters for that primitive. """ # pylint: disable=unused-argument, duplicate-code # Arguments are handled generically in base class. @@ -206,7 +215,41 @@ def update(self) -> py_trees.common.Status: create_ft_thresh_request(action.ext_force, action.ext_torque), ) - ### Final write to Blackboard + # Post-Move-Into Primitive + # post_move_into_name = ( + # action.post_move_into_primitive_name or "NONE" + # ) # Default to NONE + post_move_into_name = action.post_move_into_primitive_name + post_move_into_params = list(action.post_move_into_primitive_params) + self.blackboard_set("post_move_into_primitive_name", post_move_into_name) + self.blackboard_set( + "post_move_into_primitive_params", post_move_into_params + ) + self.logger.info( + f"[{self.name}] Set Post-MoveInto Primitive to: '{post_move_into_name}' with params: {post_move_into_params}" + ) + + # Post-Acquisition Primitive + # post_acquisition_name = ( + # action.post_acquisition_primitive_name or "NONE" + # ) # Default to NONE + post_acquisition_name = action.post_acquisition_primitive_name + post_acquisition_params = list(action.post_acquisition_primitive_params) + self.blackboard_set( + "post_acquisition_primitive_name", post_acquisition_name + ) + self.blackboard_set( + "post_acquisition_primitive_params", post_acquisition_params + ) + self.logger.info( + f"[{self.name}] Set Post-Acquisition Primitive to: '{post_acquisition_name}' with params: {post_acquisition_params}" + ) + + self.blackboard_set("should_align_to_base", action.align_to_robot_base) + self.logger.info( + f"[{self.name}] Set 'should_align_to_base' flag to: {action.align_to_robot_base}" + ) + self.blackboard_set("action", action) self.blackboard_set("action_index", index) return py_trees.common.Status.SUCCESS @@ -232,6 +275,7 @@ def blackboard_inputs( action: Union[BlackboardKey, AcquisitionSchema], is_grasp: Union[BlackboardKey, bool] = True, approach_frame_id: Union[BlackboardKey, str] = "approach", + group_name: Union[BlackboardKey, str] = "jaco_arm_with_articutool", ) -> None: """ Blackboard Inputs @@ -241,6 +285,7 @@ def blackboard_inputs( action: AcquisitionSchema msg object is_grasp: if true, use the grasp action elements, else use extraction approach_frame_id: approach frame defined in AcquisitionSchema.msg + group_name: The name of the MoveIt2 planning group """ # pylint: disable=unused-argument, duplicate-code # Arguments are handled generically in base class. 
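Note (illustration): the update() above publishes the selected post-primitive names, their parameters, and the align_to_robot_base flag to the blackboard. Below is a minimal, hypothetical sketch of how a downstream consumer could read those keys with the stock py_trees client API; in the real tree the keys are namespaced and consumed by dedicated behaviors, so the bare key names and the consumer itself are assumptions.

```python
# Illustrative only: hypothetical consumer of the keys written above.
# In the real tree these keys live under a behavior-specific blackboard
# namespace, which may differ from the bare names used here.
import py_trees

bb = py_trees.blackboard.Client(name="PrimitiveDispatcher")
for key in (
    "post_move_into_primitive_name",
    "post_move_into_primitive_params",
    "should_align_to_base",
):
    bb.register_key(key=key, access=py_trees.common.Access.READ)

primitive_name = bb.get("post_move_into_primitive_name")      # e.g. "NONE" or a primitive id
primitive_params = bb.get("post_move_into_primitive_params")  # list of floats from the schema
if primitive_name != "NONE":
    print(f"Would dispatch primitive '{primitive_name}' with params {primitive_params}")
```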
@@ -279,6 +324,9 @@ def setup(self, **kwargs): # Get Node from Kwargs self.node = kwargs["node"] + # Get group name from blackboard + self.group_name = self.blackboard_get("group_name") + # Get TF Listener from blackboard # For transform approach -> end_effector_frame self.tf_buffer, _, self.tf_lock = get_tf_object(self.blackboard, self.node) @@ -288,6 +336,7 @@ def setup(self, **kwargs): self.moveit2, self.moveit2_lock = get_moveit2_object( self.blackboard, self.node, + self.group_name, ) @override diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py index 0a65ec15..ce8c1183 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py @@ -4,14 +4,23 @@ """ This module defines the ComputeFoodFrame behavior, which computes the -food frame from the Mask provided from a perception algorithm. +food frame from the Mask provided from a perception algorithm, with an +option to align it to the robot's base. """ # Standard imports +import math from typing import Optional, Tuple, Union # Third-party imports import cv2 as cv -from geometry_msgs.msg import PointStamped, TransformStamped, Vector3Stamped +from geometry_msgs.msg import ( + PointStamped, + TransformStamped, + Vector3Stamped, + Pose, + Quaternion, + Vector3, +) import numpy as np import numpy.typing as npt from overrides import override @@ -20,6 +29,8 @@ import rclpy from sensor_msgs.msg import CameraInfo import tf2_ros +from scipy.spatial.transform import Rotation as R +import ros2_numpy # Local imports from ada_feeding_msgs.msg import Mask @@ -36,8 +47,10 @@ class ComputeFoodFrame(BlackboardBehavior): """ - Computes the food reference frame. - See definition in AcquisitionSchema.msg + Computes the food reference frame from a perception mask. + Optionally rotates this frame around its Z-axis to align its + local X-axis with the vector from the robot base to the food origin. + See definition in AcquisitionSchema.msg. """ # pylint: disable=arguments-differ @@ -48,6 +61,7 @@ class ComputeFoodFrame(BlackboardBehavior): # pylint: disable=too-many-arguments # These are effectively config definitions # They require a lot of arguments. + EPSILON = 1e-6 def blackboard_inputs( self, @@ -56,6 +70,8 @@ def blackboard_inputs( timestamp: Union[BlackboardKey, rclpy.time.Time] = rclpy.time.Time(), food_frame_id: Union[BlackboardKey, str] = "food", world_frame: Union[BlackboardKey, str] = "world", + robot_base_frame_id: Union[BlackboardKey, str] = "j2n6s200_link_base", + align_to_robot_base: Union[BlackboardKey, bool] = True, flip_food_frame: Union[BlackboardKey, bool] = False, ) -> None: """ @@ -70,6 +86,9 @@ def blackboard_inputs( food_frame_id (string): If len>0, TF frame to publish static transform (relative to world_frame) world_frame (string): ID of the TF frame to represent the food frame in + robot_base_frame_id (string): ID of the robot's base frame for alignment. + align_to_robot_base (bool): If true, rotates the computed food_frame + around its Z-axis to align with the robot. 
flip_food_frame (bool): whether to rotate the food frame 180 about Z """ # pylint: disable=unused-argument, duplicate-code @@ -217,6 +236,10 @@ def get_mask_center( return (mask.roi.x_offset + c_x, mask.roi.y_offset + c_y, median_depth) + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + @override def update(self) -> py_trees.common.Status: # Docstring copied from @override @@ -327,15 +350,71 @@ def update(self) -> py_trees.common.Status: # Project to world x-y plane x_pos.vector.z = 0.0 + world_food_initial_quat_xyzw = quat_between_vectors( + Vector3(x=1.0), x_pos.vector + ) + + R_world_food_initial = R.from_quat( + [ + world_food_initial_quat_xyzw.x, + world_food_initial_quat_xyzw.y, + world_food_initial_quat_xyzw.z, + world_food_initial_quat_xyzw.w, + ] + ) + + # Conditionally rotate the food frame to align with the robot base + R_correction_z = R.identity() # Start with no correction + if self.blackboard_get("align_to_robot_base"): + P_food_in_rb = ros2_numpy.numpify(center.point) + L_xy_rb_numpy = np.array([P_food_in_rb[0], P_food_in_rb[1]]) + + if np.linalg.norm(L_xy_rb_numpy) < self.EPSILON: + self.logger.info( + f"[{self.name}] Food origin too close to robot base. No alignment rotation applied." + ) + else: + desired_global_approach_yaw_rb = math.atan2( + L_xy_rb_numpy[1], L_xy_rb_numpy[0] + ) + + X_axis_in_food_frame = np.array([1.0, 0.0, 0.0]) + X_axis_in_rb_frame = R_world_food_initial.apply(X_axis_in_food_frame) + X_axis_xy_in_rb_frame = np.array( + [X_axis_in_rb_frame[0], X_axis_in_rb_frame[1]] + ) + + if np.linalg.norm(X_axis_xy_in_rb_frame) < self.EPSILON: + self.logger.warn( + f"[{self.name}] Food frame X-axis has no projection in robot base XY plane. Cannot align." + ) + else: + current_food_x_yaw_rb = math.atan2( + X_axis_xy_in_rb_frame[1], X_axis_xy_in_rb_frame[0] + ) + delta_psi_food_z = self._normalize_angle( + desired_global_approach_yaw_rb - current_food_x_yaw_rb + ) + + self.logger.info( + f"[{self.name}] Applying alignment rotation of {math.degrees(delta_psi_food_z):.1f} deg to food frame." + ) + R_correction_z = R.from_euler("z", delta_psi_food_z) + + # --- Final Transform Calculation --- + R_world_food_final = R_world_food_initial * R_correction_z + # Convert to TransformStamped world_to_food_transform.transform.translation.x = center.point.x world_to_food_transform.transform.translation.y = center.point.y world_to_food_transform.transform.translation.z = center.point.z - x_unit = Vector3Stamped() - x_unit.vector.x = 1.0 - world_to_food_transform.transform.rotation = quat_between_vectors( - x_unit.vector, x_pos.vector + final_quat_xyzw = R_world_food_final.as_quat() + world_to_food_transform.transform.rotation = Quaternion( + x=final_quat_xyzw[0], + y=final_quat_xyzw[1], + z=final_quat_xyzw[2], + w=final_quat_xyzw[3], ) # # If you need to send a fixed food frame to the robot arm, e.g., to diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/conditionally_rotate_food_frame.py b/ada_feeding/ada_feeding/behaviors/acquisition/conditionally_rotate_food_frame.py new file mode 100644 index 00000000..8f324cd5 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/acquisition/conditionally_rotate_food_frame.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the ConditionallyRotateFoodFrame behavior. 
+It takes an existing food_frame transform and, based on a boolean flag, +rotates it around its Z-axis to align with the robot's approach vector. +It then re-publishes the updated static TF transform. +""" + +import math +from typing import Union, Optional +from copy import deepcopy + +from geometry_msgs.msg import TransformStamped +import numpy as np +from overrides import override +import py_trees +from py_trees.common import Status +import rclpy +from rclpy.node import Node +from scipy.spatial.transform import Rotation as R +import ros2_numpy +import tf2_ros + +from ada_feeding.behaviors import BlackboardBehavior +from ada_feeding.helpers import BlackboardKey, get_tf_object, set_static_tf + + +class ConditionallyRotateFoodFrame(BlackboardBehavior): + """ + Conditionally rotates the food_frame around its Z-axis to align its + local X-axis with the vector from the robot base to the food origin. + This behavior reads the decision from the loaded action schema. + """ + + EPSILON = 1e-6 + + def blackboard_inputs( + self, + # The initial, un-aligned food_frame transform + initial_food_frame: Union[BlackboardKey, TransformStamped], + # The boolean flag read from the action schema + should_align_to_base: Union[BlackboardKey, bool], + # The frames needed for the calculation + food_frame_id: Union[BlackboardKey, str] = "food", + robot_base_frame_id: Union[BlackboardKey, str] = "j2n6s200_link_base", + ) -> None: + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + # This behavior UPDATES the food_frame on the blackboard, so it needs + # to declare it as an output to get write access. + # It reads the key from its inputs and writes back to the same key. + food_frame_updated: BlackboardKey, + ) -> None: + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[Node] = None + self.tf_buffer: Optional[tf2_ros.Buffer] = None + self.tf_listener: Optional[tf2_ros.TransformListener] = None + # _initialized_properly is handled by the base class setup + + @override + def setup(self, **kwargs): + # This setup pattern is from your original code + try: + self.node = kwargs["node"] + self.tf_buffer, _, self.tf_lock = get_tf_object(self.blackboard, self.node) + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + except Exception as e: + self.logger.error(f"[{self.name}] Error during setup: {e}") + + def _normalize_angle(self, angle: float) -> float: + return (angle + math.pi) % (2 * math.pi) - math.pi + + @override + def update(self) -> Status: + if not hasattr(self, "node") or self.node is None or self.tf_buffer is None: + self.feedback_message = "Behavior not properly initialized." + return Status.FAILURE + + try: + should_align = self.blackboard_get("should_align_to_base") + + if not should_align: + self.feedback_message = ( + "Alignment not required by action schema. Succeeding." + ) + self.logger.info(f"[{self.name}] {self.feedback_message}") + # We still need to write the original frame to the output key + # so the rest of the tree has a consistent key to read from. 
+ original_transform: TransformStamped = self.blackboard_get( + "initial_food_frame" + ) + self.blackboard_set("food_frame_updated", original_transform) + return Status.SUCCESS + + initial_transform: TransformStamped = self.blackboard_get( + "initial_food_frame" + ) + food_fid: str = self.blackboard_get("food_frame_id") + robot_base_fid: str = self.blackboard_get("robot_base_frame_id") + + if initial_transform is None: + self.feedback_message = ( + "Initial food_frame transform not found on blackboard." + ) + return Status.FAILURE + + # This logic assumes world_frame and robot_base_frame are the same. + P_food_in_rb = ros2_numpy.numpify(initial_transform.transform.translation) + R_rb_food_initial = R.from_quat( + ros2_numpy.numpify(initial_transform.transform.rotation) + ) + + # --- Rotation logic from RotateLocalApproachPoses --- + L_xy_rb_numpy = np.array([P_food_in_rb[0], P_food_in_rb[1]]) + if np.linalg.norm(L_xy_rb_numpy) < self.EPSILON: + self.logger.info( + f"[{self.name}] Food origin too close to robot base. No alignment rotation applied." + ) + # Pass through the original transform + self.blackboard_set("food_frame_updated", initial_transform) + return Status.SUCCESS + + desired_global_approach_yaw_rb = math.atan2( + L_xy_rb_numpy[1], L_xy_rb_numpy[0] + ) + + X_axis_in_food_frame = np.array([1.0, 0.0, 0.0]) + X_axis_in_rb_frame = R_rb_food_initial.apply(X_axis_in_food_frame) + X_axis_xy_in_rb_frame = np.array( + [X_axis_in_rb_frame[0], X_axis_in_rb_frame[1]] + ) + + if np.linalg.norm(X_axis_xy_in_rb_frame) < self.EPSILON: + self.logger.warn( + f"[{self.name}] Food frame's X-axis has no projection. Cannot align." + ) + self.blackboard_set( + "food_frame_updated", initial_transform + ) # Pass through + return Status.SUCCESS + + current_food_x_yaw_rb = math.atan2( + X_axis_xy_in_rb_frame[1], X_axis_xy_in_rb_frame[0] + ) + delta_psi_food_z = self._normalize_angle( + desired_global_approach_yaw_rb - current_food_x_yaw_rb + ) + + self.logger.info( + f"[{self.name}] Applying alignment rotation of {math.degrees(delta_psi_food_z):.1f} deg to food frame." + ) + R_correction_z = R.from_euler("z", delta_psi_food_z) + + # --- Apply the rotation and prepare the final transform --- + R_world_food_final = R_rb_food_initial * R_correction_z + + final_transform = deepcopy(initial_transform) + final_quat_xyzw = R_world_food_final.as_quat() + final_transform.transform.rotation.x = final_quat_xyzw[0] + final_transform.transform.rotation.y = final_quat_xyzw[1] + final_transform.transform.rotation.z = final_quat_xyzw[2] + final_transform.transform.rotation.w = final_quat_xyzw[3] + + # Re-publish the static TF with the new, rotated orientation + set_static_tf(final_transform, self.blackboard, self.node) + + # Write the final transform to the output blackboard key + self.blackboard_set("food_frame_updated", final_transform) + + self.feedback_message = ( + f"Food frame realigned by {math.degrees(delta_psi_food_z):.1f} deg." 
+ ) + return Status.SUCCESS + + except Exception as e: + self.feedback_message = f"Unexpected error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/rotate_local_approach_poses.py b/ada_feeding/ada_feeding/behaviors/acquisition/rotate_local_approach_poses.py new file mode 100644 index 00000000..c0e87b6a --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/acquisition/rotate_local_approach_poses.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the RotateLocalApproachPoses behavior. +It takes original MoveAbove and MoveInto poses defined in a food_frame, +and rotates them around the food_frame's Z-axis. The rotation angle +is calculated to align the food_frame's Y-axis (when projected onto the +robot_base_frame's XY plane) with the vector from the robot_base_frame's +origin to the food_frame's origin (also projected onto the robot_base_frame's XY plane). +This is intended to align the tool's functional direction (assumed to be +oriented along the food_frame's Y-axis by a pre_quat) with the +robot-to-food line. +""" + +# Standard imports +import math +from typing import Union, Optional + +# Third-party imports +from geometry_msgs.msg import Pose, Point as PointMsg, Quaternion as QuaternionMsg +import numpy as np +from overrides import override +import py_trees +from py_trees.common import Status +import rclpy +from rclpy.node import Node +from scipy.spatial.transform import Rotation as R +import ros2_numpy + +# TF2 imports +import tf2_ros + +# Local imports +from ada_feeding.behaviors import BlackboardBehavior +from ada_feeding.helpers import BlackboardKey + + +class RotateLocalApproachPoses(BlackboardBehavior): + """ + Rotates local MoveAbove and MoveInto poses around the Z-axis of their + defining food_frame to align the food_frame's Y-axis (and thus a + tool Z-axis aligned with it) with the robot-base-to-food-origin direction. + """ + + EPSILON = 1e-6 + + def blackboard_inputs( + self, + move_above_pose_in_food_frame_orig: Union[BlackboardKey, Pose], + move_into_pose_in_food_frame_orig: Union[BlackboardKey, Pose], + food_frame_id: Union[BlackboardKey, str] = "food", + robot_base_frame_id: Union[BlackboardKey, str] = "j2n6s200_link_base", + ) -> None: + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + move_above_pose_in_food_frame_rotated: Optional[BlackboardKey], + move_into_pose_in_food_frame_rotated: Optional[BlackboardKey], + ) -> None: + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[Node] = None + self.tf_buffer: Optional[tf2_ros.Buffer] = None + self.tf_listener: Optional[tf2_ros.TransformListener] = None + self._initialized_properly = False + + @override + def setup(self, **kwargs): + try: + self.node = kwargs["node"] + self.tf_buffer = tf2_ros.Buffer(node=self.node) + self.tf_listener = tf2_ros.TransformListener( + self.tf_buffer, self.node, spin_thread=False + ) + self.logger.debug(f"[{self.name}] TF Buffer and Listener initialized.") + self._initialized_properly = True + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. 
{e}" + ) + self._initialized_properly = False + except Exception as e: + self.logger.error(f"[{self.name}] Error during setup: {e}") + self._initialized_properly = False + + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + @override + def update(self) -> Status: + if not self._initialized_properly or not self.node or not self.tf_buffer: + self.feedback_message = "Behavior not properly initialized in setup." + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + try: + ma_pose_orig_food: Pose = self.blackboard_get( + "move_above_pose_in_food_frame_orig" + ) + mi_pose_orig_food: Pose = self.blackboard_get( + "move_into_pose_in_food_frame_orig" + ) + food_fid: str = self.blackboard_get("food_frame_id") + robot_base_fid: str = self.blackboard_get("robot_base_frame_id") + + # 1. Get current transform T_RobotBase_FoodOriginal + try: + t_rb_food_stamped: TransformStamped = self.tf_buffer.lookup_transform( + robot_base_fid, + food_fid, + rclpy.time.Time(), + rclpy.duration.Duration(seconds=1.0), + ) + except Exception as e: + self.feedback_message = ( + f"TF lookup {robot_base_fid} -> {food_fid} failed: {e}" + ) + self.logger.warn( + f"[{self.name}] {self.feedback_message}. Retrying if BT ticks again." + ) + return Status.RUNNING + + R_rb_food = R.from_quat( + [ + t_rb_food_stamped.transform.rotation.x, + t_rb_food_stamped.transform.rotation.y, + t_rb_food_stamped.transform.rotation.z, + t_rb_food_stamped.transform.rotation.w, + ] + ) + P_food_in_rb = ros2_numpy.numpify(t_rb_food_stamped.transform.translation) + + # 2. Calculate Desired Approach Line in Robot Base XY plane (L_xy_rb) + L_xy_rb_numpy = np.array( + [P_food_in_rb[0], P_food_in_rb[1]] + ) # Only XY components + desired_global_approach_yaw_rb: float + if np.linalg.norm(L_xy_rb_numpy) < self.EPSILON: + self.logger.info( + f"[{self.name}] Food origin too close to robot base XY origin. Desired global yaw is ill-defined, using 0." + ) + desired_global_approach_yaw_rb = 0.0 + # If desired direction is undefined, delta_psi should ideally make current X-axis align with robot_base X-axis, + # or simply make delta_psi = 0 if current_food_x_yaw_rb is also ill-defined. + # For now, if desired is ill-defined, we'll effectively try to rotate food X to robot X. + else: + L_xy_rb_norm_vec = L_xy_rb_numpy / np.linalg.norm(L_xy_rb_numpy) + desired_global_approach_yaw_rb = math.atan2( + L_xy_rb_norm_vec[1], L_xy_rb_norm_vec[0] + ) + + # 3. Calculate Current Direction of Food Frame's X-axis in Robot Base XY plane + X_axis_in_food_frame = np.array([1.0, 0.0, 0.0]) # Food Frame's X-axis + X_axis_in_rb_frame = R_rb_food.apply( + X_axis_in_food_frame + ) # Transform to robot base + X_axis_xy_in_rb_frame = np.array( + [X_axis_in_rb_frame[0], X_axis_in_rb_frame[1]] + ) + + current_food_x_yaw_rb: float + if np.linalg.norm(X_axis_xy_in_rb_frame) < self.EPSILON: + self.logger.warn( + f"[{self.name}] Food frame's X-axis has near-zero XY projection in robot base. " + f"This implies food_frame Z is nearly aligned with robot_base XY plane (e.g. food frame is 'on its side'). " + f"Cannot reliably determine current food_X_yaw. Assuming 0 adjustment needed relative to this." + ) + # If current X-axis has no defined yaw, any desired_global_approach_yaw_rb becomes the delta directly, + # but this might not be what we want. It's better to make no change if current state is ambiguous. 
+ delta_psi_food_z = 0.0 + else: + X_axis_xy_in_rb_norm_vec = X_axis_xy_in_rb_frame / np.linalg.norm( + X_axis_xy_in_rb_frame + ) + current_food_x_yaw_rb = math.atan2( + X_axis_xy_in_rb_norm_vec[1], X_axis_xy_in_rb_norm_vec[0] + ) + + # 4. Calculate Yaw Correction delta_psi_food_z + # This is the rotation around the food_frame's Z-axis needed to align its X-axis + # (projected to robot_base XY) with the desired_global_approach_yaw_rb. + delta_psi_food_z = self._normalize_angle( + desired_global_approach_yaw_rb - current_food_x_yaw_rb + ) + self.logger.info( + f"[{self.name}] Aligning Food_X to base-food line. " + f"Current Food_X Yaw (in RB): {math.degrees(current_food_x_yaw_rb):.1f} deg. " + f"Desired Global Yaw (for Food_X in RB): {math.degrees(desired_global_approach_yaw_rb):.1f} deg. " + f"Required Food_Z Rotation: {math.degrees(delta_psi_food_z):.1f} deg." + ) + + # 5. Apply rotation to local poses (around Z-axis of food_frame) + R_correction_in_food_z = R.from_euler("z", delta_psi_food_z) + + # Rotate MoveAbove pose + P_ma_orig_food_numpy = ros2_numpy.numpify(ma_pose_orig_food.position) + R_ma_orig_food_scipy = R.from_quat( + [ + ma_pose_orig_food.orientation.x, + ma_pose_orig_food.orientation.y, + ma_pose_orig_food.orientation.z, + ma_pose_orig_food.orientation.w, + ] + ) + P_ma_rotated_food_numpy = R_correction_in_food_z.apply(P_ma_orig_food_numpy) + R_ma_rotated_food_scipy = R_correction_in_food_z * R_ma_orig_food_scipy + + ma_pose_rotated_food = Pose() + ma_pose_rotated_food.position = ros2_numpy.msgify( + PointMsg, P_ma_rotated_food_numpy + ) + ma_quat_xyzw_rotated = R_ma_rotated_food_scipy.as_quat() + ma_pose_rotated_food.orientation.x = ma_quat_xyzw_rotated[0] + ma_pose_rotated_food.orientation.y = ma_quat_xyzw_rotated[1] + ma_pose_rotated_food.orientation.z = ma_quat_xyzw_rotated[2] + ma_pose_rotated_food.orientation.w = ma_quat_xyzw_rotated[3] + self.blackboard_set( + "move_above_pose_in_food_frame_rotated", ma_pose_rotated_food + ) + + # Rotate MoveInto pose + P_mi_orig_food_numpy = ros2_numpy.numpify(mi_pose_orig_food.position) + R_mi_orig_food_scipy = R.from_quat( + [ + mi_pose_orig_food.orientation.x, + mi_pose_orig_food.orientation.y, + mi_pose_orig_food.orientation.z, + mi_pose_orig_food.orientation.w, + ] + ) # Should be same as R_ma_orig_food_scipy + P_mi_rotated_food_numpy = R_correction_in_food_z.apply(P_mi_orig_food_numpy) + R_mi_rotated_food_scipy = R_correction_in_food_z * R_mi_orig_food_scipy + + mi_pose_rotated_food = Pose() + mi_pose_rotated_food.position = ros2_numpy.msgify( + PointMsg, P_mi_rotated_food_numpy + ) + mi_quat_xyzw_rotated = R_mi_rotated_food_scipy.as_quat() + mi_pose_rotated_food.orientation.x = mi_quat_xyzw_rotated[0] + mi_pose_rotated_food.orientation.y = mi_quat_xyzw_rotated[1] + mi_pose_rotated_food.orientation.z = mi_quat_xyzw_rotated[2] + mi_pose_rotated_food.orientation.w = mi_quat_xyzw_rotated[3] + self.blackboard_set( + "move_into_pose_in_food_frame_rotated", mi_pose_rotated_food + ) + + self.feedback_message = f"Local poses rotated by {math.degrees(delta_psi_food_z):.1f} deg around food Z." 
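Note (illustration): the yaw correction computed in steps 2-4 above reduces to an atan2 difference wrapped to [-pi, pi]. A standalone sketch of the same math with made-up numbers:

```python
# Standalone sketch of the yaw-correction math used above, with made-up numbers.
import math
import numpy as np
from scipy.spatial.transform import Rotation as R

def normalize_angle(a: float) -> float:
    """Wrap an angle to [-pi, pi]."""
    return (a + math.pi) % (2 * math.pi) - math.pi

# Food origin 0.4 m ahead and 0.3 m to the left of the robot base (XY only).
p_food_xy = np.array([0.4, 0.3])
desired_yaw = math.atan2(p_food_xy[1], p_food_xy[0])  # ~36.9 deg

# Food frame currently rotated 120 deg about Z relative to the robot base.
R_rb_food = R.from_euler("z", math.radians(120.0))
x_axis_rb = R_rb_food.apply([1.0, 0.0, 0.0])
current_yaw = math.atan2(x_axis_rb[1], x_axis_rb[0])  # 120 deg

delta_psi = normalize_angle(desired_yaw - current_yaw)  # ~-83.1 deg
print(f"Correction about food Z: {math.degrees(delta_psi):.1f} deg")
```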
+ return Status.SUCCESS + + except KeyError as e: + self.feedback_message = f"Blackboard key error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + except Exception as e: + self.feedback_message = f"Unexpected error in {self.name}: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/articutool/__init__.py b/ada_feeding/ada_feeding/behaviors/articutool/__init__.py new file mode 100644 index 00000000..bf126214 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +from .execute_articutool_trajectory import ( + ExecuteArticutoolTrajectory, +) +from .call_set_orientation_control import ( + CallSetOrientationControl, +) +from .switch_articutool_controllers import ( + SwitchArticutoolControllers, +) +from .compute_articutool_leveling_joints import ComputeArticutoolLevelingJoints +from .trigger_articutool_calibration import TriggerArticutoolCalibration +from .execute_named_primitive import ExecuteNamedPrimitive diff --git a/ada_feeding/ada_feeding/behaviors/articutool/call_set_orientation_control.py b/ada_feeding/ada_feeding/behaviors/articutool/call_set_orientation_control.py new file mode 100644 index 00000000..3c456a59 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/call_set_orientation_control.py @@ -0,0 +1,329 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the CallSetOrientationControl behavior, which calls the service +to set the Articutool orientation controller's mode and targets. +""" + +import rclpy +from rclpy.node import Node +from rclpy.executors import Future # For type hints + +from typing import Union, Optional, Dict, Any, List, Tuple +import math + +from geometry_msgs.msg import Quaternion +from articutool_interfaces.srv import SetOrientationControl + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import numpy as np + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class CallSetOrientationControl(BlackboardBehavior): + """ + Calls the SetOrientationControl service to set the Articutool's + orientation control mode and corresponding targets. + + Returns SUCCESS if the service call completes and the service response indicates success. + Returns FAILURE if the service is unavailable, the call fails, or the service response indicates failure. + Returns RUNNING while waiting for the service response. 
+ """ + + DEFAULT_SERVICE_NAME = "/orientation_control/set_orientation_control_mode" + DEFAULT_WAIT_TIMEOUT_SEC = 1.0 + + def blackboard_inputs( + self, + control_mode: Union[ + BlackboardKey, int + ], # MODE_DISABLED, MODE_LEVELING, MODE_FULL_ORIENTATION + pitch_offset_deg: Optional[ + Union[BlackboardKey, float] + ] = 0.0, # For MODE_LEVELING + roll_offset_deg: Optional[ + Union[BlackboardKey, float] + ] = 0.0, # For MODE_LEVELING + target_orientation_robot_base_quat: Optional[ + Union[BlackboardKey, Quaternion, List[float], Tuple[float, ...]] + ] = None, # For MODE_FULL_ORIENTATION + service_name: Union[BlackboardKey, str] = DEFAULT_SERVICE_NAME, + wait_for_server_timeout_sec: Union[ + BlackboardKey, float + ] = DEFAULT_WAIT_TIMEOUT_SEC, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + control_mode: The desired control mode (e.g., SetOrientationControl.Request.MODE_LEVELING). + pitch_offset_deg: Target pitch offset in degrees (for MODE_LEVELING). + roll_offset_deg: Target roll offset in degrees (for MODE_LEVELING). + target_orientation_robot_base_quat: Target orientation as Quaternion msg or [x,y,z,w] list/tuple + (for MODE_FULL_ORIENTATION), relative to robot_base_frame. + service_name: Name of the SetOrientationControl service. + wait_for_server_timeout_sec: Max time (sec) to wait for server. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + service_call_succeeded: Optional[BlackboardKey] = None, + service_response_success: Optional[BlackboardKey] = None, + service_response_message: Optional[BlackboardKey] = None, + ) -> None: + """ + Blackboard Outputs + (Same as before) + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def setup(self, **kwargs): + """Get the node and create the service client.""" + self.node: Optional[Node] = kwargs.get("node") # Use .get() for safety + self.client = None + if not self.node: + self.logger.error( + f"[{self.name}] Node object not provided in setup kwargs." + ) + return + + try: + # It's better to get these once in setup if they don't change per tick + # Or get them in update if they can change dynamically from the blackboard + self._service_name_str = self.blackboard_get("service_name") + self._wait_timeout_float = self.blackboard_get( + "wait_for_server_timeout_sec" + ) + + self.client = self.node.create_client( + SetOrientationControl, self._service_name_str + ) + self.logger.info( + f"[{self.name}] Service client created for '{self._service_name_str}'" + ) + except KeyError as e: + self.logger.error(f"[{self.name}] Missing blackboard key during setup: {e}") + except Exception as e: + self.logger.error(f"[{self.name}] Failed to create service client: {e}") + + def initialise(self) -> None: + """Reset the future.""" + self.service_future: Optional[Future] = None + self.logger.debug(f"[{self.name}] Initializing service call state.") + self.blackboard_set("service_call_succeeded", False) + self.blackboard_set("service_response_success", False) + self.blackboard_set("service_response_message", "") + + def update(self) -> Status: + """Manage the asynchronous service call state machine.""" + if self.client is None: + self.logger.error( + f"[{self.name}] Service client not available. Setup likely failed or did not complete." 
+ ) + return Status.FAILURE + + # --- State 1: Ready to Call --- + if self.service_future is None: + if ( + self._wait_timeout_float >= 0.0 + ): # Check server availability only if timeout is non-negative + if not self.client.wait_for_service( + timeout_sec=self._wait_timeout_float + ): + self.logger.error( + f"[{self.name}] Service '{self._service_name_str}' not available after {self._wait_timeout_float}s timeout." + ) + return Status.FAILURE + + try: + # Read all necessary inputs from blackboard for this call + control_mode_input = self.blackboard_get("control_mode") + + # Validate control_mode + if control_mode_input not in [ + SetOrientationControl.Request.MODE_DISABLED, + SetOrientationControl.Request.MODE_LEVELING, + SetOrientationControl.Request.MODE_FULL_ORIENTATION, + ]: + self.logger.error( + f"[{self.name}] Invalid 'control_mode' input: {control_mode_input}." + ) + return Status.FAILURE + + req = SetOrientationControl.Request() + req.control_mode = int(control_mode_input) # Ensure it's int + + # Populate fields based on mode + if req.control_mode == SetOrientationControl.Request.MODE_LEVELING: + pitch_deg = self.blackboard_get( + "pitch_offset_deg" + ) # Default to 0 if not found + roll_deg = self.blackboard_get( + "roll_offset_deg" + ) # Default to 0 if not found + req.pitch_offset = float(math.radians(pitch_deg)) + req.roll_offset = float(math.radians(roll_deg)) + # Set a default for target_orientation_robot_base when not in MODE_FULL_ORIENTATION + req.target_orientation_robot_base = Quaternion( + x=0.0, y=0.0, z=0.0, w=1.0 + ) + self.logger.info( + f"[{self.name}] Setting MODE_LEVELING with pitch_offset_rad={req.pitch_offset:.3f}, roll_offset_rad={req.roll_offset:.3f}" + ) + + elif ( + req.control_mode + == SetOrientationControl.Request.MODE_FULL_ORIENTATION + ): + target_orient_input = self.blackboard_get( + "target_orientation_robot_base_quat" + ) + if target_orient_input is None: + self.logger.error( + f"[{self.name}] 'target_orientation_robot_base_quat' is required for MODE_FULL_ORIENTATION but not found or None." + ) + return Status.FAILURE + + temp_quat_msg = Quaternion() + if isinstance(target_orient_input, Quaternion): + temp_quat_msg = target_orient_input + elif ( + isinstance(target_orient_input, (list, tuple)) + and len(target_orient_input) == 4 + ): + try: + temp_quat_msg.x = float(target_orient_input[0]) + temp_quat_msg.y = float(target_orient_input[1]) + temp_quat_msg.z = float(target_orient_input[2]) + temp_quat_msg.w = float(target_orient_input[3]) + norm = np.linalg.norm( + [ + temp_quat_msg.x, + temp_quat_msg.y, + temp_quat_msg.z, + temp_quat_msg.w, + ] + ) + if not np.isclose(norm, 1.0, atol=0.01) and not np.isclose( + norm, 0.0, atol=0.01 + ): # Allow zero quat if it means "don't care" + self.logger.warning( + f"Input target_orientation_robot_base_quat norm is {norm:.3f}, not 1.0." + ) + except (ValueError, TypeError, IndexError) as e: + self.logger.error( + f"Could not convert list/tuple {target_orient_input} to Quaternion: {e}" + ) + return Status.FAILURE + else: + self.logger.error( + f"[{self.name}] Input 'target_orientation_robot_base_quat' invalid type {type(target_orient_input)} for MODE_FULL_ORIENTATION." 
+ ) + return Status.FAILURE + req.target_orientation_robot_base = temp_quat_msg + # Set defaults for leveling offsets when not in MODE_LEVELING + req.pitch_offset = 0.0 + req.roll_offset = 0.0 + self.logger.info( + f"[{self.name}] Setting MODE_FULL_ORIENTATION with target_quat (xyzw): [{req.target_orientation_robot_base.x:.3f}, {req.target_orientation_robot_base.y:.3f}, {req.target_orientation_robot_base.z:.3f}, {req.target_orientation_robot_base.w:.3f}]" + ) + + else: # MODE_DISABLED or other + # Set defaults for all orientation fields + req.pitch_offset = 0.0 + req.roll_offset = 0.0 + req.target_orientation_robot_base = Quaternion( + x=0.0, y=0.0, z=0.0, w=1.0 + ) + self.logger.info( + f"[{self.name}] Setting control_mode to {req.control_mode} (likely DISABLED)." + ) + + self.logger.info( + f"[{self.name}] Calling service '{self._service_name_str}' with mode={req.control_mode}..." + ) + self.service_future = self.client.call_async(req) + return Status.RUNNING + + except KeyError as e: + self.logger.error( + f"[{self.name}] Blackboard key error preparing service call: {e}" + ) + return Status.FAILURE + except Exception as e: + self.logger.error(f"[{self.name}] Error preparing service call: {e}") + return Status.FAILURE + + # --- State 2: Waiting for Response --- + elif ( + self.service_future and not self.service_future.done() + ): # Added check for self.service_future not None + self.logger.debug(f"[{self.name}] Waiting for service response...") + return Status.RUNNING + + # --- State 3: Processing Response --- + elif self.service_future: # Ensure future exists before trying to get result + self.logger.debug(f"[{self.name}] Service call future done.") + response_payload = None + try: + response_payload = self.service_future.result() + self.blackboard_set("service_call_succeeded", True) + + if response_payload is not None: + self.blackboard_set( + "service_response_success", response_payload.success + ) + self.blackboard_set( + "service_response_message", response_payload.message + ) + self.logger.info( + f"[{self.name}] Service response: success={response_payload.success}, msg='{response_payload.message}'" + ) + final_status = ( + Status.SUCCESS if response_payload.success else Status.FAILURE + ) + else: + self.logger.error( + f"[{self.name}] Service call future done but result is None." + ) + self.blackboard_set("service_response_success", False) + final_status = Status.FAILURE + except Exception as e: + self.logger.error( + f"[{self.name}] Service call failed with exception: {e}", + ) + self.blackboard_set("service_call_succeeded", False) + self.blackboard_set("service_response_success", False) + self.blackboard_set("service_response_message", f"Exception: {e}") + final_status = Status.FAILURE + + self.service_future = None # Reset for next call + return final_status + + # Should not be reached if logic is correct + self.logger.warn(f"[{self.name}] Reached unexpected state in update method.") + return Status.FAILURE + + def terminate(self, new_status: Status) -> None: + """Log termination status and clear future if needed.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + if self.service_future is not None and not self.service_future.done(): + self.logger.warning( + f"[{self.name}] Service call terminated while waiting for response." 
+ ) + # Future cannot be truly cancelled easily, just abandoned + self.service_future = None diff --git a/ada_feeding/ada_feeding/behaviors/articutool/compute_articutool_leveling_joints.py b/ada_feeding/ada_feeding/behaviors/articutool/compute_articutool_leveling_joints.py new file mode 100644 index 00000000..e81e2b90 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/compute_articutool_leveling_joints.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the ComputeArticutoolLevelingJoints behavior. +Given the Jaco arm's end-effector world pose, this behavior calculates +the Articutool joint angles (pitch, roll) required to make the +Articutool's tool_tip Y-axis point upwards against gravity (world +Z). +""" + +# Standard imports +import math +from typing import Union, Optional, List, Tuple + +# Third-party imports +from geometry_msgs.msg import Pose, PoseStamped, Quaternion as QuaternionMsg +import numpy as np +from overrides import override +from scipy.spatial.transform import Rotation as R +import py_trees +from py_trees.common import Status, Access +import rclpy +from rclpy.node import Node + +# Local imports +from ada_feeding.behaviors import BlackboardBehavior # Assuming this is your base class +from ada_feeding.helpers import BlackboardKey + + +class ComputeArticutoolLevelingJoints(BlackboardBehavior): + """ + Calculates Articutool joint angles for its tool_tip Y-axis to align with World +Z. + """ + + EPSILON = 1e-6 + WORLD_Z_UP_VECTOR = np.array([0.0, 0.0, 1.0]) # Target direction in World Frame + + def blackboard_inputs( + self, + jaco_ee_world_pose: Union[ + BlackboardKey, Pose, PoseStamped + ], # Current pose of Jaco EE in world + articutool_pitch_limits_rad: Union[BlackboardKey, Tuple[float, float]] = ( + -math.pi / 2, + math.pi / 2, + ), + articutool_roll_limits_rad: Union[BlackboardKey, Tuple[float, float]] = ( + -math.pi, + math.pi, + ), + ) -> None: + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + articutool_joint_positions: Union[BlackboardKey, List[float]], + articutool_leveling_ik_found: Optional[BlackboardKey], + ) -> None: + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[Node] = None + self._initialized_properly = False + + @override + def setup(self, **kwargs): + try: + self.node = kwargs["node"] + self._initialized_properly = True + self.logger.debug(f"[{self.name}] Behavior initialized.") + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + self._initialized_properly = False + except Exception as e: + self.logger.error(f"[{self.name}] Error during setup: {e}") + self._initialized_properly = False + + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _solve_articutool_ik_for_leveling( + self, target_y_axis_in_jaco_hand_frame: np.ndarray + ) -> List[Tuple[float, float]]: + """ + Analytical IK solver for the 2-DOF Articutool. 
+ Input: target_y_axis_in_jaco_hand_frame (3D numpy array) - This is the desired + pointing direction of the Articutool's tool_tip Y-axis, expressed in the + Jaco Hand Frame (Articutool's base frame, F_JH). + Output: List of (theta_p, theta_r) solutions in radians. + This IK is based on the one used in the benchmark and CheckArticutoolPathOrientationFeasibility. + It assumes the relationship where the input vector (vx, vy, vz) from F_JH + is related to the Articutool's FK $y_{tip}^{AB}(\theta_p, \theta_r) = (X_{AB}, Y_{AB}, Z_{AB})$ + after transformation $y_{tip}^{AB} = R_z(-\pi/2) \cdot (v_x, v_y, v_z)^T = (v_y, -v_x, v_z)^T$. + So, the IK solves for: + $\cos\theta_p \cos\theta_r = v_y$ + $\sin\theta_r = -v_x$ + $\sin\theta_p \cos\theta_r = v_z$ + """ + vx, vy, vz = ( + target_y_axis_in_jaco_hand_frame # This is the target vector in Jaco Hand Frame + ) + solutions: List[Tuple[float, float]] = [] + + # From sin(theta_r) = -vx + asin_arg_for_tr = -vx + if not (-1.0 - self.EPSILON <= asin_arg_for_tr <= 1.0 + self.EPSILON): + self.logger.debug( + f"[{self.name}] IK: asin_arg_for_tr ({asin_arg_for_tr:.4f}) out of range for -vx={-vx:.4f}." + ) + return [] # No real solution for theta_r + + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = self._normalize_angle(math.pi - theta_r_sol1) + + candidate_thetas_r = [theta_r_sol1] + if not math.isclose(theta_r_sol1, theta_r_sol2, abs_tol=self.EPSILON): + candidate_thetas_r.append(theta_r_sol2) + + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + # From cos(theta_p)cos(theta_r) = vy AND sin(theta_p)cos(theta_r) = vz + # If cos(theta_r) is near zero (singularity): + if math.isclose(cos_theta_r, 0.0, abs_tol=self.EPSILON): + # Then vy and vz must also be near zero. + # If so, theta_p is indeterminate but can be chosen (e.g., 0). + # This case also means sin(theta_r) = +/-1, so -vx = +/-1. + if math.isclose(vy, 0.0, abs_tol=self.EPSILON) and math.isclose( + vz, 0.0, abs_tol=self.EPSILON + ): + # This implies target vector was (∓1, 0, 0) in Jaco Hand Frame. + # This would mean Artic Y is trying to point along Jaco Hand +/-X. + solutions.append( + (0.0, self._normalize_angle(theta_r)) + ) # Choose theta_p = 0 + # else: (vy or vz is not zero, but cos_theta_r is zero) -> No solution + continue # Skip to next theta_r if singular and conditions not met + + # Regular case: cos_theta_r is not zero + # theta_p = atan2(sin_theta_p, cos_theta_p) + # theta_p = atan2(vz / cos_theta_r, vy / cos_theta_r) which simplifies to atan2(vz, vy) + theta_p_sol = math.atan2(vz, vy) + solutions.append( + (self._normalize_angle(theta_p_sol), self._normalize_angle(theta_r)) + ) + return solutions + + @override + def update(self) -> Status: + if not self._initialized_properly: + self.feedback_message = "Behavior not properly initialized." 
+ self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_leveling_ik_found", False) + return Status.FAILURE + + try: + jaco_ee_pose_input: Union[Pose, PoseStamped] = self.blackboard_get( + "jaco_ee_world_pose" + ) + pitch_limits: Tuple[float, float] = self.blackboard_get( + "articutool_pitch_limits_rad" + ) + roll_limits: Tuple[float, float] = self.blackboard_get( + "articutool_roll_limits_rad" + ) + + current_jaco_ee_pose: Pose + if isinstance(jaco_ee_pose_input, PoseStamped): + current_jaco_ee_pose = jaco_ee_pose_input.pose + elif isinstance(jaco_ee_pose_input, Pose): + current_jaco_ee_pose = jaco_ee_pose_input + else: + self.feedback_message = ( + "jaco_ee_world_pose is not a Pose or PoseStamped." + ) + self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_leveling_ik_found", False) + return Status.FAILURE + + # 1. Extract R_World_JacoEE from the Jaco EE's world pose + R_world_jacoee = R.from_quat( + [ + current_jaco_ee_pose.orientation.x, + current_jaco_ee_pose.orientation.y, + current_jaco_ee_pose.orientation.z, + current_jaco_ee_pose.orientation.w, + ] + ) + + # 2. Desired tool_tip Y-axis in World Frame is WORLD_Z_UP_VECTOR + # Transform this into the Jaco EE frame (F_JH) + # target_Ytip_in_JacoEE = R_JacoEE_World * WORLD_Z_UP_VECTOR + target_Ytip_in_JacoEE = R_world_jacoee.inv().apply(self.WORLD_Z_UP_VECTOR) + + self.logger.debug( + f"[{self.name}] Jaco EE R_W_EE: {R_world_jacoee.as_quat(canonical=False)}" + ) + self.logger.debug( + f"[{self.name}] Target Y_tip in JacoEE frame: {np.round(target_Ytip_in_JacoEE, 3)}" + ) + + # 3. Solve Articutool IK + ik_solutions = self._solve_articutool_ik_for_leveling(target_Ytip_in_JacoEE) + + if not ik_solutions: + self.feedback_message = "Articutool IK found no solutions for leveling." + self.logger.warn(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_leveling_ik_found", False) + return ( + Status.FAILURE + ) # Or SUCCESS if allowing no solution to mean "do nothing" + + # 4. Select a valid solution (e.g., first one within limits, or closest to current) + # For simplicity, taking the first valid one. + valid_solution_found = False + best_pitch = None + best_roll = None + + for theta_p_sol, theta_r_sol in ik_solutions: + # Normalize again just to be safe, though IK solver should do it + tp_norm = self._normalize_angle(theta_p_sol) + tr_norm = self._normalize_angle(theta_r_sol) + + if ( + pitch_limits[0] - self.EPSILON + <= tp_norm + <= pitch_limits[1] + self.EPSILON + and roll_limits[0] - self.EPSILON + <= tr_norm + <= roll_limits[1] + self.EPSILON + ): + best_pitch = tp_norm + best_roll = tr_norm + valid_solution_found = True + self.logger.info( + f"[{self.name}] Found valid Articutool leveling solution: " + f"Pitch={math.degrees(best_pitch):.1f} deg, Roll={math.degrees(best_roll):.1f} deg" + ) + self.blackboard_set( + "articutool_joint_positions", [best_pitch, best_roll] + ) + self.blackboard_set("articutool_leveling_ik_found", True) + self.feedback_message = "Articutool leveling IK solution found." + return Status.SUCCESS + + self.feedback_message = ( + "Articutool IK solutions for leveling are out of joint limits." 
+ ) + self.logger.warn( + f"[{self.name}] {self.feedback_message} Solutions: {[(math.degrees(s[0]), math.degrees(s[1])) for s in ik_solutions]}" + ) + self.blackboard_set("articutool_leveling_ik_found", False) + return Status.FAILURE + + except KeyError as e: # For blackboard_get errors + self.feedback_message = f"Blackboard key error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_leveling_ik_found", False) + return Status.FAILURE + except Exception as e: + self.feedback_message = f"Unexpected error in {self.name}: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_leveling_ik_found", False) + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/articutool/execute_articutool_trajectory.py b/ada_feeding/ada_feeding/behaviors/articutool/execute_articutool_trajectory.py new file mode 100644 index 00000000..74c1f0e4 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/execute_articutool_trajectory.py @@ -0,0 +1,457 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. +# (Add appropriate Copyright/License if desired) + +""" +Defines the ExecuteArticutoolTrajectory behavior, which sends a planned +trajectory to the Articutool's FollowJointTrajectory action server. +It assumes controllers are already appropriately switched by another behavior. +""" + +# Standard imports +from typing import Union, Optional, Dict, Any, List +from enum import Enum + +# Third-party imports +import rclpy +from rclpy.action import ActionClient +from rclpy.action.client import ClientGoalHandle, GoalStatus +from rclpy.node import Node as RclpyNode # Alias for clarity +from rclpy.executors import Future +from rclpy.qos import ( + qos_profile_services_default, + QoSProfile, + ReliabilityPolicy, + DurabilityPolicy, + HistoryPolicy, +) + +from control_msgs.action import FollowJointTrajectory +from trajectory_msgs.msg import JointTrajectory # Input type + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status + +# Local imports (adjust paths as needed) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +# Define constants for action status reporting +class ActionExecutionStatus(Enum): + IDLE = "IDLE" + CHECKING_DEPENDENCIES = "CHECKING_DEPENDENCIES" + SENDING_GOAL = "SENDING_GOAL" + WAITING_FOR_ACCEPTANCE = "WAITING_FOR_ACCEPTANCE" + EXECUTING = "EXECUTING" + WAITING_FOR_RESULT = "WAITING_FOR_RESULT" + GOAL_REJECTED = "GOAL_REJECTED" + GOAL_CANCELLED = "GOAL_CANCELLED" + SUCCEEDED = "SUCCEEDED" + ABORTED = "ABORTED" # From server + FAILED = "FAILED" # General failure + + +class ExecuteArticutoolTrajectory(BlackboardBehavior): + """ + Executes a JointTrajectory on the Articutool using the + FollowJointTrajectory action server. This behavior assumes that the + necessary controllers (e.g., 'joint_trajectory_controller') have already + been activated by a separate behavior (like SwitchArticutoolControllers) + prior to this behavior being ticked. + + Returns RUNNING while executing, SUCCESS on completion, + FAILURE on error, rejection, or abortion. 
+ """ + + # --- Constants --- + DEFAULT_ACTION_SERVER = ( + "/articutool/joint_trajectory_controller/follow_joint_trajectory" + ) + + def __init__(self, name: str, ns: str = "/", **kwargs): + super().__init__(name=name, ns=ns, **kwargs) + # Internal state variables + self._action_client: Optional[ActionClient] = None + self._send_goal_future: Optional[Future] = None + self._get_result_future: Optional[Future] = None + self._goal_handle: Optional[ClientGoalHandle] = None + self._current_status: ActionExecutionStatus = ActionExecutionStatus.IDLE + self._result_code: Optional[int] = None # Store the final result code + + self._action_client_initialized: bool = False + self.node: Optional[RclpyNode] = None # Store the node instance + self._action_server_name_str: str = ( + "" # To store the resolved action server name for logging + ) + + def blackboard_inputs( + self, + trajectory: Union[BlackboardKey, JointTrajectory], + action_server_name: Union[BlackboardKey, str] = DEFAULT_ACTION_SERVER, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + trajectory: The trajectory_msgs/JointTrajectory message to execute. + action_server_name: Name of the FollowJointTrajectory action server. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + action_goal_accepted: Optional[BlackboardKey] = None, # -> bool + action_result_code: Optional[ + BlackboardKey + ] = None, # -> int (FollowJointTrajectory.Result.error_code) + action_status: Optional[ + BlackboardKey + ] = None, # -> str (from ActionExecutionStatus enum) + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + action_goal_accepted: True if the action server accepted the goal request. + action_result_code: The final error_code from the FollowJointTrajectory result. + action_status: The current status of the action call (e.g., "IDLE", "EXECUTING", "SUCCEEDED"). + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Get node and create action client.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. 
{e}" + ) + return # Critical failure + + try: + action_server_name_bb = self.blackboard_get("action_server_name") + if not isinstance(action_server_name_bb, str) or not action_server_name_bb: + self.logger.warning( + f"[{self.name}] Blackboard 'action_server_name' invalid or not found, using default: {self.DEFAULT_ACTION_SERVER}" + ) + self._action_server_name_str = self.DEFAULT_ACTION_SERVER + else: + self._action_server_name_str = action_server_name_bb + except KeyError: + self.logger.info( + f"[{self.name}] Input 'action_server_name' not found on blackboard, using default: {self.DEFAULT_ACTION_SERVER}" + ) + self._action_server_name_str = self.DEFAULT_ACTION_SERVER + + # Define QoS profiles for action client components + service_qos_profile = qos_profile_services_default + feedback_qos_profile = QoSProfile( + reliability=ReliabilityPolicy.RELIABLE, + history=HistoryPolicy.KEEP_LAST, + depth=10, + ) + status_qos_profile = QoSProfile( + reliability=ReliabilityPolicy.RELIABLE, + durability=DurabilityPolicy.TRANSIENT_LOCAL, + history=HistoryPolicy.KEEP_LAST, + depth=1, + ) + + self._action_client = ActionClient( + self.node, + FollowJointTrajectory, + self._action_server_name_str, # Use stored name + goal_service_qos_profile=service_qos_profile, + result_service_qos_profile=service_qos_profile, + cancel_service_qos_profile=service_qos_profile, + feedback_sub_qos_profile=feedback_qos_profile, + status_sub_qos_profile=status_qos_profile, + ) + self._action_client_initialized = True + self.logger.info( + f"[{self.name}] Action client created for '{self._action_server_name_str}' (readiness check deferred to update)." + ) + self.logger.info(f"[{self.name}] Setup method complete.") + + @override + def initialise(self) -> None: + """Reset action client state.""" + self.logger.debug(f"[{self.name}] Initialising.") + self._send_goal_future = None + self._get_result_future = None + self._goal_handle = None + self._result_code = None + + if self._action_client_initialized: + self._current_status = ActionExecutionStatus.CHECKING_DEPENDENCIES + else: + self.logger.error( + f"[{self.name}] Action client was not initialized during setup. Failing." + ) + self._current_status = ActionExecutionStatus.FAILED + + # Clear blackboard outputs + self.blackboard_set("action_goal_accepted", False) + self.blackboard_set("action_result_code", self._result_code) + self.blackboard_set("action_status", self._current_status.value) + + def _check_dependencies(self) -> Status: + """Helper to check and wait for the action server.""" + if not self._action_client_initialized: + self.logger.error( + f"[{self.name}] Action client instance not created. Critical setup failure." + ) + return self._handle_failure( + "Action client not initialized", ActionExecutionStatus.FAILED + ) + + if not self._action_client.wait_for_server( + timeout_sec=0.01 + ): # Non-blocking check + self.logger.info( + f"[{self.name}] Waiting for action server '{self._action_server_name_str}'..." # Use stored name + ) + return Status.RUNNING + + self.logger.info( + f"[{self.name}] Action server '{self._action_server_name_str}' is ready." 
+ ) # Use stored name + self._current_status = ActionExecutionStatus.IDLE + self.blackboard_set("action_status", self._current_status.value) + return Status.RUNNING + + @override + def update(self) -> Status: + """Manage the action client state machine.""" + self.blackboard_set("action_status", self._current_status.value) + + if self._current_status == ActionExecutionStatus.FAILED: + return Status.FAILURE + + if self._current_status == ActionExecutionStatus.CHECKING_DEPENDENCIES: + return self._check_dependencies() + + if self._current_status == ActionExecutionStatus.IDLE: + try: + trajectory: JointTrajectory = self.blackboard_get("trajectory") + if not isinstance(trajectory, JointTrajectory): + self.logger.error( + f"[{self.name}] Input 'trajectory' is not a JointTrajectory message." + ) + return self._handle_failure("Invalid trajectory input type") + if not trajectory.points: + self.logger.warning( + f"[{self.name}] Input 'trajectory' has no points. Succeeding trivially." + ) + return self._handle_success( + result_code=FollowJointTrajectory.Result.SUCCESSFUL + ) + + goal_msg = FollowJointTrajectory.Goal() + goal_msg.trajectory = trajectory + self.logger.info( + f"[{self.name}] Sending trajectory goal to action server '{self._action_server_name_str}'..." # Use stored name + ) + self._send_goal_future = self._action_client.send_goal_async(goal_msg) + self._current_status = ActionExecutionStatus.SENDING_GOAL + self.blackboard_set("action_status", self._current_status.value) + return Status.RUNNING + except KeyError as e: + return self._handle_failure(f"Blackboard key error: {e}") + except Exception as e: + return self._handle_failure(f"Error preparing to send goal: {e}") + + if self._current_status == ActionExecutionStatus.SENDING_GOAL: + if self._send_goal_future is None: + return self._handle_invalid_state("SENDING_GOAL future is None") + if self._send_goal_future.done(): + try: + goal_handle: ClientGoalHandle = self._send_goal_future.result() + except Exception as e: + self._send_goal_future = None + self.logger.error( + f"[{self.name}] Exception waiting for goal handle: {e}. Server might have shut down." + ) + self._current_status = ActionExecutionStatus.CHECKING_DEPENDENCIES + self.logger.warning( + f"[{self.name}] Re-checking dependencies due to goal send error." + ) + return Status.RUNNING + self._send_goal_future = None + if not goal_handle.accepted: + self.logger.warning( + f"[{self.name}] Goal rejected by action server." + ) + self.blackboard_set("action_goal_accepted", False) + return self._handle_failure( + "Goal rejected", ActionExecutionStatus.GOAL_REJECTED + ) + else: + try: + uuid_hex = bytes(goal_handle.goal_id.uuid).hex() + except Exception: + uuid_hex = "[Error Formatting UUID]" + self.logger.info( + f"[{self.name}] Goal accepted by action server (ID: {uuid_hex})." 
+ ) + self._goal_handle = goal_handle + self.blackboard_set("action_goal_accepted", True) + self._get_result_future = self._goal_handle.get_result_async() + self._current_status = ActionExecutionStatus.EXECUTING + self.blackboard_set("action_status", self._current_status.value) + return Status.RUNNING + else: + return Status.RUNNING + + if ( + self._current_status == ActionExecutionStatus.EXECUTING + or self._current_status == ActionExecutionStatus.WAITING_FOR_RESULT + ): + if self._get_result_future is None or self._goal_handle is None: + return self._handle_invalid_state( + "EXECUTING/WAITING future or handle None" + ) + status = self._goal_handle.status + if status == GoalStatus.STATUS_EXECUTING: + self._current_status = ActionExecutionStatus.EXECUTING + elif status == GoalStatus.STATUS_ABORTED: + self.logger.warning( + f"[{self.name}] Goal aborted by server (status update)." + ) + self._current_status = ActionExecutionStatus.WAITING_FOR_RESULT + elif status == GoalStatus.STATUS_CANCELED: + self.logger.warning(f"[{self.name}] Goal canceled (status update).") + self._current_status = ActionExecutionStatus.WAITING_FOR_RESULT + elif status == GoalStatus.STATUS_SUCCEEDED: + self.logger.info( + f"[{self.name}] Goal status reported SUCCEEDED (status update)." + ) + self._current_status = ActionExecutionStatus.WAITING_FOR_RESULT + if self._get_result_future.done(): + try: + result_wrapper = self._get_result_future.result() + action_result = result_wrapper.result + self._result_code = action_result.error_code + result_string = action_result.error_string or "None" + self.logger.info( + f"[{self.name}] Action result received: Code={self._result_code}, Msg='{result_string}'" + ) + self.blackboard_set("action_result_code", self._result_code) + if self._result_code == FollowJointTrajectory.Result.SUCCESSFUL: + return self._handle_success(result_code=self._result_code) + else: + final_status_map = { + GoalStatus.STATUS_ABORTED: ActionExecutionStatus.ABORTED, + GoalStatus.STATUS_CANCELED: ActionExecutionStatus.GOAL_CANCELLED, + } + final_exec_status = final_status_map.get( + status, ActionExecutionStatus.FAILED + ) + return self._handle_failure( + f"Action Failed by server: Code={self._result_code}, Msg='{result_string}'", + final_exec_status, + result_code=self._result_code, + ) + except Exception as e: + self.logger.error( + f"[{self.name}] Exception getting result from future: {e}" + ) + self._current_status = ActionExecutionStatus.CHECKING_DEPENDENCIES + self.logger.warning( + f"[{self.name}] Re-checking dependencies due to result error." + ) + return Status.RUNNING + else: + self.blackboard_set("action_status", self._current_status.value) + return Status.RUNNING + + if self._current_status == ActionExecutionStatus.SUCCEEDED: + return Status.SUCCESS + if self._current_status in [ + ActionExecutionStatus.FAILED, + ActionExecutionStatus.GOAL_REJECTED, + ActionExecutionStatus.GOAL_CANCELLED, + ActionExecutionStatus.ABORTED, + ]: + return Status.FAILURE + return self._handle_invalid_state(f"Unhandled status {self._current_status}") + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + if new_status == Status.INVALID and self._goal_handle is not None: + status = self._goal_handle.status + if ( + status == GoalStatus.STATUS_ACCEPTED + or status == GoalStatus.STATUS_EXECUTING + ): + self.logger.warning( + f"[{self.name}] Behavior terminated externally while action goal was active (Status: {GoalStatus(status).name}). 
Requesting cancellation." + ) + self._goal_handle.cancel_goal_async() + self._send_goal_future = None + self._get_result_future = None + self._goal_handle = None + if self._current_status not in [ + ActionExecutionStatus.SUCCEEDED, + ActionExecutionStatus.FAILED, + ActionExecutionStatus.GOAL_REJECTED, + ActionExecutionStatus.GOAL_CANCELLED, + ActionExecutionStatus.ABORTED, + ]: + self._current_status = ActionExecutionStatus.IDLE + + def _handle_success( + self, result_code: int = FollowJointTrajectory.Result.SUCCESSFUL + ) -> Status: + self.logger.info(f"[{self.name}] Action Succeeded. Code: {result_code}") + self._current_status = ActionExecutionStatus.SUCCEEDED + self.blackboard_set("action_status", self._current_status.value) + self.blackboard_set("action_result_code", result_code) + self._goal_handle = None + self._get_result_future = None + return Status.SUCCESS + + def _handle_failure( + self, + reason: str, + final_status: ActionExecutionStatus = ActionExecutionStatus.FAILED, + result_code: Optional[int] = None, + ) -> Status: + self.logger.error(f"[{self.name}] Failure: {reason}") + self._current_status = final_status + self.blackboard_set("action_status", self._current_status.value) + if result_code is not None: + self.blackboard_set("action_result_code", result_code) + elif ( + self._result_code is not None + and self._result_code != FollowJointTrajectory.Result.SUCCESSFUL + ): + self.blackboard_set("action_result_code", self._result_code) + elif final_status == ActionExecutionStatus.GOAL_REJECTED: + self.blackboard_set("action_result_code", -998) + else: + self.blackboard_set("action_result_code", -999) + self._goal_handle = None + self._get_result_future = None + self._send_goal_future = None + return Status.FAILURE + + def _handle_invalid_state(self, message: str) -> Status: + self.logger.error( + f"[{self.name}] Reached invalid state: {message}. This is a bug." + ) + return self._handle_failure( + f"Invalid internal state: {message}", ActionExecutionStatus.FAILED + ) diff --git a/ada_feeding/ada_feeding/behaviors/articutool/execute_named_primitive.py b/ada_feeding/ada_feeding/behaviors/articutool/execute_named_primitive.py new file mode 100644 index 00000000..182312d4 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/execute_named_primitive.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the ExecuteNamedPrimitive behavior, which executes a specified +Articutool primitive action by reading its name and parameters from the +blackboard. 
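+
+A minimal wiring sketch (the blackboard key names below are illustrative
+assumptions, not fixed by this module):
+
+    behavior = ExecuteNamedPrimitive(name="PostMoveIntoPrimitive")
+    behavior.blackboard_inputs(
+        primitive_name=BlackboardKey("post_move_into_primitive_name"),
+        primitive_params=BlackboardKey("post_move_into_primitive_params"),
+    )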
+""" + +from typing import Union, Optional, Dict, Any, List +from enum import Enum +import traceback + +import rclpy +from rclpy.action import ActionClient +from rclpy.action.client import ClientGoalHandle +from action_msgs.msg import GoalStatus +from rclpy.node import Node +from rclpy.executors import Future +from rclpy.duration import Duration as RCLPYDuration + +from articutool_interfaces.action import ExecuteArticutoolPrimitive + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Status + +# Assuming these are your actual base classes and helpers +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class ActionExecutionStatus(Enum): + """Enum for tracking the state of the action client behavior.""" + + IDLE = "IDLE" + CHECKING_SERVER = "CHECKING_SERVER" + SENDING_GOAL = "SENDING_GOAL" + EXECUTING = "EXECUTING" + GOAL_REJECTED = "GOAL_REJECTED" + GOAL_CANCELLED = "GOAL_CANCELLED" + SUCCEEDED = "SUCCEEDED" + ABORTED = "ABORTED" + FAILED = "FAILED" + + +class ExecuteNamedPrimitive(BlackboardBehavior): + """ + Executes a named Articutool primitive by calling the ExecuteArticutoolPrimitive + action server. The primitive name and its parameters are read from the blackboard. + If the primitive name is "NONE" or empty, it succeeds immediately. + """ + + DEFAULT_ACTION_SERVER_NAME = "/orientation_control/execute_primitive" + DEFAULT_WAIT_TIMEOUT_SEC = 2.0 + + def __init__(self, name: str, ns: Optional[str] = None, **kwargs): + super().__init__(name=name, ns=ns if ns else name, **kwargs) + self.node: Optional[Node] = None + self._action_client: Optional[ActionClient] = None + self._send_goal_future: Optional[Future] = None + self._get_result_future: Optional[Future] = None + self._goal_handle: Optional[ClientGoalHandle] = None + self._current_status: ActionExecutionStatus = ActionExecutionStatus.IDLE + self._result: Optional[ExecuteArticutoolPrimitive.Result] = None + + self._action_server_name_str: str = self.DEFAULT_ACTION_SERVER_NAME + self._wait_timeout_float: float = self.DEFAULT_WAIT_TIMEOUT_SEC + + def blackboard_inputs( + self, + primitive_name: Union[BlackboardKey, str], + primitive_params: Union[BlackboardKey, List[float]] = [], + action_server_name: Union[BlackboardKey, str] = DEFAULT_ACTION_SERVER_NAME, + wait_for_server_timeout_sec: Union[ + BlackboardKey, float + ] = DEFAULT_WAIT_TIMEOUT_SEC, + ) -> None: + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + primitive_result: Optional[BlackboardKey] = None, + primitive_status: Optional[BlackboardKey] = None, + ) -> None: + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs: Any) -> None: + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + return # Critical failure + + try: + self._action_server_name_str = self.blackboard_get("action_server_name") + self._wait_timeout_float = self.blackboard_get( + "wait_for_server_timeout_sec" + ) + + self._action_client = ActionClient( + self.node, ExecuteArticutoolPrimitive, self._action_server_name_str + ) + self.logger.info( + f"[{self.name}] Action client created for '{self._action_server_name_str}'." 
+ ) + except Exception as e: + self.logger.error( + f"[{self.name}] Failed to create action client during setup: {e} {traceback.format_exc()}" + ) + self._action_client = None + + @override + def initialise(self) -> None: + self.logger.debug(f"[{self.name}] Initialising.") + self._send_goal_future = None + self._get_result_future = None + self._goal_handle = None + self._current_status = ActionExecutionStatus.IDLE + self._result = None + self.blackboard_set("primitive_result", None) + self.blackboard_set("primitive_status", self._current_status.value) + + def _handle_failure( + self, + reason: str, + final_status: ActionExecutionStatus = ActionExecutionStatus.FAILED, + ) -> Status: + self.logger.error(f"[{self.name}] Failure: {reason}") + self._current_status = final_status + self.blackboard_set("primitive_status", self._current_status.value) + return Status.FAILURE + + def _handle_success(self, message: str) -> Status: + self.logger.info(f"[{self.name}] Success: {message}") + self._current_status = ActionExecutionStatus.SUCCEEDED + self.blackboard_set("primitive_status", self._current_status.value) + return Status.SUCCESS + + @override + def update(self) -> Status: + self.blackboard_set("primitive_status", self._current_status.value) + + if self.node is None or self._action_client is None: + return self._handle_failure( + "Behavior not initialized properly (node or action client is None)." + ) + + # --- State: IDLE -> Check inputs and transition --- + if self._current_status == ActionExecutionStatus.IDLE: + try: + primitive_name = self.blackboard_get("primitive_name") + # Handle the "do nothing" case + if not primitive_name or primitive_name.upper() == "NONE": + return self._handle_success("No primitive specified.") + + # If a primitive is specified, transition to check for server + self._current_status = ActionExecutionStatus.CHECKING_SERVER + self.blackboard_set("primitive_status", self._current_status.value) + return Status.RUNNING # Return RUNNING to re-tick in the new state + + except KeyError as e: + return self._handle_failure(f"Blackboard key error: {e}") + + # --- State: CHECKING_SERVER -> Wait for server then send goal --- + if self._current_status == ActionExecutionStatus.CHECKING_SERVER: + if not self._action_client.wait_for_server( + timeout_sec=self._wait_timeout_float + ): + return self._handle_failure( + f"Action server '{self._action_server_name_str}' not available after {self._wait_timeout_float}s timeout." 
+ ) + + try: + # Re-read inputs in case they changed while waiting + primitive_name = self.blackboard_get("primitive_name") + primitive_params = self.blackboard_get("primitive_params") or [] + + goal_msg = ExecuteArticutoolPrimitive.Goal() + goal_msg.primitive_name = primitive_name + goal_msg.parameters = [float(p) for p in primitive_params] + + self.logger.info( + f"[{self.name}] Sending goal '{primitive_name}' with params {goal_msg.parameters}" + ) + self._send_goal_future = self._action_client.send_goal_async(goal_msg) + self._current_status = ActionExecutionStatus.SENDING_GOAL + return Status.RUNNING + except Exception as e: + return self._handle_failure( + f"Error sending goal: {e} {traceback.format_exc()}" + ) + + # --- State: SENDING_GOAL -> Waiting for goal acceptance --- + if self._current_status == ActionExecutionStatus.SENDING_GOAL: + if not self._send_goal_future.done(): + return Status.RUNNING + try: + self._goal_handle = self._send_goal_future.result() + if not self._goal_handle.accepted: + return self._handle_failure( + "Goal rejected by action server.", + ActionExecutionStatus.GOAL_REJECTED, + ) + self._get_result_future = self._goal_handle.get_result_async() + self._current_status = ActionExecutionStatus.EXECUTING + return Status.RUNNING + except Exception as e: + return self._handle_failure(f"Exception getting goal handle: {e}") + + # --- State: EXECUTING -> Waiting for the result --- + if self._current_status == ActionExecutionStatus.EXECUTING: + if not self._get_result_future.done(): + return Status.RUNNING + try: + result_wrapper = self._get_result_future.result() + self._result = result_wrapper.result + self.blackboard_set("primitive_result", self._result) + final_status = result_wrapper.status + if final_status == GoalStatus.STATUS_SUCCEEDED and self._result.success: + return self._handle_success( + f"Primitive completed successfully: {self._result.message}" + ) + else: + final_enum = ( + ActionExecutionStatus.ABORTED + if final_status == GoalStatus.STATUS_ABORTED + else ( + ActionExecutionStatus.GOAL_CANCELLED + if final_status == GoalStatus.STATUS_CANCELED + else ActionExecutionStatus.FAILED + ) + ) + return self._handle_failure( + f"Primitive did not succeed. Final status: {final_status}, Result msg: '{self._result.message}'", + final_enum, + ) + except Exception as e: + return self._handle_failure( + f"Exception while getting action result: {e}" + ) + + # Fallback + return self._handle_failure( + f"Reached unhandled state: {self._current_status.value}" + ) + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + if ( + new_status == Status.INVALID + and self._goal_handle + and self._goal_handle.is_active + ): + self.logger.warning( + f"[{self.name}] Terminated externally. Attempting to cancel active goal." + ) + self._goal_handle.cancel_goal_async() + self.initialise() diff --git a/ada_feeding/ada_feeding/behaviors/articutool/switch_articutool_controllers.py b/ada_feeding/ada_feeding/behaviors/articutool/switch_articutool_controllers.py new file mode 100644 index 00000000..8ddb5066 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/switch_articutool_controllers.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# (Add appropriate Copyright/License if desired) + +""" +Defines the SwitchArticutoolControllers behavior, which calls the standard +controller_manager service to switch controllers for the Articutool. 
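+
+A minimal wiring sketch (the controller names below are illustrative
+assumptions, not fixed by this module):
+
+    behavior = SwitchArticutoolControllers(name="EnableArticutoolTrajectoryMode")
+    behavior.blackboard_inputs(
+        controllers_to_activate=["joint_trajectory_controller"],
+        controllers_to_deactivate=["orientation_controller"],
+    )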
+""" + +import rclpy +from rclpy.node import Node +from rclpy.executors import Future # For type hints +from rclpy.duration import Duration + +# Import the standard controller manager service definition +from controller_manager_msgs.srv import SwitchController + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status + +# Import base class and helpers (Adjust path as needed) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + +# Typing imports +from typing import Union, Optional, Dict, Any, List + + +class SwitchArticutoolControllers(BlackboardBehavior): + """ + Calls the /articutool/controller_manager/switch_controller service + to activate and deactivate specified controllers. + + Returns SUCCESS if the service call completes and the response indicates success (ok=True). + Returns FAILURE if the service is unavailable, the call fails, or the response indicates failure. + Returns RUNNING while waiting for the service response. + """ + + # Define default values used if not provided via blackboard inputs during instantiation + DEFAULT_SERVICE_NAME = "/articutool/controller_manager/switch_controller" + DEFAULT_WAIT_TIMEOUT_SEC = 2.0 # Timeout for wait_for_service + DEFAULT_STRICTNESS = SwitchController.Request.BEST_EFFORT # Default: 1 (STRICT=2) + + def blackboard_inputs( + self, + controllers_to_activate: Union[BlackboardKey, List[str]], + controllers_to_deactivate: Union[BlackboardKey, List[str]], + strictness: Union[BlackboardKey, int] = DEFAULT_STRICTNESS, + service_name: Union[BlackboardKey, str] = DEFAULT_SERVICE_NAME, + wait_for_server_timeout_sec: Union[BlackboardKey, float] = DEFAULT_WAIT_TIMEOUT_SEC, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + controllers_to_activate: List of controller names to activate (start). + controllers_to_deactivate: List of controller names to deactivate (stop). + strictness: Strictness level for the switch operation. Options are + SwitchController.Request.BEST_EFFORT (1) or + SwitchController.Request.STRICT (2). + service_name: Name of the controller manager's switch_controller service. + wait_for_server_timeout_sec: Max time (sec) to wait for server in initial check. + Use 0.0 or negative to skip check. + """ + # Use the base class method to register keys/defaults + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + switch_call_succeeded: Optional[BlackboardKey] = None, # -> bool (Did call complete?) + switch_response_ok: Optional[BlackboardKey] = None, # -> bool (Payload 'ok' field) + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + switch_call_succeeded: True if the service call finished without client-side errors. + switch_response_ok: The boolean 'ok' field from the SwitchController response payload, + indicating if the controller manager performed the switch successfully. 
+ """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Get node and create the service client.""" + # pylint: disable=attribute-defined-outside-init + self.node: Node = kwargs['node'] + self.client: Optional[rclpy.client.Client] = None + try: + # Read configuration parameters using the blackboard helper method + self.service_name = self.blackboard_get("service_name") + self.wait_timeout = self.blackboard_get("wait_for_server_timeout_sec") + + # Create client for the standard controller manager service + self.client = self.node.create_client(SwitchController, self.service_name) + self.logger.info(f"[{self.name}] Service client created for '{self.service_name}'") + + except KeyError as e: + self.logger.error(f"[{self.name}] Missing blackboard key during setup: {e}") + self.client = None # Ensure client is None if setup fails + except Exception as e: + self.logger.error(f"[{self.name}] Failed to create service client: {e}") + self.client = None # Ensure client is None if setup fails + + + @override + def initialise(self) -> None: + """Reset the future before potentially calling the service.""" + # pylint: disable=attribute-defined-outside-init + self.service_future: Optional[Future] = None + self.logger.debug(f"[{self.name}] Initializing controller switch call state.") + # Clear outputs to default non-success state + self.blackboard_set("switch_call_succeeded", False) + self.blackboard_set("switch_response_ok", False) + + + @override + def update(self) -> Status: + """Manage the asynchronous service call state machine.""" + if self.client is None: + self.logger.error(f"[{self.name}] Service client not available. Setup failed?") + return Status.FAILURE + + # --- State 1: Ready to Call --- + if self.service_future is None: + # Check server availability (only if timeout >= 0) + wait_timeout = self.wait_timeout + if wait_timeout >= 0.0: + if not self.client.wait_for_service(timeout_sec=wait_timeout): + self.logger.error(f"[{self.name}] Service '{self.service_name}' not available after {wait_timeout}s timeout.") + return Status.FAILURE # Service unavailable is a failure + + try: + # Read inputs from blackboard + activate_list = self.blackboard_get("controllers_to_activate") + deactivate_list = self.blackboard_get("controllers_to_deactivate") + strict_level = self.blackboard_get("strictness") + + # Basic Type Validation + if not isinstance(activate_list, list) or not all(isinstance(s, str) for s in activate_list): + self.logger.error(f"[{self.name}] Input 'controllers_to_activate' must be a List[str].") + return Status.FAILURE + if not isinstance(deactivate_list, list) or not all(isinstance(s, str) for s in deactivate_list): + self.logger.error(f"[{self.name}] Input 'controllers_to_deactivate' must be a List[str].") + return Status.FAILURE + if not isinstance(strict_level, int): + self.logger.error(f"[{self.name}] Input 'strictness' must be an integer.") + return Status.FAILURE + if strict_level not in [SwitchController.Request.BEST_EFFORT, SwitchController.Request.STRICT]: + self.logger.warning(f"[{self.name}] Invalid strictness value {strict_level}, using BEST_EFFORT.") + strict_level = SwitchController.Request.BEST_EFFORT + + # Create request + req = SwitchController.Request() + req.activate_controllers = activate_list + req.deactivate_controllers = deactivate_list + req.strictness = strict_level + req.activate_asap = False # Usually False for planned switches + req.timeout = 
Duration(seconds=5.0).to_msg() # Server-side timeout within CM + + # Call service async + self.logger.info(f"[{self.name}] Calling switch_controller: " + f"Activate={req.activate_controllers}, Deactivate={req.deactivate_controllers}, Strictness={req.strictness}") + self.service_future = self.client.call_async(req) + return Status.RUNNING # Waiting for response + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return Status.FAILURE + except Exception as e: + self.logger.error(f"[{self.name}] Error preparing service call: {e}") + return Status.FAILURE + + # --- State 2: Waiting for Response --- + elif not self.service_future.done(): + self.logger.debug(f"[{self.name}] Waiting for switch_controller response...") + return Status.RUNNING + + # --- State 3: Processing Response --- + else: + self.logger.info(f"[{self.name}] switch_controller call finished.") + response = None + call_succeeded = False # Did the call itself complete without client-side error? + response_ok = False # Did the service response payload indicate success? + try: + response = self.service_future.result() # Get the response payload + call_succeeded = True # If we get here, the call itself completed + + if response is not None: + response_ok = response.ok # Check the 'ok' field in the response + self.logger.info(f"[{self.name}] switch_controller response: ok={response_ok}") + else: + self.logger.error(f"[{self.name}] Service call future done but result is None.") + response_ok = False + + except Exception as e: + # Error during call execution on server or getting result + self.logger.error(f"[{self.name}] Service call failed with exception: {e}") + call_succeeded = False + response_ok = False + + # Write outputs + self.blackboard_set("switch_call_succeeded", call_succeeded) + self.blackboard_set("switch_response_ok", response_ok) + + # Reset future + self.service_future = None + + # Return status based on response 'ok' field + return Status.SUCCESS if response_ok else Status.FAILURE + + + @override + def terminate(self, new_status: Status) -> None: + """Log termination status and clear future if needed.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + # If terminated while waiting for response, clear the future + if self.service_future is not None and not self.service_future.done(): + self.logger.warning(f"[{self.name}] Terminated while waiting for switch_controller response.") + # We can't easily cancel a service call future, just abandon it + self.service_future = None diff --git a/ada_feeding/ada_feeding/behaviors/articutool/trigger_articutool_calibration.py b/ada_feeding/ada_feeding/behaviors/articutool/trigger_articutool_calibration.py new file mode 100644 index 00000000..c48c9c6a --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/articutool/trigger_articutool_calibration.py @@ -0,0 +1,281 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the TriggerArticutoolCalibration behavior, which calls the service +to trigger the Articutool's IMU orientation calibration routine. 
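+
+A minimal wiring sketch (the blackboard key name below is an illustrative
+assumption, not fixed by this module):
+
+    behavior = TriggerArticutoolCalibration(name="CalibrateArticutoolIMU")
+    behavior.blackboard_inputs(service_call_timeout_sec=15.0)
+    behavior.blackboard_outputs(
+        calibration_routine_success=BlackboardKey("articutool_calibration_ok"),
+    )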
+""" + +import rclpy +from rclpy.node import Node +from rclpy.executors import Future # For type hints +from rclpy.duration import Duration as RCLPYDuration # For service call timeout + +from typing import Union, Optional, Dict, Any +import time # For service call timeout logic + +from geometry_msgs.msg import Quaternion as QuaternionMsg +from articutool_interfaces.srv import TriggerCalibration # Service type + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status + +# Local imports +from ada_feeding.helpers import BlackboardKey # Assuming this is your helper +from ada_feeding.behaviors import BlackboardBehavior # Assuming this is your base class + + +class TriggerArticutoolCalibration(BlackboardBehavior): + """ + Calls the TriggerCalibration service to initiate Articutool's + IMU orientation calibration. + + Returns SUCCESS if the service call completes and the service response indicates success. + Returns FAILURE if the service is unavailable, the call fails, or the service response indicates failure. + Returns RUNNING while waiting for the service response. + """ + + DEFAULT_SERVICE_NAME = ( + "/orientation_calibration_service/trigger_calibration" # Default name + ) + DEFAULT_WAIT_FOR_SERVER_TIMEOUT_SEC = 2.0 + DEFAULT_SERVICE_CALL_TIMEOUT_SEC = 10.0 # Calibration might take some time + + def blackboard_inputs( + self, + service_name: Union[BlackboardKey, str] = DEFAULT_SERVICE_NAME, + wait_for_server_timeout_sec: Union[ + BlackboardKey, float + ] = DEFAULT_WAIT_FOR_SERVER_TIMEOUT_SEC, + service_call_timeout_sec: Union[ + BlackboardKey, float + ] = DEFAULT_SERVICE_CALL_TIMEOUT_SEC, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + service_name: Name of the TriggerCalibration service. + wait_for_server_timeout_sec: Max time (sec) to wait for server availability. + service_call_timeout_sec: Max time (sec) to wait for the service call to complete. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + calibration_call_succeeded: Optional[ + BlackboardKey + ] = None, # bool: Did the ROS service call itself succeed? + calibration_routine_success: Optional[ + BlackboardKey + ] = None, # bool: From service response .success + calibration_message: Optional[ + BlackboardKey + ] = None, # string: From service response .message + computed_calibration_offset_quat: Optional[ + BlackboardKey + ] = None, # QuaternionMsg: From service response + ) -> None: + """ + Blackboard Outputs + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def setup(self, **kwargs): + """Get the node and create the service client.""" + self.node: Optional[Node] = kwargs.get("node") + self.client = None + self._service_name_str: str = self.DEFAULT_SERVICE_NAME + self._wait_timeout_float: float = self.DEFAULT_WAIT_FOR_SERVER_TIMEOUT_SEC + self._call_timeout_float: float = self.DEFAULT_SERVICE_CALL_TIMEOUT_SEC + self._initialized_properly = False + + if not self.node: + self.logger.error( + f"[{self.name}] Node object not provided in setup kwargs." + ) + return + + try: + # It's better to get these once in setup if they don't change per tick, + # or get them in update if they can change dynamically from the blackboard. + # For service clients, usually setup is fine. 
+ self._service_name_str = self.blackboard_get("service_name") + self._wait_timeout_float = self.blackboard_get( + "wait_for_server_timeout_sec" + ) + self._call_timeout_float = self.blackboard_get("service_call_timeout_sec") + + self.client = self.node.create_client( + TriggerCalibration, self._service_name_str + ) + self.logger.info( + f"[{self.name}] Service client created for '{self._service_name_str}'" + ) + self._initialized_properly = True + except KeyError as e: + self.logger.error(f"[{self.name}] Missing blackboard key during setup: {e}") + except Exception as e: + self.logger.error( + f"[{self.name}] Failed to create service client during setup: {e}" + ) + + @override + def initialise(self) -> None: + self.service_future = None # Reset for a new call + self.call_sent_time = None + self.logger.debug(f"[{self.name}] Initializing service call state.") + + def update(self) -> Status: + """Manage the asynchronous service call state machine.""" + if not self._initialized_properly or self.client is None: + self.logger.error( + f"[{self.name}] Service client not available. Setup likely failed." + ) + self.blackboard_set( + "calibration_call_succeeded", False + ) # Use internal port name + self.blackboard_set("calibration_routine_success", False) + return Status.FAILURE + + # --- State 1: Ready to Call --- + if self.service_future is None: + if self._wait_timeout_float >= 0.0: + if not self.client.wait_for_service( + timeout_sec=self._wait_timeout_float + ): + self.logger.error( + f"[{self.name}] Service '{self._service_name_str}' not available after {self._wait_timeout_float}s timeout." + ) + self.blackboard_set("calibration_call_succeeded", False) + return Status.FAILURE + + # If wait_for_service timeout is < 0, we attempt call without waiting (not recommended for critical services) + # elif not self.client.service_is_ready(): + # self.logger.error(f"[{self.name}] Service '{self._service_name_str}' not ready (no wait).") + # self.blackboard_set("calibration_call_succeeded", False) + # return Status.FAILURE + + req = TriggerCalibration.Request() # Empty request + self.logger.info( + f"[{self.name}] Calling service '{self._service_name_str}' to trigger calibration." + ) + try: + self.service_future = self.client.call_async(req) + self.call_sent_time = time.monotonic() # Record time when call was sent + return Status.RUNNING + except Exception as e: + self.logger.error(f"[{self.name}] Error sending service request: {e}") + self.blackboard_set("calibration_call_succeeded", False) + return Status.FAILURE + + # --- State 2: Waiting for Response --- + elif self.service_future and not self.service_future.done(): + if self.call_sent_time is not None and ( + time.monotonic() - self.call_sent_time > self._call_timeout_float + ): + self.logger.error( + f"[{self.name}] Service call to '{self._service_name_str}' timed out after {self._call_timeout_float}s." + ) + # Attempt to cancel, though ROS 2 service call cancellation is not straightforward + if hasattr(self.service_future, "cancel") and callable( + self.service_future.cancel + ): + if ( + not self.service_future.cancelled() + ): # Check if it can be cancelled + if self.service_future.cancel(): # Try to cancel + self.logger.info( + f"[{self.name}] Service call future cancelled." + ) + else: + self.logger.warn( + f"[{self.name}] Failed to cancel service call future (may have already completed or cannot be cancelled)." 
+ ) + self.service_future = None # Abandon the future + self.call_sent_time = None + self.blackboard_set("calibration_call_succeeded", False) + self.blackboard_set("calibration_message", "Service call timed out") + return Status.FAILURE + + self.logger.debug( + f"[{self.name}] Waiting for calibration service response..." + ) + return Status.RUNNING + + # --- State 3: Processing Response --- + elif self.service_future: # Future exists and is done + self.logger.debug(f"[{self.name}] Calibration service call future done.") + response_payload = None + final_status = Status.FAILURE # Default to failure + try: + response_payload = self.service_future.result() + self.blackboard_set("calibration_call_succeeded", True) + + if response_payload is not None: + self.blackboard_set( + "calibration_routine_success", response_payload.success + ) + self.blackboard_set("calibration_message", response_payload.message) + self.blackboard_set( + "computed_calibration_offset_quat", + response_payload.computed_offset_jacobase_to_filterworld, + ) + + self.logger.info( + f"[{self.name}] Calibration service response: success={response_payload.success}, msg='{response_payload.message}'" + ) + if response_payload.success: + self.logger.info( + f" Computed Offset (xyzw): [" + f"{response_payload.computed_offset_jacobase_to_filterworld.x:.4f}, " + f"{response_payload.computed_offset_jacobase_to_filterworld.y:.4f}, " + f"{response_payload.computed_offset_jacobase_to_filterworld.z:.4f}, " + f"{response_payload.computed_offset_jacobase_to_filterworld.w:.4f}]" + ) + final_status = ( + Status.SUCCESS if response_payload.success else Status.FAILURE + ) + else: + self.logger.error( + f"[{self.name}] Service call future done but result is None." + ) + self.blackboard_set("calibration_routine_success", False) + self.blackboard_set( + "calibration_message", "Service call returned None result" + ) + except Exception as e: + self.logger.error( + f"[{self.name}] Service call failed with exception: {e}" + ) + self.blackboard_set("calibration_call_succeeded", False) + self.blackboard_set("calibration_routine_success", False) + self.blackboard_set("calibration_message", f"Exception: {e}") + + self.service_future = None # Reset for next call + self.call_sent_time = None + return final_status + + # Should not be reached + self.logger.warn(f"[{self.name}] Reached unexpected state in update method.") + return Status.FAILURE + + def terminate(self, new_status: Status) -> None: + """Log termination status and clear future if needed.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + if self.service_future is not None and not self.service_future.done(): + self.logger.warning( + f"[{self.name}] Service call terminated while waiting for response." + ) + # Note: ROS 2 futures are not easily cancellable once sent if the service doesn't support it. + # We just abandon it here. 
+ self.service_future = None + self.call_sent_time = None diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py b/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py index 0c0c9422..a64572d0 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py @@ -14,6 +14,8 @@ MoveIt2OrientationConstraint, MoveIt2PoseConstraint, ) +from .moveit2_compute_ik import MoveIt2ComputeIK +from .moveit2_compute_fk import MoveIt2ComputeFK from .servo_move import ServoMove # Modifying the planning scene diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_fk.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_fk.py new file mode 100644 index 00000000..216b5555 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_fk.py @@ -0,0 +1,244 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +# (Add appropriate Copyright/License if desired) + +""" +This module defines the MoveIt2ComputeFK behavior, which uses the +ada_feeding MoveIt2 wrapper to compute Forward Kinematics. +""" + +# Standard imports +from typing import Union, Optional, Dict, Any, List, Tuple +from threading import Lock + +# Third-party imports +from geometry_msgs.msg import PoseStamped +from sensor_msgs.msg import JointState +from moveit_msgs.msg import MoveItErrorCodes +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import rclpy.node + +# Local imports +from ada_feeding.helpers import BlackboardKey, get_moveit2_object +from ada_feeding.behaviors import BlackboardBehavior + +from pymoveit2 import MoveIt2 + + +class MoveIt2ComputeFK(BlackboardBehavior): + """ + Computes Forward Kinematics (FK) using the MoveIt2 interface wrapper + for a given joint state and specified links. + + If joint_state is not provided, uses the current state known by the MoveIt2 object. + If fk_link_names is not provided, computes FK for the default end-effector link + associated with the MoveIt2 object's group. + """ + + def blackboard_inputs( + self, + group_name: Union[BlackboardKey, str], + joint_state: Optional[Union[BlackboardKey, JointState, List[float]]] = None, + fk_link_names: Optional[Union[BlackboardKey, List[str]]] = None, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + group_name: The MoveIt2 planning group name. Used during setup to retrieve + the correct MoveIt2 object instance. + joint_state: Optional state (JointState or List[float]) for FK calculation. + If None or key not present, uses current state from MoveIt2 object. + fk_link_names: Optional list of link names. If None or key not present, uses + the default end-effector link from the MoveIt2 object. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + fk_poses: Optional[ + BlackboardKey + ], # -> Optional[Union[PoseStamped, List[PoseStamped]]] + success: Optional[BlackboardKey], # -> bool + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + fk_poses: The resulting PoseStamped (if single link/default) or List[PoseStamped] + (if multiple fk_link_names requested). None on failure. + success: Boolean flag indicating if FK computation was successful. 
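+
+ Example wiring (the key names below are illustrative assumptions):
+
+     behavior.blackboard_outputs(
+         fk_poses=BlackboardKey("fork_tip_pose"),
+         success=BlackboardKey("fk_success"),
+     )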
+ """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Get the ROS2 node and acquire the MoveIt2 object for the specified group.""" + # pylint: disable=attribute-defined-outside-init + self.node: rclpy.node.Node = kwargs["node"] + self.moveit2_obj: Optional[MoveIt2] = None + self.moveit2_lock: Optional[Lock] = None + self.fk_group_name: Optional[str] = None + + try: + self.fk_group_name = self.blackboard_get("group_name") + if not isinstance(self.fk_group_name, str) or not self.fk_group_name: + raise ValueError("group_name must be a non-empty string") + except (KeyError, ValueError) as e: + self.logger.error( + f"[{self.name}] Invalid or missing input 'group_name': {e}" + ) + return + + try: + self.moveit2_obj, self.moveit2_lock = get_moveit2_object( + blackboard=self.blackboard, + group_name=self.fk_group_name, + node=self.node, + ) + if self.moveit2_obj is None or self.moveit2_lock is None: + raise RuntimeError("get_moveit2_object returned None") + self.logger.info( + f"[{self.name}] Successfully obtained MoveIt2 object for FK group '{self.fk_group_name}'" + ) + except Exception as e: + self.logger.error( + f"[{self.name}] Failed to get MoveIt2 object for FK group '{self.fk_group_name}': {e}" + ) + self.moveit2_obj = None + self.moveit2_lock = None + + @override + def initialise(self) -> None: + """Reset status variables if needed.""" + self.logger.debug(f"[{self.name}] Initialising FK computation.") + # Clear previous results from blackboard + self.blackboard_set("fk_poses", None) + self.blackboard_set("success", False) + + @override + def update(self) -> Status: + """Perform the FK computation using the synchronous compute_fk method.""" + # Check if setup was successful + if self.moveit2_obj is None or self.moveit2_lock is None: + self.logger.error( + f"[{self.name}] MoveIt2 object not initialized. Setup likely failed." + ) + return Status.FAILURE + + # Check if MoveIt2 is currently locked by another behavior + # Note: FK is usually read-only, lock might be optional depending on wrapper implementation + # but using it ensures consistency if wrapper reads internal state like current joints. + if self.moveit2_lock.locked(): + self.logger.debug(f"[{self.name}] MoveIt2 is locked, waiting.") + return Status.RUNNING + + # Acquire lock and perform FK computation + with self.moveit2_lock: + try: + # Read optional inputs using try-except to handle missing keys gracefully + joint_state_input = None + try: + joint_state_input = self.blackboard_get("joint_state") + # Optional validation + if joint_state_input is not None and not isinstance( + joint_state_input, (JointState, list) + ): + self.logger.warning( + f"[{self.name}] Input 'joint_state' is not JointState or List, ignoring." + ) + joint_state_input = None + except KeyError: + self.logger.debug( + f"[{self.name}] Optional input 'joint_state' not found, using MoveIt2 object's current state." + ) + pass # Keep default None -> wrapper uses current state + + fk_link_names_input = None + try: + fk_link_names_input = self.blackboard_get("fk_link_names") + if fk_link_names_input is not None and not ( + isinstance(fk_link_names_input, list) + and all(isinstance(n, str) for n in fk_link_names_input) + ): + self.logger.warning( + f"[{self.name}] Input 'fk_link_names' is not List[str], ignoring." 
+ ) + fk_link_names_input = None + except KeyError: + self.logger.debug( + f"[{self.name}] Optional input 'fk_link_names' not found, using MoveIt2 object's default end-effector." + ) + pass # Keep default None -> wrapper uses default EE link + + # Call the synchronous compute_fk method + self.logger.info( + f"[{self.name}] Computing FK for group '{self.fk_group_name}' " + f"with links: {fk_link_names_input or 'default EE'}." + ) + + result_poses: Optional[ + Union[PoseStamped, List[PoseStamped]] + ] = self.moveit2_obj.compute_fk( + joint_state=joint_state_input, + fk_link_names=fk_link_names_input, + ) + + # Determine success based on whether a result was returned + fk_success = result_poses is not None + + # Write results to blackboard + self.blackboard_set("success", fk_success) + self.blackboard_set("fk_poses", result_poses) + + if fk_success: + num_poses = ( + len(result_poses) if isinstance(result_poses, list) else 1 + ) + self.logger.info( + f"[{self.name}] FK computation successful ({num_poses} pose(s) found)." + ) + return Status.SUCCESS + else: + # The compute_fk wrapper likely logs the specific error code/reason + self.logger.warning( + f"[{self.name}] FK computation failed (see MoveIt2 wrapper logs)." + ) + return Status.FAILURE + + except KeyError as e: + self.logger.error( + f"[{self.name}] Blackboard key error during FK update: {e}" + ) + self.blackboard_set("success", False) + self.blackboard_set("fk_poses", None) + return Status.FAILURE + except Exception as e: + self.logger.error( + f"[{self.name}] Unexpected error during FK computation: {e}", + exc_info=True, + ) + self.blackboard_set("success", False) + self.blackboard_set("fk_poses", None) + return Status.FAILURE + + # Should not be reached if lock logic is correct + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + """Log termination status.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_ik.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_ik.py new file mode 100644 index 00000000..dfe21077 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_compute_ik.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the MoveIt2ComputeIK behavior, which uses the +ada_feeding MoveIt2 wrapper to compute Inverse Kinematics for a target pose. +""" + +# Standard imports +from typing import Any, Union, Optional, Dict, List, Tuple +from threading import Lock + +# Third-party imports +from geometry_msgs.msg import PoseStamped, Point, Quaternion +from sensor_msgs.msg import JointState +from moveit_msgs.msg import ( + Constraints, + MoveItErrorCodes, +) # Added MoveItErrorCodes potentially used by wrapper +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access +import rclpy.node + +# Local imports from ada_feeding (adjust paths if necessary) +from ada_feeding.helpers import ( + BlackboardKey, + get_moveit2_object, + # get_tf_object, # Likely not needed here if pose is already in desired frame for IK +) +from ada_feeding.behaviors import BlackboardBehavior + +from pymoveit2 import MoveIt2 +from .moveit2_plan import MoveIt2ConstraintType + + +class MoveIt2ComputeIK(BlackboardBehavior): + """ + Computes Inverse Kinematics (IK) using the ada_feeding MoveIt2 interface wrapper. 
+ + Takes a target pose (PoseStamped) and computes a joint state configuration + that achieves that pose for the planning group associated with the MoveIt2 object. + """ + + # pylint: disable=arguments-differ + # pylint: disable=too-many-arguments + + def blackboard_inputs( + self, + target_pose: Union[BlackboardKey, PoseStamped], + group_name: Union[BlackboardKey, str], + start_joint_state: Optional[Union[BlackboardKey, JointState]] = None, + constraints: Optional[ + Union[BlackboardKey, List[Tuple[MoveIt2ConstraintType, Dict[str, Any]]]] + ] = None, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + target_pose: The target pose (PoseStamped) for the end-effector of the group. + The frame_id within PoseStamped is important for the IK solver. + group_name: The MoveIt2 planning group name. Used during setup to retrieve + the correct MoveIt2 object instance. + start_joint_state: Optional seed state (JointState or List[float]) for the IK solver. + ik_constraints: Optional MoveIt constraints message to respect during IK solving. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + ik_solution_joint_state: Optional[BlackboardKey], # -> Optional[JointState] + success: Optional[BlackboardKey], # -> bool + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + ik_solution_joint_state: The resulting JointState if IK is successful, otherwise None. + success: Boolean flag indicating whether a valid IK solution was found (True) or not (False). + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Get the ROS2 node and acquire the MoveIt2 object for the specified group.""" + # pylint: disable=attribute-defined-outside-init + self.node: rclpy.node.Node = kwargs["node"] + self.moveit2_obj: Optional[MoveIt2] = None + self.moveit2_lock: Optional[Lock] = None + self.ik_group_name: Optional[str] = None + + # Get group name from blackboard - needed to select the correct MoveIt2 instance + try: + self.ik_group_name = self.blackboard_get("group_name") + if not isinstance(self.ik_group_name, str) or not self.ik_group_name: + raise ValueError("group_name must be a non-empty string") + except (KeyError, ValueError) as e: + self.logger.error( + f"[{self.name}] Invalid or missing input 'group_name': {e}" + ) + return + + # Get the MoveIt2 object using the specific group name + try: + self.moveit2_obj, self.moveit2_lock = get_moveit2_object( + blackboard=self.blackboard, + group_name=self.ik_group_name, + node=self.node, + ) + if self.moveit2_obj is None or self.moveit2_lock is None: + # Ensure setup fails completely if objects aren't retrieved + raise RuntimeError( + "get_moveit2_object returned None for MoveIt2 object or lock" + ) + self.logger.info( + f"[{self.name}] Successfully obtained MoveIt2 object for IK group '{self.ik_group_name}'" + ) + except Exception as e: + self.logger.error( + f"[{self.name}] Failed to get MoveIt2 object for IK group '{self.ik_group_name}': {e}" + ) + self.moveit2_obj = None + self.moveit2_lock = None + + @override + def initialise(self) -> None: + """Reset status variables if needed.""" + self.logger.debug(f"[{self.name}] Initialising IK computation.") + self.blackboard_set("success", False) + self.blackboard_set("ik_solution_joint_state", None) + + 
@override + def update(self) -> py_trees.common.Status: + """Perform the IK computation using the synchronous compute_ik method.""" + # Check if setup was successful + if self.moveit2_obj is None or self.moveit2_lock is None: + self.logger.error( + f"[{self.name}] MoveIt2 object not initialized. Setup likely failed." + ) + return py_trees.common.Status.FAILURE + + # Check if MoveIt2 is currently locked by another behavior + if self.moveit2_lock.locked(): + self.logger.debug(f"[{self.name}] MoveIt2 is locked, waiting.") + return py_trees.common.Status.RUNNING + + # Acquire lock and perform IK computation + with self.moveit2_lock: + try: + # Read inputs from blackboard at runtime + target_pose_stamped: PoseStamped = self.blackboard_get("target_pose") + start_state_seed = None + try: + start_state_seed = self.blackboard_get("start_joint_state") + except KeyError: + self.logger.debug( + f"[{self.name}] Optional input 'start_joint_state' not found, using None." + ) + pass + constraints = None + try: + constraints = self.blackboard_get("constraints") + except KeyError: + self.logger.debug( + f"[{self.name}] Optional input 'constraints' not found, using None." + ) + pass + + # Validate target_pose_stamped type + if not isinstance(target_pose_stamped, PoseStamped): + self.logger.error( + f"[{self.name}] Input 'target_pose' is not a PoseStamped message." + ) + return py_trees.common.Status.FAILURE + + ik_constraints_msg: Optional[Constraints] = None + if isinstance(constraints, list) and constraints: + ik_constraints_msg = Constraints() + ik_constraints_msg.name = "ik_constraints_from_list" + self.logger.debug( + f"[{self.name}] Processing {len(constraints)} constraint specifications for IK." + ) + for constraint_type, constraint_kwargs in constraints: + try: + if constraint_type == MoveIt2ConstraintType.JOINT: + # Assumes wrapper has create_joint_constraint(**kwargs) -> JointConstraint + constraint_obj = ( + self.moveit2_obj.create_joint_constraints( + **constraint_kwargs + ) + ) + if constraint_obj: + ik_constraints_msg.joint_constraints.extend( + constraint_obj + ) + elif constraint_type == MoveIt2ConstraintType.POSITION: + # Assumes wrapper has create_position_constraint(**kwargs) -> PositionConstraint + constraint_obj = ( + self.moveit2_obj.create_position_constraint( + **constraint_kwargs + ) + ) + if constraint_obj: + ik_constraints_msg.position_constraints.append( + constraint_obj + ) + elif constraint_type == MoveIt2ConstraintType.ORIENTATION: + # Assumes wrapper has create_orientation_constraint(**kwargs) -> OrientationConstraint + constraint_obj = ( + self.moveit2_obj.create_orientation_constraint( + **constraint_kwargs + ) + ) + if constraint_obj: + ik_constraints_msg.orientation_constraints.append( + constraint_obj + ) + else: + self.logger.warning( + f"[{self.name}] Unknown constraint type '{constraint_type}' in list." + ) + + except AttributeError as ae: + self.logger.error( + f"[{self.name}] MoveIt2 wrapper missing 'create_X_constraint' method for type {constraint_type}? Error: {ae}" + ) + return py_trees.common.Status.FAILURE + except Exception as e_constr: + self.logger.error( + f"[{self.name}] Error processing constraint {constraint_type} with args {constraint_kwargs}: {e_constr}" + ) + return py_trees.common.Status.FAILURE + # Optional: Write generated message to blackboard for debugging + # self.blackboard_set("generated_ik_constraints_msg", ik_constraints_msg) + elif constraints is not None: + self.logger.warning( + f"[{self.name}] Input 'constraints' is not a list, ignoring. 
Type: {type(constraints)}" + ) + + # --- Call the synchronous compute_ik method provided in the API --- + # It takes position and orientation separately. + self.logger.info( + f"[{self.name}] Computing IK for group '{self.ik_group_name}' targeting pose in frame '{target_pose_stamped.header.frame_id}'..." + ) + + # The MoveIt service internally uses the frame_id from the PoseStamped in its request, + # even though the wrapper function takes position/orientation separately. + # We pass the pose components directly. + ik_result_state: Optional[JointState] = self.moveit2_obj.compute_ik( + position=target_pose_stamped.pose.position, + quat_xyzw=target_pose_stamped.pose.orientation, + start_joint_state=start_state_seed, + constraints=ik_constraints_msg, + ) + + # Determine success based on whether a result was returned + ik_success = ik_result_state is not None + + # Write results to blackboard + self.blackboard_set("success", ik_success) + self.blackboard_set("ik_solution_joint_state", ik_result_state) + + if ik_success: + self.logger.info(f"[{self.name}] IK computation successful.") + self.logger.info(f"IK solution: {ik_result_state}") + return py_trees.common.Status.SUCCESS + else: + self.logger.warning( + f"[{self.name}] IK computation failed (see MoveIt2 wrapper logs for error code)." + ) + return py_trees.common.Status.FAILURE + + except KeyError as e: + self.logger.error( + f"[{self.name}] Blackboard key error during IK update: {e}" + ) + return py_trees.common.Status.FAILURE + except Exception as e: + self.logger.error( + f"[{self.name}] Unexpected error during IK computation: {e}", + ) + return py_trees.common.Status.FAILURE + + # This line should not be reachable if the lock logic is correct + return py_trees.common.Status.FAILURE + + @override + def terminate(self, new_status: py_trees.common.Status) -> None: + """Log termination status.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py index 83b12871..12a05999 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py @@ -25,6 +25,7 @@ import numpy as np from overrides import override import py_trees +from py_trees.blackboard import Blackboard import rclpy import ros2_numpy @@ -101,11 +102,23 @@ def blackboard_outputs( def update(self) -> py_trees.common.Status: # Docstring copied from @override - if not self.blackboard_exists( - ["joint_positions", "joint_names", "tolerance", "weight", "constraints"] - ): - self.logger.error("MoveIt2Constraint: Missing input arguments.") - return py_trees.common.Status.FAILURE + for argument in [ + "joint_positions", + "joint_names", + "tolerance", + "weight", + "constraints", + ]: + if not self.blackboard_exists(argument): + self.logger.error( + f"MoveIt2JointConstraint: Missing input argument: {argument}" + ) + return py_trees.common.Status.FAILURE + + # # Check if joint positions are valid (not None or empty) + # if self.blackboard_get("joint_positions") is None or not self.blackboard_get("joint_positions"): + # self.logger.debug("MoveIt2Constraint: Joint positions not yet available.") + # return py_trees.common.Status.RUNNING constraint = ( MoveIt2ConstraintType.JOINT, @@ -155,6 +168,7 @@ def blackboard_inputs( constraints: Union[ BlackboardKey, Optional[List[Tuple[MoveIt2ConstraintType, Dict[str, Any]]]] ] = None, + group_name: 
Union[BlackboardKey, str] = "jaco_arm_with_articutool", ) -> None: """ Blackboard Inputs @@ -169,6 +183,8 @@ def blackboard_inputs( Note: if position is Vector3Stamped, use position.header.frame_id for frame_id (if not "") + group_name: The name of the MoveIt2 planning group + Parameters ---------- constraints: previous set of constraints to append to @@ -210,10 +226,14 @@ def setup(self, **kwargs): # Get Node from Kwargs self.node = kwargs["node"] + # Get group name from blackboard + self.group_name = self.blackboard_get("group_name") + # Get the MoveIt2 object, don't need lock (const read only) self.moveit2, _ = get_moveit2_object( self.blackboard, self.node, + self.group_name, ) # Get TF Listener from blackboard @@ -223,21 +243,20 @@ def setup(self, **kwargs): def update(self) -> py_trees.common.Status: # Docstring copied from @override - if not self.blackboard_exists( - [ - "offset", - "frame_id", - "target_link", - "tolerance", - "weight", - "constraints", - ] - ): - self.logger.error( - "MoveIt2PositionOffsetConstraint: Missing input arguments." - ) - self.logger.error(str(self.blackboard)) - return py_trees.common.Status.FAILURE + for argument in [ + "offset", + "frame_id", + "target_link", + "tolerance", + "weight", + "constraints", + ]: + if not self.blackboard_exists(argument): + self.logger.error( + f"MoveIt2PositionOffsetConstraint: Missing input argument: {argument}" + ) + self.logger.error(str(self.blackboard)) + return py_trees.common.Status.FAILURE if self.tf_lock.locked(): return py_trees.common.Status.RUNNING @@ -380,19 +399,20 @@ def blackboard_outputs( def update(self) -> py_trees.common.Status: # Docstring copied from @override - if not self.blackboard_exists( - [ - "position", - "frame_id", - "target_link", - "tolerance", - "weight", - "constraints", - ] - ): - self.logger.error("MoveIt2PositionConstraint: Missing input arguments.") - self.logger.error(str(self.blackboard)) - return py_trees.common.Status.FAILURE + for argument in [ + "position", + "frame_id", + "target_link", + "tolerance", + "weight", + "constraints", + ]: + if not self.blackboard_exists(argument): + self.logger.error( + f"MoveIt2PositionConstraint: Missing input argument: {argument}" + ) + self.logger.error(str(self.blackboard)) + return py_trees.common.Status.FAILURE position = self.blackboard_get("position") constraint = ( @@ -505,19 +525,21 @@ def blackboard_outputs( def update(self) -> py_trees.common.Status: # Docstring copied from @override - if not self.blackboard_exists( - [ - "quat_xyzw", - "frame_id", - "target_link", - "tolerance", - "weight", - "constraints", - "parameterization", - ] - ): - self.logger.error("MoveIt2OrientationConstraint: Missing input arguments.") - return py_trees.common.Status.FAILURE + for argument in [ + "quat_xyzw", + "frame_id", + "target_link", + "tolerance", + "weight", + "constraints", + "parameterization", + ]: + if not self.blackboard_exists(argument): + self.logger.error( + f"MoveIt2OrientationConstraint: Missing input argument: {argument}" + ) + self.logger.error(str(self.blackboard)) + return py_trees.common.Status.FAILURE quat_xyzw = self.blackboard_get("quat_xyzw") constraint = ( @@ -631,21 +653,23 @@ def blackboard_outputs( def update(self) -> py_trees.common.Status: # Docstring copied from @override - if not self.blackboard_exists( - [ - "pose", - "frame_id", - "target_link", - "tolerance_position", - "weight_position", - "tolerance_orientation", - "weight_orientation", - "constraints", - "parameterization", - ] - ): - 
self.logger.error("MoveIt2Constraint: Missing input arguments.") - return py_trees.common.Status.FAILURE + for argument in [ + "pose", + "frame_id", + "target_link", + "tolerance_position", + "weight_position", + "tolerance_orientation", + "weight_orientation", + "constraints", + "parameterization", + ]: + if not self.blackboard_exists(argument): + self.logger.error( + f"MoveIt2PoseConstraint: Missing input argument: {argument}" + ) + self.logger.error(str(self.blackboard)) + return py_trees.common.Status.FAILURE pose = self.blackboard_get("pose") constraint_orient = ( diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py index 1c85fe7d..8f115ee3 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py @@ -40,6 +40,7 @@ def blackboard_inputs( trajectory: Union[BlackboardKey, Optional[JointTrajectory]], terminate_timeout: Union[BlackboardKey, Duration] = Duration(seconds=10.0), terminate_rate_hz: Union[BlackboardKey, float] = 30.0, + group_name: Union[BlackboardKey, str] = "jaco_arm_with_articutool", ) -> None: """ Blackboard Inputs @@ -51,6 +52,7 @@ def blackboard_inputs( response from the MoveIt2 action server. terminate_rate_hz: How often to check whether a terminate request has been processed. + group_name: The name of the MoveIt2 planning group """ # pylint: disable=unused-argument, duplicate-code # Arguments are handled generically in base class. @@ -95,10 +97,14 @@ def setup(self, **kwargs): # Get Node from Kwargs self.node = kwargs["node"] + # Get group name from blackboard + self.group_name = self.blackboard_get("group_name") + # Get the MoveIt2 object. self.moveit2, self.moveit2_lock = get_moveit2_object( self.blackboard, self.node, + self.group_name, ) @override diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py index e6fd923c..8d2e85d9 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py @@ -109,6 +109,7 @@ def blackboard_inputs( Union[BlackboardKey, float] ] = 10.0, # np.sqrt(6.0) * np.pi, max_path_len_joint: Optional[Union[BlackboardKey, Dict[str, float]]] = None, + group_name: Union[BlackboardKey, str] = "jaco_arm_with_articutool", ) -> None: """ Blackboard Inputs @@ -147,6 +148,7 @@ def blackboard_inputs( (likely radians). Default sqrt(6.0 * pi) (i.e. 180 degrees each for a 6DOF robot) max_path_len_joint: Maximum distance each joint (or subset of joints) is allowed to travel. As dictionary: -> + group_name: The name of the MoveIt2 planning group """ # TODO: consider cartesian parameter struct # pylint: disable=unused-argument, duplicate-code @@ -190,10 +192,14 @@ def setup(self, **kwargs): # Get Node from Kwargs self.node = kwargs["node"] + # Get group name from blackboard + self.group_name = self.blackboard_get("group_name") + # Get the MoveIt2 object. 
self.moveit2, self.moveit2_lock = get_moveit2_object( self.blackboard, self.node, + self.group_name, ) # Get TF Listener from blackboard diff --git a/ada_feeding/ada_feeding/behaviors/ros/__init__.py b/ada_feeding/ada_feeding/behaviors/ros/__init__.py index fc63d952..6528cc88 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/ros/__init__.py @@ -8,6 +8,7 @@ UpdateTimestamp, CreatePoseStamped, PoseStampedToTwistStamped, + StampPoseFromPose, ) from .tf import ( GetTransform, diff --git a/ada_feeding/ada_feeding/behaviors/ros/msgs.py b/ada_feeding/ada_feeding/behaviors/ros/msgs.py index 2b01ad3e..b85c23ff 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/msgs.py +++ b/ada_feeding/ada_feeding/behaviors/ros/msgs.py @@ -8,7 +8,7 @@ """ # Standard imports -from typing import Any, Callable, List, Optional, Tuple, Union +from typing import Any, Callable, List, Optional, Tuple, Union, Dict # Third-party imports from geometry_msgs.msg import ( @@ -22,15 +22,22 @@ from overrides import override import py_trees import rclpy +import rclpy.node +import rclpy.time from rclpy.duration import Duration import ros2_numpy from scipy.spatial.transform import Rotation as R -from tf2_geometry_msgs import PointStamped, PoseStamped +from tf2_geometry_msgs import Pose, PointStamped, PoseStamped # Local imports from ada_feeding.helpers import BlackboardKey from ada_feeding.behaviors import BlackboardBehavior +# Third-party imports +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status + class UpdateTimestamp(BlackboardBehavior): """ @@ -392,3 +399,127 @@ def update(self) -> py_trees.common.Status: self.blackboard_set("twist_stamped", twist_stamped) return py_trees.common.Status.SUCCESS + + +# -*- coding: utf-8 -*- +# (Add appropriate Copyright/License if desired) + +""" +This module defines the StampPoseFromPose behavior, which converts a +geometry_msgs/Pose message from the blackboard to a geometry_msgs/PoseStamped +message by adding a header with a specified frame_id and the current timestamp. +""" + + +class StampPoseFromPose(BlackboardBehavior): + """ + Takes a geometry_msgs/Pose from the blackboard, adds a header + (specified frame_id and current timestamp), and writes the resulting + geometry_msgs/PoseStamped back to the blackboard. + + This is useful for providing correctly framed input to services or behaviors + that require PoseStamped, such as MoveIt2ComputeIK, when the source only + provides a Pose. + """ + + def blackboard_inputs( + self, + input_pose: Union[BlackboardKey, Pose], + frame_id: Union[BlackboardKey, str], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + input_pose: BlackboardKey resolving to the geometry_msgs/Pose message to stamp. + frame_id: BlackboardKey resolving to the string frame_id to put in the header. + This frame_id should represent the coordinate system in which the + input_pose values are actually defined. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + output_pose_stamped: Optional[BlackboardKey], # -> Optional[PoseStamped] + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + output_pose_stamped: BlackboardKey where the resulting PoseStamped will be written. + The value will be None or unchanged if the behavior fails. 
+ """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Gets the ROS2 node from the arguments passed by the tree runner.""" + # pylint: disable=attribute-defined-outside-init + try: + self.node: rclpy.node.Node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + self.node = None + + @override + def initialise(self) -> None: + """Optionally log initialization.""" + self.logger.debug(f"[{self.name}] Initializing.") + + @override + def update(self) -> Status: + """ + Reads the input Pose and frame_id, creates a PoseStamped with a + current timestamp, and writes it to the output blackboard key. + Returns SUCCESS if successful, FAILURE otherwise. + """ + # Verify setup provided the node needed for timestamping + if not hasattr(self, "node") or self.node is None: + self.logger.error( + f"[{self.name}] Node object not available. Setup likely failed." + ) + return Status.FAILURE + + try: + pose_in = self.blackboard_get("input_pose") + frame_id_str = self.blackboard_get("frame_id") + + if not isinstance(pose_in, Pose): + self.logger.error( + f"[{self.name}] Input 'input_pose' is not a geometry_msgs/Pose (is type: {type(pose_in)})." + ) + return Status.FAILURE + + if not isinstance(frame_id_str, str) or not frame_id_str: + self.logger.error( + f"[{self.name}] Input 'frame_id' must be a non-empty string (is: '{frame_id_str}')." + ) + return Status.FAILURE + + pose_stamped_out = PoseStamped() + pose_stamped_out.header.frame_id = frame_id_str + pose_stamped_out.header.stamp = self.node.get_clock().now().to_msg() + pose_stamped_out.pose = pose_in + + self.blackboard_set("output_pose_stamped", pose_stamped_out) + self.logger.info( + f"[{self.name}] Successfully stamped Pose into frame '{frame_id_str}'." + ) + return Status.SUCCESS + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return Status.FAILURE + except Exception as e: + self.logger.error(f"[{self.name}] Unexpected error stamping pose: {e}") + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/ros/tf.py b/ada_feeding/ada_feeding/behaviors/ros/tf.py index 9c0cda29..332eb242 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/tf.py +++ b/ada_feeding/ada_feeding/behaviors/ros/tf.py @@ -348,10 +348,16 @@ def update(self) -> py_trees.common.Status: transformed_msg = None with self.tf_lock: if target_frame is not None: + import copy + + msg_to_transform = copy.deepcopy(stamped_msg) + # Set stamp to zero to request latest transform + msg_to_transform.header.stamp = Time(seconds=0, nanoseconds=0).to_msg() + # Compute the transform from the TF tree try: transformed_msg = self.tf_buffer.transform( - stamped_msg, + msg_to_transform, target_frame, timeout, ) diff --git a/ada_feeding/ada_feeding/behaviors/state/__init__.py b/ada_feeding/ada_feeding/behaviors/state/__init__.py new file mode 100644 index 00000000..b5084a04 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This package contains custom py_tree behaviors for interacting with ROS. 
+""" + +from .get_joint_states import ( + GetJointStates, +) +from .extract_joints_from_state import ( + ExtractJointsFromState, +) +from .combine_joint_states import ( + CombineJointStates, +) +from .extract_pose_from_poses_by_link import ( + ExtractPoseFromPosesByLink, +) +from .extract_pose_components import ( + ExtractPoseComponents, +) +from .check_jaco_directional_manipulability import ( + CheckJacoDirectionalManipulability, +) +from .check_articutool_path_orientation_feasibility import ( + CheckArticutoolPathOrientationFeasibility, +) +from .load_pinocchio_model import ( + LoadPinocchioModel, +) +from .check_articutool_path_leveling_feasibility import ( + CheckArticutoolPathLevelingFeasibility, +) +from .offset_position_from_pose import ( + OffsetPositionFromPose, +) +from .extract_pose_from_transform_stamped import ( + ExtractPoseFromTransformStamped, +) diff --git a/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_dynamic_feasibility.py b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_dynamic_feasibility.py new file mode 100644 index 00000000..d3809d03 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_dynamic_feasibility.py @@ -0,0 +1,346 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the CheckArticutoolPathDynamicFeasibility behavior. + +This behavior extends the kinematic feasibility check by also verifying if the +Articutool has the dynamic capability (i.e., sufficient motor velocity) to +maintain a level orientation throughout a given trajectory of the Jaco arm. +It combines the kinematic check with a dynamic analysis at each waypoint. +""" + +# Standard imports +import math +from typing import Union, Optional, List, Tuple + +# Third-party imports +from geometry_msgs.msg import Pose +from moveit_msgs.msg import RobotTrajectory +from trajectory_msgs.msg import JointTrajectory +import numpy as np +from overrides import override +from scipy.spatial.transform import Rotation +import py_trees +from py_trees.common import Status +import rclpy.node +import pinocchio as pin + +# Local imports +from ada_feeding.behaviors import BlackboardBehavior +from ada_feeding.helpers import BlackboardKey + + +class CheckArticutoolPathDynamicFeasibility(BlackboardBehavior): + """ + Checks if a Jaco trajectory is dynamically feasible for the Articutool. + + For each point along a trajectory, this behavior performs a two-part check: + 1. Kinematic Check: Can the Articutool *be* level at this Jaco pose? + (Same as CheckArticutoolPathLevelingFeasibility). + 2. Dynamic Check: Is the Articutool *fast enough* to counteract the + tilting motion induced by the Jaco arm's movement? + + If both checks pass for all waypoints, it returns SUCCESS. Otherwise, FAILURE. 
+ """ + + EPSILON = 1e-6 + WORLD_Z_UP_VECTOR = np.array([0.0, 0.0, 1.0]) + + def blackboard_inputs( + self, + pinocchio_model: Union[BlackboardKey, pin.Model], + pinocchio_data: Union[BlackboardKey, pin.Data], + jaco_joint_names_pin: Union[BlackboardKey, List[str]], + jaco_vel_indices_pin: Union[BlackboardKey, List[int]], + jaco_ee_frame_id_pin: Union[BlackboardKey, int], + jaco_trajectory: Union[BlackboardKey, RobotTrajectory, JointTrajectory], + articutool_pitch_limits_rad: Union[BlackboardKey, Tuple[float, float]], + articutool_roll_limits_rad: Union[BlackboardKey, Tuple[float, float]], + articutool_max_joint_velocity: Union[BlackboardKey, float], + num_trajectory_points_to_check: Union[BlackboardKey, int] = 10, + ) -> None: + """Define blackboard inputs.""" + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + articutool_is_dynamic_feasible: Optional[BlackboardKey], + ) -> None: + """Define blackboard outputs.""" + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + """Initialize the behavior.""" + super().__init__(name=name, **kwargs) + self.node: Optional[rclpy.node.Node] = None + self._pin_model: Optional[pin.Model] = None + self._pin_data: Optional[pin.Data] = None + self._jaco_joint_names_pin: Optional[List[str]] = None + self._jaco_vel_indices_pin: Optional[List[int]] = None + self._jaco_ee_frame_id_pin: Optional[int] = None + self._pitch_limits_rad: Optional[Tuple[float, float]] = None + self._roll_limits_rad: Optional[Tuple[float, float]] = None + self._max_atool_vel: Optional[float] = None + self._pinocchio_ready = False + + @override + def setup(self, **kwargs): + """Get the ROS2 node from kwargs.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + + def _get_pinocchio_essentials_from_blackboard(self) -> bool: + """Read Pinocchio model, data, and relevant IDs/names from blackboard.""" + if self._pinocchio_ready: + return True + try: + self._pin_model = self.blackboard_get("pinocchio_model") + self._pin_data = self.blackboard_get("pinocchio_data") + self._jaco_joint_names_pin = self.blackboard_get("jaco_joint_names_pin") + self._jaco_vel_indices_pin = self.blackboard_get("jaco_vel_indices_pin") + self._jaco_ee_frame_id_pin = self.blackboard_get("jaco_ee_frame_id_pin") + self._pitch_limits_rad = self.blackboard_get("articutool_pitch_limits_rad") + self._roll_limits_rad = self.blackboard_get("articutool_roll_limits_rad") + self._max_atool_vel = self.blackboard_get("articutool_max_joint_velocity") + self._pinocchio_ready = True + return True + except KeyError as e: + self.logger.error( + f"[{self.name}] Failed to get required info from blackboard: {e}." + ) + return False + + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to the range [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _solve_articutool_ik_for_leveling( + self, target_y_axis_in_atool_base: np.ndarray + ) -> List[Tuple[float, float]]: + """ + Analytical IK solver based on the reconciled kinematic model. + Solves for Articutool (pitch, roll) to align its tool_tip Y-axis + with the given target vector expressed in the Articutool's base frame. 
+ """ + vx, vy, vz = target_y_axis_in_atool_base + solutions: List[Tuple[float, float]] = [] + + asin_arg_for_tr = -vx + if not (-1.0 - self.EPSILON <= asin_arg_for_tr <= 1.0 + self.EPSILON): + return [] + + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = self._normalize_angle(math.pi - theta_r_sol1) + + candidate_thetas_r = [theta_r_sol1] + if not math.isclose(theta_r_sol1, theta_r_sol2, abs_tol=self.EPSILON): + candidate_thetas_r.append(theta_r_sol2) + + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + if math.isclose(cos_theta_r, 0.0, abs_tol=self.EPSILON): + if math.isclose(vy, 0.0, abs_tol=self.EPSILON) and math.isclose( + vz, 0.0, abs_tol=self.EPSILON + ): + solutions.append((0.0, self._normalize_angle(theta_r))) + continue + + theta_p_sol = math.atan2(vz, vy) + solutions.append( + (self._normalize_angle(theta_p_sol), self._normalize_angle(theta_r)) + ) + return solutions + + def _compute_articutool_jacobian( + self, theta_p: float, theta_r: float + ) -> np.ndarray: + """ + Computes the 3x2 analytical Jacobian for the Articutool leveling task. + This must be derived from the same FK model that the IK inverts. + """ + cp, sp = math.cos(theta_p), math.sin(theta_p) + cr, sr = math.cos(theta_r), math.sin(theta_r) + + # Partial derivatives of y_F0 = [-sr, cp*cr, sp*cr]^T + # d/d(theta_p) + j11 = 0 + j21 = -sp * cr + j31 = cp * cr + + # d/d(theta_r) + j12 = -cr + j22 = -cp * sr + j32 = -sp * sr + + return np.array([[j11, j12], [j21, j22], [j31, j32]]) + + def _get_jaco_trajectory_points( + self, trajectory_input: Union[RobotTrajectory, JointTrajectory] + ) -> Optional[List[Tuple[np.ndarray, np.ndarray]]]: + """Extracts joint positions and velocities from a trajectory message.""" + if isinstance(trajectory_input, RobotTrajectory): + jt = trajectory_input.joint_trajectory + elif isinstance(trajectory_input, JointTrajectory): + jt = trajectory_input + else: + return None + + points = [] + for point in jt.points: + positions = np.array(point.positions) + velocities = np.array(point.velocities) + points.append((positions, velocities)) + return points + + @override + def update(self) -> Status: + """Execute the behavior's logic.""" + if not self.node or not self._get_pinocchio_essentials_from_blackboard(): + self.feedback_message = "Behavior not properly initialized." + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + + try: + trajectory_input = self.blackboard_get("jaco_trajectory") + num_points = self.blackboard_get("num_trajectory_points_to_check") + + jaco_points = self._get_jaco_trajectory_points(trajectory_input) + + if not jaco_points: + self.logger.warn( + f"[{self.name}] Trajectory is empty. Assuming feasible." + ) + self.blackboard_set("articutool_is_dynamic_feasible", True) + return Status.SUCCESS + + indices = np.linspace(0, len(jaco_points) - 1, num_points, dtype=int) + + last_valid_atool_q = None + + for idx in indices: + q_jaco, v_jaco = jaco_points[idx] + + # --- KINEMATIC FEASIBILITY CHECK (Prerequisite) --- + + # 1. Get Articutool base orientation from Jaco FK + pin.forwardKinematics(self._pin_model, self._pin_data, q_jaco) + pin.updateFramePlacements(self._pin_model, self._pin_data) + T_world_atool_base = self._pin_data.oMf[self._jaco_ee_frame_id_pin] + R_world_atool_base = Rotation.from_matrix(T_world_atool_base.rotation) + + # 2. 
Transform world "up" vector to Articutool's base frame + target_y_in_atool_base = R_world_atool_base.inv().apply( + self.WORLD_Z_UP_VECTOR + ) + + # 3. Solve Articutool IK for leveling + ik_solutions = self._solve_articutool_ik_for_leveling( + target_y_in_atool_base + ) + + valid_solutions = [ + sol + for sol in ik_solutions + if self._pitch_limits_rad[0] - self.EPSILON + <= sol[0] + <= self._pitch_limits_rad[1] + self.EPSILON + and self._roll_limits_rad[0] - self.EPSILON + <= sol[1] + <= self._roll_limits_rad[1] + self.EPSILON + ] + + if not valid_solutions: + self.feedback_message = ( + f"Path is kinematically infeasible at point {idx}." + ) + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + + # Choose best solution (e.g., closest to previous) + if last_valid_atool_q is None: + q_atool = valid_solutions[0] + else: + distances = [ + np.linalg.norm(np.array(sol) - last_valid_atool_q) + for sol in valid_solutions + ] + q_atool = valid_solutions[np.argmin(distances)] + last_valid_atool_q = np.array(q_atool) + + # --- DYNAMIC FEASIBILITY CHECK --- + + # 1. Calculate disturbance velocity from Jaco arm + J_jaco_full = pin.computeFrameJacobian( + self._pin_model, + self._pin_data, + q_jaco, + self._jaco_ee_frame_id_pin, + pin.ReferenceFrame.WORLD, + ) + v_jaco_full = J_jaco_full @ v_jaco + omega_disturbance_world = v_jaco_full[3:6] # Angular velocity part + + # 2. Transform disturbance to Articutool's local frame + omega_correction_local = -R_world_atool_base.inv().apply( + omega_disturbance_world + ) + + # 3. Compute Articutool's Jacobian at the required configuration + J_atool = self._compute_articutool_jacobian(q_atool[0], q_atool[1]) + + # 4. Solve for required Articutool joint velocities + try: + J_atool_pinv = np.linalg.pinv(J_atool, rcond=1e-4) + q_dot_atool_required = J_atool_pinv @ omega_correction_local + except np.linalg.LinAlgError: + self.feedback_message = ( + f"Articutool Jacobian is singular at point {idx}." + ) + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + + # 5. The final check + if np.linalg.norm(q_dot_atool_required) > self._max_atool_vel: + self.feedback_message = ( + f"Path is dynamically infeasible at point {idx}. " + f"Required vel: {np.linalg.norm(q_dot_atool_required):.2f} > " + f"Max vel: {self._max_atool_vel:.2f}" + ) + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + + # If all checks pass + self.feedback_message = "Trajectory is dynamically feasible." 
+ self.blackboard_set("articutool_is_dynamic_feasible", True) + return Status.SUCCESS + + except KeyError as e: + self.feedback_message = f"Blackboard key error: {e}" + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + except Exception as e: + self.feedback_message = f"Unexpected error: {e}" + self.blackboard_set("articutool_is_dynamic_feasible", False) + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + """Log termination status.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + + @override + def initialise(self) -> None: + """Reset blackboard output on initialization.""" + self.logger.debug(f"[{self.name}] Initializing.") + self.blackboard_set("articutool_is_dynamic_feasible", None) diff --git a/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_leveling_feasibility.py b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_leveling_feasibility.py new file mode 100644 index 00000000..7db94ccc --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_leveling_feasibility.py @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the CheckArticutoolPathLevelingFeasibility behavior. + +This behavior checks if the Articutool can maintain a "level" orientation +(tool_tip Y-axis aligned against gravity) throughout a given trajectory +of the Jaco arm. It iterates through the trajectory, and for each waypoint, +it calculates if a valid, within-limits IK solution exists for the +Articutool to achieve leveling. +""" + +# Standard imports +import math +from typing import Union, Optional, List, Tuple + +# Third-party imports +from geometry_msgs.msg import Pose +from moveit_msgs.msg import RobotTrajectory +from trajectory_msgs.msg import JointTrajectory +import numpy as np +from overrides import override +from scipy.spatial.transform import Rotation +import py_trees +from py_trees.common import Status +import rclpy.node +import pinocchio as pin + +# Local imports +# Assuming these are in your project structure +from ada_feeding.behaviors import BlackboardBehavior +from ada_feeding.helpers import BlackboardKey + + +class CheckArticutoolPathLevelingFeasibility(BlackboardBehavior): + """ + Checks if the Articutool can remain level throughout a Jaco trajectory. + + This behavior iterates through a specified number of points along a given + Jaco arm trajectory. For each point, it calculates the corresponding + Jaco end-effector pose and then solves the inverse kinematics for the + Articutool to determine if there's a joint configuration that would keep + the tool_tip's Y-axis pointing upwards (level), within the Articutool's + joint limits. + + If a valid leveling solution exists for all checked points, the behavior + returns SUCCESS. If any point is found to be infeasible, it immediately + returns FAILURE. This is crucial for validating transport motions where + spillage must be prevented. + """ + + EPSILON = 1e-6 + # The desired "up" vector for the tool tip's Y-axis in the world frame. 
+ WORLD_Z_UP_VECTOR = np.array([0.0, 0.0, 1.0]) + + def blackboard_inputs( + self, + pinocchio_model: Union[BlackboardKey, pin.Model], + pinocchio_data: Union[BlackboardKey, pin.Data], + jaco_joint_names_pin: Union[BlackboardKey, List[str]], + jaco_ee_frame_id_pin: Union[BlackboardKey, int], + jaco_trajectory: Union[BlackboardKey, RobotTrajectory, JointTrajectory], + articutool_pitch_limits_rad: Union[BlackboardKey, Tuple[float, float]], + articutool_roll_limits_rad: Union[BlackboardKey, Tuple[float, float]], + num_trajectory_points_to_check: Union[BlackboardKey, int] = 10, + ) -> None: + """Define blackboard inputs.""" + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + articutool_is_leveling_feasible: Optional[BlackboardKey], + ) -> None: + """Define blackboard outputs.""" + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + """Initialize the behavior.""" + super().__init__(name=name, **kwargs) + self.node: Optional[rclpy.node.Node] = None + self._pin_model: Optional[pin.Model] = None + self._pin_data: Optional[pin.Data] = None + self._jaco_joint_names_pin: Optional[List[str]] = None + self._jaco_ee_frame_id_pin: Optional[int] = None + self._pitch_limits_rad: Optional[Tuple[float, float]] = None + self._roll_limits_rad: Optional[Tuple[float, float]] = None + self._pinocchio_ready = False + + @override + def setup(self, **kwargs): + """Get the ROS2 node from kwargs.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + + def _get_pinocchio_essentials_from_blackboard(self) -> bool: + """Read Pinocchio model, data, and relevant IDs/names from blackboard.""" + if self._pinocchio_ready: + return True + try: + self._pin_model = self.blackboard_get("pinocchio_model") + self._pin_data = self.blackboard_get("pinocchio_data") + self._jaco_joint_names_pin = self.blackboard_get("jaco_joint_names_pin") + self._jaco_ee_frame_id_pin = self.blackboard_get("jaco_ee_frame_id_pin") + self._pitch_limits_rad = self.blackboard_get("articutool_pitch_limits_rad") + self._roll_limits_rad = self.blackboard_get("articutool_roll_limits_rad") + + if not all( + [ + isinstance(self._pin_model, pin.Model), + isinstance(self._pin_data, pin.Data), + isinstance(self._jaco_joint_names_pin, list), + isinstance(self._jaco_ee_frame_id_pin, int), + isinstance(self._pitch_limits_rad, tuple) + and len(self._pitch_limits_rad) == 2, + isinstance(self._roll_limits_rad, tuple) + and len(self._roll_limits_rad) == 2, + ] + ): + self.logger.error( + f"[{self.name}] One or more Pinocchio-related inputs from blackboard are invalid." + ) + return False + self._pinocchio_ready = True + return True + except KeyError as e: + self.logger.error( + f"[{self.name}] Failed to get Pinocchio info from blackboard: {e}." + ) + return False + + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to the range [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _solve_articutool_ik_for_leveling( + self, target_y_axis_in_atool_base: np.ndarray + ) -> List[Tuple[float, float]]: + """ + Analytical IK solver based on the corrected kinematic model. + Solves for Articutool (pitch, roll) to align its tool_tip Y-axis + with the given target vector expressed in the Articutool's base frame. 
+ """ + vx, vy, vz = target_y_axis_in_atool_base + solutions: List[Tuple[float, float]] = [] + + # From sin(theta_r) = -vx (as per corrected model) + asin_arg_for_tr = -vx + if not (-1.0 - self.EPSILON <= asin_arg_for_tr <= 1.0 + self.EPSILON): + return [] # No real solution for theta_r + + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = self._normalize_angle(math.pi - theta_r_sol1) + + candidate_thetas_r = [theta_r_sol1] + if not math.isclose(theta_r_sol1, theta_r_sol2, abs_tol=self.EPSILON): + candidate_thetas_r.append(theta_r_sol2) + + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + # Handle singularity when cos(theta_r) is near zero + if math.isclose(cos_theta_r, 0.0, abs_tol=self.EPSILON): + # If singular, vy and vz must also be near zero for a solution to exist. + if math.isclose(vy, 0.0, abs_tol=self.EPSILON) and math.isclose( + vz, 0.0, abs_tol=self.EPSILON + ): + # theta_p is indeterminate, choose a convention (e.g., 0) + solutions.append((0.0, self._normalize_angle(theta_r))) + continue + + # Standard case: theta_p = atan2(vz, vy) as per corrected model + theta_p_sol = math.atan2(vz, vy) + solutions.append( + (self._normalize_angle(theta_p_sol), self._normalize_angle(theta_r)) + ) + return solutions + + def _get_jaco_ee_poses_from_trajectory( + self, trajectory_input: Union[RobotTrajectory, JointTrajectory] + ) -> Optional[List[Pose]]: + """Extract or compute Jaco EE poses from a trajectory message via FK.""" + jaco_ee_world_poses: List[Pose] = [] + + # Determine the source of joint trajectory points + if isinstance(trajectory_input, RobotTrajectory): + # This implementation assumes the trajectory is defined in joint space. + # A multi_dof_joint_trajectory (Cartesian) is not used for FK. + if trajectory_input.multi_dof_joint_trajectory.points: + self.logger.error( + f"[{self.name}] Multi-DOF trajectories are not supported for FK-based checks." + ) + return None + jt = trajectory_input.joint_trajectory + elif isinstance(trajectory_input, JointTrajectory): + jt = trajectory_input + else: + self.logger.error( + f"[{self.name}] Input 'jaco_trajectory' has unexpected type: {type(trajectory_input)}." + ) + return None + + # Perform Forward Kinematics for each joint trajectory point + q_robot = pin.neutral(self._pin_model) + for point in jt.points: + # Map positions from trajectory to the correct joints in Pinocchio model + # This assumes jt.joint_names corresponds to the Jaco arm joints + traj_joint_map = { + name: point.positions[i] for i, name in enumerate(jt.joint_names) + } + for j_name_pin in self._jaco_joint_names_pin: + if j_name_pin in traj_joint_map: + joint_id = self._pin_model.getJointId(j_name_pin) + joint_obj = self._pin_model.joints[joint_id] + theta = traj_joint_map[j_name_pin] + + # Assuming revolute joints for Jaco. Update q based on Pinocchio's model. 
+ if ( + joint_obj.nq == 2 and joint_obj.nv == 1 + ): # Revolute joint represented by cos/sin + q_robot[joint_obj.idx_q] = math.cos(theta) + q_robot[joint_obj.idx_q + 1] = math.sin(theta) + elif ( + joint_obj.nq == 1 and joint_obj.nv == 1 + ): # Revolute or Prismatic + q_robot[joint_obj.idx_q] = theta + + pin.forwardKinematics(self._pin_model, self._pin_data, q_robot) + pin.updateFramePlacements(self._pin_model, self._pin_data) + ee_transform: pin.SE3 = self._pin_data.oMf[self._jaco_ee_frame_id_pin] + + pose = Pose() + pose.position.x, pose.position.y, pose.position.z = ee_transform.translation + quat_xyzw = Rotation.from_matrix(ee_transform.rotation).as_quat() + ( + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ) = quat_xyzw + jaco_ee_world_poses.append(pose) + + return jaco_ee_world_poses + + @override + def update(self) -> Status: + """Execute the behavior's logic.""" + if not self.node or not self._get_pinocchio_essentials_from_blackboard(): + self.feedback_message = "Behavior not properly initialized." + self.blackboard_set("articutool_is_leveling_feasible", False) + return Status.FAILURE + + try: + trajectory_input = self.blackboard_get("jaco_trajectory") + num_points = self.blackboard_get("num_trajectory_points_to_check") + + jaco_ee_poses = self._get_jaco_ee_poses_from_trajectory(trajectory_input) + + if jaco_ee_poses is None: + self.feedback_message = "Failed to get Jaco EE poses from trajectory." + self.blackboard_set("articutool_is_leveling_feasible", False) + return Status.FAILURE + + if not jaco_ee_poses: + self.logger.warn( + f"[{self.name}] Trajectory is empty. Assuming feasible." + ) + self.blackboard_set("articutool_is_leveling_feasible", True) + return Status.SUCCESS + + # Select a subset of points to check for efficiency + indices = ( + np.linspace(0, len(jaco_ee_poses) - 1, num_points, dtype=int) + if num_points < len(jaco_ee_poses) + else range(len(jaco_ee_poses)) + ) + + for idx in indices: + ee_pose: Pose = jaco_ee_poses[idx] + R_World_JacoEE = Rotation.from_quat( + [ + ee_pose.orientation.x, + ee_pose.orientation.y, + ee_pose.orientation.z, + ee_pose.orientation.w, + ] + ) + + # Transform the world "up" vector into the Articutool's base frame + target_y_in_atool_base = R_World_JacoEE.inv().apply( + self.WORLD_Z_UP_VECTOR + ) + + ik_solutions = self._solve_articutool_ik_for_leveling( + target_y_in_atool_base + ) + + # Check if any solution is within joint limits + is_feasible_at_point = any( + self._pitch_limits_rad[0] - self.EPSILON + <= sol[0] + <= self._pitch_limits_rad[1] + self.EPSILON + and self._roll_limits_rad[0] - self.EPSILON + <= sol[1] + <= self._roll_limits_rad[1] + self.EPSILON + for sol in ik_solutions + ) + + if not is_feasible_at_point: + self.feedback_message = f"Path is infeasible. No leveling solution at trajectory point {idx}." + self.logger.warn(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_is_leveling_feasible", False) + return Status.FAILURE + + # If all checked points are feasible + self.feedback_message = ( + "Articutool can maintain leveling throughout the trajectory." 
+ ) + self.logger.info(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_is_leveling_feasible", True) + return Status.SUCCESS + + except KeyError as e: + self.feedback_message = f"Blackboard key error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_is_leveling_feasible", False) + return Status.FAILURE + except Exception as e: + self.feedback_message = f"Unexpected error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("articutool_is_leveling_feasible", False) + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + """Log termination status.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") + + @override + def initialise(self) -> None: + """Reset blackboard output on initialization.""" + self.logger.debug(f"[{self.name}] Initializing.") + self.blackboard_set("articutool_is_leveling_feasible", None) + # Do not reset pinocchio_ready here to avoid re-reading on every tick diff --git a/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_orientation_feasibility.py b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_orientation_feasibility.py new file mode 100644 index 00000000..bde169c8 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/check_articutool_path_orientation_feasibility.py @@ -0,0 +1,490 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the CheckArticutoolPathOrientationFeasibility behavior. +This behavior checks if the Articutool can maintain a desired world orientation +for its tool tip throughout a given trajectory of the Jaco arm. +It uses Pinocchio for FK if the Jaco trajectory is in joint space, +and an analytical IK for the Articutool. +""" + +# Standard imports +import math +from typing import Union, Optional, List, Tuple + +# Third-party imports +from geometry_msgs.msg import ( + PoseStamped, + Quaternion as QuaternionMsg, + Pose, + TransformStamped, +) +from moveit_msgs.msg import RobotTrajectory +from trajectory_msgs.msg import ( + JointTrajectoryPoint, + JointTrajectory, + MultiDOFJointTrajectoryPoint, +) +import numpy as np +from overrides import override +from scipy.spatial.transform import Rotation +import py_trees +import py_trees.blackboard +from py_trees.common import Status +import rclpy.node +import pinocchio as pin + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class CheckArticutoolPathOrientationFeasibility(BlackboardBehavior): + """ + Checks Articutool's ability to maintain tool tip orientation during Jaco trajectory. + Uses Pinocchio for FK if Jaco trajectory is joint-space. 
+ """ + + EPSILON = 1e-6 + + def blackboard_inputs( + self, + pinocchio_model: Union[BlackboardKey, pin.Model], + pinocchio_data: Union[BlackboardKey, pin.Data], + # Jaco info for FK if needed + jaco_joint_names_pin: Union[ + BlackboardKey, List[str] + ], # Names as in Pinocchio model for q mapping + jaco_ee_frame_id_pin: Union[ + BlackboardKey, int + ], # Pinocchio frame ID for Jaco EE + # Articutool info for its own q mapping if full robot q is constructed + articutool_joint_names_pin: Union[ + BlackboardKey, List[str] + ], # Names as in Pinocchio model + # Trajectory and target + jaco_trajectory: Union[ + BlackboardKey, RobotTrajectory, JointTrajectory + ], # Can be full RobotTrajectory or just JointTrajectory + desired_tool_tip_world_orientation: Union[BlackboardKey, QuaternionMsg], + # Articutool specific limits for its IK + articutool_pitch_limits_rad: Union[BlackboardKey, Tuple[float, float]], + articutool_roll_limits_rad: Union[BlackboardKey, Tuple[float, float]], + num_trajectory_points_to_check: Union[BlackboardKey, int] = 10, + ) -> None: + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + articutool_is_orientation_feasible: Optional[BlackboardKey], + ) -> None: + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[rclpy.node.Node] = None + self._pin_model: Optional[pin.Model] = None + self._pin_data: Optional[pin.Data] = None + self._jaco_joint_names_pin: Optional[List[str]] = None + self._jaco_ee_frame_id_pin: Optional[int] = None + self._articutool_joint_names_pin: Optional[List[str]] = None + self._pitch_limits_rad: Optional[Tuple[float, float]] = None + self._roll_limits_rad: Optional[Tuple[float, float]] = None + self._pinocchio_ready = False + + @override + def setup(self, **kwargs): + """Gets the ROS2 node.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + + def _get_pinocchio_essentials_from_blackboard(self) -> bool: + """Reads Pinocchio model, data, and relevant IDs/names from blackboard.""" + if self._pinocchio_ready: # Avoid re-reading if already successful + return True + try: + self._pin_model = self.blackboard_get("pinocchio_model") + self._pin_data = self.blackboard_get("pinocchio_data") + self._jaco_joint_names_pin = self.blackboard_get("jaco_joint_names_pin") + self._jaco_ee_frame_id_pin = self.blackboard_get("jaco_ee_frame_id_pin") + self._articutool_joint_names_pin = self.blackboard_get( + "articutool_joint_names_pin" + ) + self._pitch_limits_rad = self.blackboard_get("articutool_pitch_limits_rad") + self._roll_limits_rad = self.blackboard_get("articutool_roll_limits_rad") + + if ( + not isinstance(self._pin_model, pin.Model) + or not isinstance(self._pin_data, pin.Data) + or not isinstance(self._jaco_joint_names_pin, list) + or not isinstance(self._jaco_ee_frame_id_pin, int) + or not isinstance(self._articutool_joint_names_pin, list) + or not ( + isinstance(self._pitch_limits_rad, tuple) + and len(self._pitch_limits_rad) == 2 + ) + or not ( + isinstance(self._roll_limits_rad, tuple) + and len(self._roll_limits_rad) == 2 + ) + ): + self.logger.error( + f"[{self.name}] One or more Pinocchio-related inputs from blackboard are invalid type/format." 
+ ) + return False + self._pinocchio_ready = True + return True + except KeyError as e: + self.logger.error( + f"[{self.name}] Failed to get Pinocchio info from blackboard: {e}. Ensure LoadPinocchioModel ran." + ) + return False + + def _normalize_angle(self, angle: float) -> float: + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _solve_articutool_ik_benchmark_style( + self, target_y_axis_in_jaco_hand_frame: np.ndarray + ) -> List[Tuple[float, float]]: + vx, vy, vz = target_y_axis_in_jaco_hand_frame + solutions: List[Tuple[float, float]] = [] + asin_arg_for_tr = -vx + if not (-1.0 - self.EPSILON <= asin_arg_for_tr <= 1.0 + self.EPSILON): + return [] + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = self._normalize_angle(math.pi - theta_r_sol1) + candidate_thetas_r = [theta_r_sol1] + if not math.isclose(theta_r_sol1, theta_r_sol2, abs_tol=self.EPSILON): + candidate_thetas_r.append(theta_r_sol2) + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + if math.isclose(cos_theta_r, 0.0, abs_tol=self.EPSILON): + if math.isclose(vy, 0.0, abs_tol=self.EPSILON) and math.isclose( + vz, 0.0, abs_tol=self.EPSILON + ): + solutions.append((0.0, self._normalize_angle(theta_r))) + continue + theta_p_sol = math.atan2(vz, vy) + solutions.append( + (self._normalize_angle(theta_p_sol), self._normalize_angle(theta_r)) + ) + return solutions + + @override + def initialise(self) -> None: + self.logger.debug(f"[{self.name}] Initializing.") + self.blackboard_set("articutool_is_orientation_feasible", None) + self._pinocchio_ready = False + + @override + def update(self) -> Status: + if not self.node: + self.feedback_message = "Node not initialized" + return Status.FAILURE + if ( + not self._pinocchio_ready + ): # Attempt to load Pinocchio essentials if not already done + if not self._get_pinocchio_essentials_from_blackboard(): + self.feedback_message = ( + "Failed to get Pinocchio essentials from blackboard." + ) + return Status.FAILURE + # Now, self._pin_model, self._pin_data etc. should be populated if _get_pinocchio_essentials_from_blackboard was successful + + try: + trajectory_input: Union[RobotTrajectory, JointTrajectory] = ( + self.blackboard_get("jaco_trajectory") + ) + desired_orientation_msg: QuaternionMsg = self.blackboard_get( + "desired_tool_tip_world_orientation" + ) + num_points_to_check: int = self.blackboard_get( + "num_trajectory_points_to_check" + ) + + if not isinstance(desired_orientation_msg, QuaternionMsg): + self.feedback_message = ( + "Input 'desired_tool_tip_world_orientation' is not Quaternion." + ) + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + R_World_TipTarget = Rotation.from_quat( + [ + desired_orientation_msg.x, + desired_orientation_msg.y, + desired_orientation_msg.z, + desired_orientation_msg.w, + ] + ) + y_axis_TipTarget_InWorld = R_World_TipTarget.apply( + np.array([0.0, 1.0, 0.0]) + ) + + jaco_ee_world_poses: List[Pose] = [] + + if isinstance(trajectory_input, RobotTrajectory): + if trajectory_input.multi_dof_joint_trajectory.points: + self.logger.debug( + f"[{self.name}] Using multi_dof_joint_trajectory from RobotTrajectory." 
+ ) + for mjt_point in trajectory_input.multi_dof_joint_trajectory.points: + if mjt_point.transforms: + transform = mjt_point.transforms[0] + pose = Pose() + pose.position.x = transform.translation.x + pose.position.y = transform.translation.y + pose.position.z = transform.translation.z + pose.orientation = transform.rotation + jaco_ee_world_poses.append(pose) + elif trajectory_input.joint_trajectory.points: + self.logger.debug( + f"[{self.name}] Using joint_trajectory from RobotTrajectory. Performing FK." + ) + jt_points = trajectory_input.joint_trajectory.points + jt_joint_names = trajectory_input.joint_trajectory.joint_names + # Map jt_joint_names to the order expected by self._jaco_joint_names_pin for FK + # This part needs care if orders differ or if jt_joint_names is not just Jaco + + q_robot = pin.neutral(self._pin_model) # Full robot configuration + for point_data in jt_points: + # Populate q_robot based on point_data.positions, jt_joint_names + # and neutral/current for other joints (Articutool, virtual base) + + # Simplified: Assume jt_joint_names are ONLY Jaco joints and match _jaco_joint_names_pin order + # And Articutool joints remain at their neutral/current state from q_robot initialization + # A more robust solution would take current full robot state and update Jaco parts. + + # Map positions from trajectory to the correct Jaco joints in Pinocchio model + temp_jaco_map = { + name: point_data.positions[i] + for i, name in enumerate(jt_joint_names) + } + + for ( + j_name_pin + ) in ( + self._jaco_joint_names_pin + ): # Iterate through Pinocchio's Jaco joint names + if j_name_pin in temp_jaco_map: + joint_id = self._pin_model.getJointId(j_name_pin) + joint_obj = self._pin_model.joints[joint_id] + theta = temp_jaco_map[j_name_pin] + if joint_obj.nq == 2 and joint_obj.nv == 1: # Revolute + q_robot[joint_obj.idx_q] = math.cos(theta) + q_robot[joint_obj.idx_q + 1] = math.sin(theta) + elif ( + joint_obj.nq == 1 and joint_obj.nv == 1 + ): # Prismatic + q_robot[joint_obj.idx_q] = theta + + # Articutool joints (assume they stay at neutral or a fixed known state for FK of Jaco EE) + # For simplicity, let's assume they remain neutral as set by pin.neutral(self._pin_model) + # If Articutool moves with Jaco, its state should also be in trajectory or known. + + pin.forwardKinematics(self._pin_model, self._pin_data, q_robot) + pin.updateFramePlacements(self._pin_model, self._pin_data) + jaco_ee_transform_pin: pin.SE3 = self._pin_data.oMf[ + self._jaco_ee_frame_id_pin + ] + + pose = Pose() + pose.position.x, pose.position.y, pose.position.z = ( + jaco_ee_transform_pin.translation + ) + quat_xyzw = Rotation.from_matrix( + jaco_ee_transform_pin.rotation + ).as_quat() + ( + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ) = quat_xyzw + jaco_ee_world_poses.append(pose) + + else: # RobotTrajectory is empty + self.logger.warn( + f"[{self.name}] Input RobotTrajectory is empty. Assuming feasible." + ) + self.blackboard_set("articutool_is_orientation_feasible", True) + return Status.SUCCESS + + elif isinstance(trajectory_input, JointTrajectory): + self.logger.debug( + f"[{self.name}] Input is JointTrajectory. Performing FK." 
+ ) + # Similar FK logic as above for RobotTrajectory.joint_trajectory + jt_points = trajectory_input.points + jt_joint_names = trajectory_input.joint_names + q_robot = pin.neutral(self._pin_model) + for point_data in jt_points: + temp_jaco_map = { + name: point_data.positions[i] + for i, name in enumerate(jt_joint_names) + } + for j_name_pin in self._jaco_joint_names_pin: + if j_name_pin in temp_jaco_map: + joint_id = self._pin_model.getJointId(j_name_pin) + joint_obj = self._pin_model.joints[joint_id] + theta = temp_jaco_map[j_name_pin] + if joint_obj.nq == 2 and joint_obj.nv == 1: + q_robot[joint_obj.idx_q] = math.cos(theta) + q_robot[joint_obj.idx_q + 1] = math.sin(theta) + elif joint_obj.nq == 1 and joint_obj.nv == 1: + q_robot[joint_obj.idx_q] = theta + pin.forwardKinematics(self._pin_model, self._pin_data, q_robot) + pin.updateFramePlacements(self._pin_model, self._pin_data) + jaco_ee_transform_pin: pin.SE3 = self._pin_data.oMf[ + self._jaco_ee_frame_id_pin + ] + pose = Pose() + pose.position.x, pose.position.y, pose.position.z = ( + jaco_ee_transform_pin.translation + ) + quat_xyzw = Rotation.from_matrix( + jaco_ee_transform_pin.rotation + ).as_quat() + ( + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ) = quat_xyzw + jaco_ee_world_poses.append(pose) + else: + self.feedback_message = f"Input 'jaco_trajectory' is of unexpected type: {type(trajectory_input)}." + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + if not jaco_ee_world_poses: + self.logger.warn( + f"[{self.name}] No Jaco EE poses to check. Assuming feasible." + ) + self.blackboard_set("articutool_is_orientation_feasible", True) + return Status.SUCCESS + + indices_to_check = [] + if len(jaco_ee_world_poses) == 1: + indices_to_check = [0] + elif num_points_to_check >= len(jaco_ee_world_poses): + indices_to_check = list(range(len(jaco_ee_world_poses))) + elif num_points_to_check > 0: + indices_to_check = np.linspace( + 0, len(jaco_ee_world_poses) - 1, num_points_to_check, dtype=int + ) + + # Check the length of indices_to_check, as it can be a list or numpy array + if len(indices_to_check) == 0 and num_points_to_check > 0: + self.logger.warn( + f"[{self.name}] No indices to check (num_points_to_check={num_points_to_check}, len_poses={len(jaco_ee_world_poses)}). Assuming feasible." + ) + self.blackboard_set("articutool_is_orientation_feasible", True) + return Status.SUCCESS + + last_valid_solution: Optional[np.ndarray] = None + for traj_idx in indices_to_check: + jaco_ee_pose_msg: Pose = jaco_ee_world_poses[traj_idx] + R_World_JacoEE = Rotation.from_quat( + [ + jaco_ee_pose_msg.orientation.x, + jaco_ee_pose_msg.orientation.y, + jaco_ee_pose_msg.orientation.z, + jaco_ee_pose_msg.orientation.w, + ] + ) + target_y_for_ik_in_atool_base = R_World_JacoEE.inv().apply( + y_axis_TipTarget_InWorld + ) + norm_target_y = np.linalg.norm(target_y_for_ik_in_atool_base) + if norm_target_y < self.EPSILON: + self.feedback_message = ( + f"Target Y-axis for IK near zero at traj point {traj_idx}." 
+                    )
+                    self.logger.error(f"[{self.name}] {self.feedback_message}")
+                    self.blackboard_set("articutool_is_orientation_feasible", False)
+                    return Status.FAILURE
+                target_y_for_ik_in_atool_base /= norm_target_y
+
+                ik_solutions = self._solve_articutool_ik_benchmark_style(
+                    target_y_for_ik_in_atool_base
+                )
+
+                # Filter for solutions within joint limits
+                valid_solutions = []
+                for theta_p, theta_r in ik_solutions:
+                    if (
+                        self._pitch_limits_rad[0] - self.EPSILON
+                        <= theta_p
+                        <= self._pitch_limits_rad[1] + self.EPSILON
+                        and self._roll_limits_rad[0] - self.EPSILON
+                        <= theta_r
+                        <= self._roll_limits_rad[1] + self.EPSILON
+                    ):
+                        valid_solutions.append(np.array([theta_p, theta_r]))
+
+                # If no valid solutions exist, the path is infeasible
+                if not valid_solutions:
+                    self.logger.warn(
+                        f"[{self.name}] No valid IK solution within joint limits at trajectory point {traj_idx}"
+                    )
+                    self.blackboard_set("articutool_is_orientation_feasible", False)
+                    return Status.FAILURE
+
+                # Select the best solution based on continuity
+                if last_valid_solution is None:
+                    # For the first point, default to the first valid solution.
+                    # (Selecting the solution closest to the current tool joint
+                    # state is an alternative, left commented out below.)
+                    chosen_solution = valid_solutions[0]
+                    # current_tool_joints = self.blackboard_get(
+                    #     "current_articutool_joint_state"
+                    # )
+                    # distances = [
+                    #     np.linalg.norm(sol - current_tool_joints)
+                    #     for sol in valid_solutions
+                    # ]
+                    # chosen_solution = valid_solutions[np.argmin(distances)]
+                else:
+                    # For subsequent points, find the solution closest to the previous point's solution
+                    distances = [
+                        np.linalg.norm(sol - last_valid_solution)
+                        for sol in valid_solutions
+                    ]
+                    chosen_solution = valid_solutions[np.argmin(distances)]
+
+                # Update the state for the next iteration
+                last_valid_solution = chosen_solution
+
+                self.logger.debug(
+                    f"[{self.name}] Pt {traj_idx}: Articutool IK feasible."
+                )
+
+            self.feedback_message = (
+                "Articutool can maintain orientation throughout Jaco trajectory."
+            )
+            self.logger.info(f"[{self.name}] {self.feedback_message}")
+            self.blackboard_set("articutool_is_orientation_feasible", True)
+            return Status.SUCCESS
+
+        except KeyError as e:
+            self.feedback_message = f"Blackboard key error: {e}"
+            self.logger.error(f"[{self.name}] {self.feedback_message}")
+            return Status.FAILURE
+        except Exception as e:
+            self.feedback_message = f"Unexpected error: {e}"
+            self.logger.error(f"[{self.name}] {self.feedback_message}")
+            return Status.FAILURE
+
+    @override
+    def terminate(self, new_status: Status) -> None:
+        self.logger.debug(f"[{self.name}] Terminating with status {new_status}.")
diff --git a/ada_feeding/ada_feeding/behaviors/state/check_jaco_directional_manipulability.py b/ada_feeding/ada_feeding/behaviors/state/check_jaco_directional_manipulability.py
new file mode 100644
index 00000000..f60e29e5
--- /dev/null
+++ b/ada_feeding/ada_feeding/behaviors/state/check_jaco_directional_manipulability.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
+"""
+Defines the CheckJacoDirectionalManipulability behavior.
+This behavior evaluates if the Jaco arm, at a given configuration,
+has sufficient translational manipulability for its end-effector to move
+along a specified Cartesian direction. It uses a pre-loaded Pinocchio model
+from the blackboard.
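+
+A note on the metric, since update() below implements it directly: for the 3 x n
+translational Jacobian J_t of the Jaco end-effector and a unit task direction u,
+the directional manipulability is m_d(u) = sqrt(u^T (J_t J_t^T) u), i.e. the
+extent of the arm's translational velocity ellipsoid along u. A minimal NumPy
+sketch of the same quantity (the Jacobian here is made up for illustration only):
+
+    import numpy as np
+
+    J_t = np.array([[0.3, 0.0, 0.1], [0.0, 0.4, 0.0], [0.1, 0.0, 0.2]])
+    u = np.array([1.0, 0.0, 0.0])
+    m_d = np.sqrt(max(0.0, u @ (J_t @ J_t.T) @ u))
+
+Also note that continuous (unbounded revolute) joints occupy two configuration
+entries (cos(theta), sin(theta)) but one velocity entry in Pinocchio, which is
+why the configuration vector is populated with cos/sin pairs in update().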
+""" + +# Standard imports +import math +from typing import Union, Optional, List, Dict, Any + +# Third-party imports +from geometry_msgs.msg import PoseStamped +from sensor_msgs.msg import JointState +import numpy as np +from overrides import override +import pinocchio as pin +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import rclpy.node + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class CheckJacoDirectionalManipulability(BlackboardBehavior): + """ + Checks Jaco arm's directional translational manipulability using a pre-loaded + Pinocchio model and pre-calculated joint/frame IDs from the blackboard. + """ + + def blackboard_inputs( + self, + pinocchio_model: Union[BlackboardKey, pin.Model], + pinocchio_data: Union[BlackboardKey, pin.Data], + jaco_vel_indices_pin: Union[ + BlackboardKey, List[int] + ], # Pre-calculated velocity indices for Jaco joints + jaco_ee_frame_id_pin: Union[ + BlackboardKey, int + ], # Pre-calculated frame ID for Jaco EE + current_full_robot_joint_state_MA: Union[BlackboardKey, JointState], + tool_tip_move_above_pose_world: Union[BlackboardKey, PoseStamped], + tool_tip_move_into_pose_world: Union[BlackboardKey, PoseStamped], + directional_manipulability_threshold: Union[BlackboardKey, float], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + pinocchio_model: Pinocchio model object (from blackboard). + pinocchio_data: Pinocchio data object (from blackboard). + jaco_vel_indices_pin: List of Pinocchio velocity indices for Jaco joints (from blackboard). + jaco_ee_frame_id_pin: Pinocchio frame ID for the Jaco end-effector (from blackboard). + current_full_robot_joint_state_MA: JointState of the full robot at "Move Above". + tool_tip_move_above_pose_world: PoseStamped of tool tip at "Move Above" in world. + tool_tip_move_into_pose_world: PoseStamped of tool tip at "Move Into" in world. + directional_manipulability_threshold: Minimum acceptable score. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + jaco_directional_manipulability_score: Optional[BlackboardKey], + jaco_is_manipulable_for_direction: Optional[BlackboardKey], + ) -> None: + """ + Blackboard Outputs + (Same as before) + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[rclpy.node.Node] = None + # Pinocchio objects will be read from blackboard in update/initialise + self._pin_model: Optional[pin.Model] = None + self._pin_data: Optional[pin.Data] = None + self._jaco_vel_indices: Optional[List[int]] = None + self._jaco_ee_frame_id: Optional[int] = None + + @override + def setup(self, **kwargs): + """Gets the ROS2 node from arguments.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + # Node is critical, update will fail if it's None + + @override + def initialise(self) -> None: + """ + Reads Pinocchio model, data, and pre-calculated IDs from the blackboard. + Clears previous outputs. + """ + self.logger.debug( + f"[{self.name}] Initializing and attempting to read Pinocchio data from blackboard." 
+ ) + self.blackboard_set("jaco_directional_manipulability_score", None) + self.blackboard_set("jaco_is_manipulable_for_direction", None) + + try: + self._pin_model = self.blackboard_get("pinocchio_model") + self._pin_data = self.blackboard_get("pinocchio_data") + self._jaco_vel_indices = self.blackboard_get("jaco_vel_indices_pin") + self._jaco_ee_frame_id = self.blackboard_get("jaco_ee_frame_id_pin") + + if not isinstance(self._pin_model, pin.Model): + self.logger.error( + f"[{self.name}] 'pinocchio_model' from blackboard is not a valid Pinocchio Model." + ) + self._pin_model = None # Invalidate + if not isinstance(self._pin_data, pin.Data): + self.logger.error( + f"[{self.name}] 'pinocchio_data' from blackboard is not a valid Pinocchio Data." + ) + self._pin_data = None # Invalidate + if not isinstance(self._jaco_vel_indices, list) or not all( + isinstance(i, int) for i in self._jaco_vel_indices + ): + self.logger.error( + f"[{self.name}] 'jaco_vel_indices_pin' from blackboard is not a valid List[int]." + ) + self._jaco_vel_indices = None # Invalidate + if not isinstance(self._jaco_ee_frame_id, int): + self.logger.error( + f"[{self.name}] 'jaco_ee_frame_id_pin' from blackboard is not a valid int." + ) + self._jaco_ee_frame_id = None # Invalidate + + except KeyError as e: + self.logger.error( + f"[{self.name}] Failed to get required Pinocchio info from blackboard: {e}. Ensure LoadPinocchioModel ran successfully." + ) + self._pin_model = None # Ensure invalidated on error + self._pin_data = None + self._jaco_vel_indices = None + self._jaco_ee_frame_id = None + + if ( + self._pin_model + and self._pin_data + and self._jaco_vel_indices + and self._jaco_ee_frame_id is not None + ): + self.logger.debug( + f"[{self.name}] Successfully retrieved Pinocchio model, data, and Jaco IDs from blackboard." + ) + else: + self.logger.error( + f"[{self.name}] Failed to properly initialize with Pinocchio data from blackboard." + ) + + @override + def update(self) -> Status: + if not self.node: + self.feedback_message = "Node not initialized in setup." + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + # Check if Pinocchio objects were successfully retrieved in initialise + if ( + not self._pin_model + or not self._pin_data + or self._jaco_ee_frame_id is None + or not self._jaco_vel_indices + ): + self.feedback_message = ( + "Pinocchio model/data/IDs not available from blackboard." + ) + self.logger.error( + f"[{self.name}] {self.feedback_message} Ensure LoadPinocchioModel ran successfully and populated the blackboard." + ) + return Status.FAILURE + + try: + current_q_robot_state: JointState = self.blackboard_get( + "current_full_robot_joint_state_MA" + ) + tip_pose_MA_world: PoseStamped = self.blackboard_get( + "tool_tip_move_above_pose_world" + ) + tip_pose_MI_world: PoseStamped = self.blackboard_get( + "tool_tip_move_into_pose_world" + ) + threshold: float = self.blackboard_get( + "directional_manipulability_threshold" + ) + + if not all( + isinstance(p, PoseStamped) + for p in [tip_pose_MA_world, tip_pose_MI_world] + ): + self.feedback_message = "Input tool tip poses are not PoseStamped." 
+ self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + # --- Populate Pinocchio's q vector (q_MA) --- + q_MA = pin.neutral(self._pin_model) # Use the model from blackboard + + num_updated_joints_in_q = 0 + for i, name_from_js in enumerate(current_q_robot_state.name): + if self._pin_model.existJointName(name_from_js): + joint_id_pin = self._pin_model.getJointId(name_from_js) + joint_obj_pin = self._pin_model.joints[joint_id_pin] + pin_joint_name = self._pin_model.names[joint_id_pin] + + theta = current_q_robot_state.position[i] + + if joint_obj_pin.nq == 2 and joint_obj_pin.nv == 1: + q_MA[joint_obj_pin.idx_q] = math.cos(theta) + q_MA[joint_obj_pin.idx_q + 1] = math.sin(theta) + num_updated_joints_in_q += 1 + elif joint_obj_pin.nq == 1 and joint_obj_pin.nv == 1: + q_MA[joint_obj_pin.idx_q] = theta + num_updated_joints_in_q += 1 + elif joint_obj_pin.nq > 1: + if ( + pin_joint_name == self._pin_model.names[1] + and joint_obj_pin.shortname() == "JointModelFreeFlyer" + ): + if (i + 7) <= len(current_q_robot_state.position): + q_MA[joint_obj_pin.idx_q : joint_obj_pin.idx_q + 7] = ( + current_q_robot_state.position[i : i + 7] + ) + num_updated_joints_in_q += 1 + else: + self.logger.warn( + f"[{self.name}] FreeFlyer base joint '{pin_joint_name}' found, but not enough values in JointState (from index {i}) to populate q (expected 7). Using neutral for base." + ) + else: + self.logger.warn( + f"[{self.name}] Joint '{name_from_js}' (Pinocchio name: '{pin_joint_name}') has nq={joint_obj_pin.nq}, nv={joint_obj_pin.nv}. Unhandled complex joint type." + ) + else: + self.logger.warn( + f"[{self.name}] Joint '{name_from_js}' from input JointState not found in Pinocchio model. Skipping." + ) + + if num_updated_joints_in_q == 0: + self.logger.error( + f"[{self.name}] No joints in Pinocchio's q were updated from input JointState." + ) + return Status.FAILURE + + pin.forwardKinematics(self._pin_model, self._pin_data, q_MA) + pin.updateFramePlacements(self._pin_model, self._pin_data) + + J_jacoEE_world_full = pin.computeFrameJacobian( + self._pin_model, + self._pin_data, + q_MA, + self._jaco_ee_frame_id, + pin.ReferenceFrame.WORLD, + ) + + J_jacoEE_world_jaco = J_jacoEE_world_full[ + :, self._jaco_vel_indices + ] # Use pre-calculated indices + J_jacoEE_world_jaco_trans = J_jacoEE_world_jaco[:3, :] + self.logger.info( + f"[{self.name}] Jaco EE Trans Jacobian (J_jacoEE_world_jaco_trans):\n{J_jacoEE_world_jaco_trans}" + ) + + pos_MA_tip = np.array( + [ + tip_pose_MA_world.pose.position.x, + tip_pose_MA_world.pose.position.y, + tip_pose_MA_world.pose.position.z, + ] + ) + pos_MI_tip = np.array( + [ + tip_pose_MI_world.pose.position.x, + tip_pose_MI_world.pose.position.y, + tip_pose_MI_world.pose.position.z, + ] + ) + + task_direction_vec = pos_MI_tip - pos_MA_tip + norm_task_direction = np.linalg.norm(task_direction_vec) + + if norm_task_direction < 1e-6: + self.feedback_message = "Task direction vector (tool tip) is near zero." 
+ self.logger.warn(f"[{self.name}] {self.feedback_message}") + self.blackboard_set("jaco_directional_manipulability_score", 0.0) + self.blackboard_set("jaco_is_manipulable_for_direction", False) + return Status.FAILURE + + unit_task_direction_jacoEE = task_direction_vec / norm_task_direction + + JJT_trans_jacoEE = J_jacoEE_world_jaco_trans @ J_jacoEE_world_jaco_trans.T + det_JJT = np.linalg.det(JJT_trans_jacoEE) + self.logger.debug( + f"[{self.name}] Determinant of JJT_trans_jacoEE: {det_JJT:.6e}" + ) + + m_d_squared = ( + unit_task_direction_jacoEE.T + @ JJT_trans_jacoEE + @ unit_task_direction_jacoEE + ) + manipulability_score = np.sqrt(max(0.0, m_d_squared)) + + self.logger.info( + f"[{self.name}] Jaco EE directional manipulability (tool tip direction): {manipulability_score:.4f}" + ) + self.blackboard_set( + "jaco_directional_manipulability_score", manipulability_score + ) + + is_manipulable = manipulability_score >= threshold + self.blackboard_set("jaco_is_manipulable_for_direction", is_manipulable) + + if is_manipulable: + self.feedback_message = f"Jaco EE is manipulable for task direction (Score: {manipulability_score:.4f} >= Threshold: {threshold:.4f})" + self.logger.info(f"[{self.name}] {self.feedback_message}") + return Status.SUCCESS + else: + self.feedback_message = f"Jaco EE NOT manipulable for task direction (Score: {manipulability_score:.4f} < Threshold: {threshold:.4f})" + self.logger.warn(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + except KeyError as e: + self.feedback_message = f"Blackboard key error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + except Exception as e: + self.feedback_message = f"Unexpected error: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/state/combine_joint_states.py b/ada_feeding/ada_feeding/behaviors/state/combine_joint_states.py new file mode 100644 index 00000000..c1368ab7 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/combine_joint_states.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +# (Add appropriate Copyright/License if desired) + +""" +Defines the CombineJointStates behavior, which merges two JointState +messages into a single one based on a provided full list of joint names. +""" + +# Standard imports +from typing import Union, Optional, Dict, Any, List + +# Third-party imports +from sensor_msgs.msg import JointState +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import rclpy.node +import rclpy.time + +# Local imports (adjust paths as needed) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class CombineJointStates(BlackboardBehavior): + """ + Merges two input JointState messages (e.g., from separate arm and tool plans) + into a single output JointState message, ordered according to a provided + list of all joint names for the combined system. 
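+
+    Illustrative sketch of the merge rule implemented in update() (the joint
+    names here are hypothetical, not taken from this package's configuration);
+    the second message wins on any overlapping joint name:
+
+        from sensor_msgs.msg import JointState
+
+        arm = JointState(name=["arm_joint_1", "arm_joint_2"], position=[0.1, 0.2])
+        tool = JointState(name=["tool_pitch", "tool_roll"], position=[0.0, 0.3])
+        full = ["arm_joint_1", "arm_joint_2", "tool_pitch", "tool_roll"]
+
+        merged = dict(zip(arm.name, arm.position))
+        merged.update(zip(tool.name, tool.position))  # second state has priority
+        combined = JointState(name=full, position=[merged[n] for n in full])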
+ """ + + def blackboard_inputs( + self, + joint_state_1: Union[BlackboardKey, JointState], + joint_state_2: Union[BlackboardKey, JointState], + full_joint_names: Union[BlackboardKey, List[str]], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + joint_state_1: BlackboardKey resolving to the first JointState message. + joint_state_2: BlackboardKey resolving to the second JointState message. + full_joint_names: BlackboardKey resolving to the List[str] containing all + joint names for the combined state, in the desired order. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + combined_joint_state: Optional[BlackboardKey], # -> Optional[JointState] + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + combined_joint_state: BlackboardKey where the resulting merged JointState + message will be written. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Gets the ROS2 node from the arguments passed by the tree runner.""" + # pylint: disable=attribute-defined-outside-init + try: + self.node: rclpy.node.Node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. {e}" + ) + self.node = None + + # initialise is not strictly needed, but good practice + @override + def initialise(self) -> None: + """Optionally log initialization.""" + self.logger.debug(f"[{self.name}] Initializing.") + # Clear previous output + self.blackboard_set("combined_joint_state", None) + + @override + def update(self) -> Status: + """ + Reads input JointStates, merges them based on full_joint_names, + and writes the combined JointState. + """ + if self.node is None: + self.logger.error( + f"[{self.name}] Node object not available. Setup likely failed." + ) + return Status.FAILURE + + try: + # Read inputs + state1: JointState = self.blackboard_get("joint_state_1") + state2: JointState = self.blackboard_get("joint_state_2") + full_names: List[str] = self.blackboard_get("full_joint_names") + + # --- Input Validation --- + if not isinstance(state1, JointState): + self.logger.error( + f"[{self.name}] Input 'joint_state_1' is not a JointState message (is type: {type(state1)})." + ) + return Status.FAILURE + if not isinstance(state2, JointState): + self.logger.error( + f"[{self.name}] Input 'joint_state_2' is not a JointState message (is type: {type(state2)})." + ) + return Status.FAILURE + if not isinstance(full_names, list) or not all( + isinstance(n, str) for n in full_names + ): + self.logger.error( + f"[{self.name}] Input 'full_joint_names' must be a List[str]." + ) + return Status.FAILURE + if not full_names: + self.logger.error( + f"[{self.name}] Input 'full_joint_names' cannot be empty." 
+ ) + return Status.FAILURE + # --- End Validation --- + + # --- Merge Logic --- + # Create a dictionary to hold combined positions, giving priority to state2 if names overlap + combined_positions_dict = {} + for name, pos in zip(state1.name, state1.position): + combined_positions_dict[name] = pos + for name, pos in zip(state2.name, state2.position): + combined_positions_dict[name] = pos # Overwrites if name was in state1 + + # Create the output JointState + output_state = JointState() + output_state.header.stamp = ( + self.node.get_clock().now().to_msg() + ) # Use current time + output_state.name = full_names # Assign the full ordered list of names + output_state.position = [] # Initialize empty list + + # Populate positions in the order specified by full_names + all_found = True + for name in full_names: + if name in combined_positions_dict: + output_state.position.append(combined_positions_dict[name]) + else: + self.logger.error( + f"[{self.name}] Joint '{name}' from 'full_joint_names' not found in either input JointState." + ) + all_found = False + break # Stop processing + + if not all_found: + return Status.FAILURE + + # Basic check for consistency + if len(output_state.name) != len(output_state.position): + self.logger.error( + f"[{self.name}] Mismatch between output names ({len(output_state.name)}) and positions ({len(output_state.position)}). Logic error?" + ) + return Status.FAILURE + # --- End Merge Logic --- + + # Write the successful result to the blackboard + self.blackboard_set("combined_joint_state", output_state) + self.logger.info( + f"[{self.name}] Successfully combined joint states for {len(output_state.name)} joints." + ) + return Status.SUCCESS + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return Status.FAILURE + except Exception as e: + self.logger.error( + f"[{self.name}] Unexpected error combining joint states: {e}", + exc_info=True, + ) + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/state/extract_joints_from_state.py b/ada_feeding/ada_feeding/behaviors/state/extract_joints_from_state.py new file mode 100644 index 00000000..019c1e52 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/extract_joints_from_state.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This module defines the ExtractJointsFromState behavior, which extracts specific +joint names and their corresponding positions from a JointState message. +""" + +# Standard imports +from typing import Union, Optional, Dict, Any, List + +# Third-party imports +from sensor_msgs.msg import JointState +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import rclpy.node + +# Local imports (adjust paths as needed) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class ExtractJointsFromState(BlackboardBehavior): + """ + Extracts specified joint names and their corresponding positions from a + source JointState message read from the blackboard. Writes the results + as separate lists (names and positions) to the blackboard. 
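+
+    Illustrative sketch of the lookup performed in update() (joint names are
+    hypothetical):
+
+        from sensor_msgs.msg import JointState
+
+        source = JointState(name=["arm_joint_1", "tool_pitch"], position=[0.1, 0.3])
+        targets = ["tool_pitch"]
+        lookup = dict(zip(source.name, source.position))
+        positions = [lookup[n] for n in targets]  # a missing name -> FAILURE in the behavior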
+ """ + + def blackboard_inputs( + self, + source_joint_state: Union[BlackboardKey, JointState], + target_joint_names: Union[BlackboardKey, List[str]], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + source_joint_state: BlackboardKey resolving to the input JointState message + (e.g., the result of an IK computation). + target_joint_names: BlackboardKey resolving to the List[str] of joint names + to extract from the source_joint_state. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + output_joint_names: Optional[BlackboardKey], # -> Optional[List[str]] + output_joint_positions: Optional[BlackboardKey], # -> Optional[List[float]] + success: Optional[BlackboardKey], # -> bool (Optional success flag) + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + output_joint_names: BlackboardKey where the extracted List[str] of joint names + (in the order requested) will be written. + output_joint_positions: BlackboardKey where the extracted List[float] of corresponding + joint positions will be written. + success: Optional BlackboardKey to write a boolean indicating successful extraction. + """ + # pylint: disable=unused-argument, duplicate-code + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def initialise(self) -> None: + """Clear output keys on initialization.""" + self.logger.debug(f"[{self.name}] Initializing.") + # Clear potential previous results + self.blackboard_set("output_joint_names", None) + self.blackboard_set("output_joint_positions", None) + self.blackboard_set("success", False) + + @override + def update(self) -> Status: + """ + Reads inputs, performs extraction, writes outputs. + Returns SUCCESS if all target joints are found, FAILURE otherwise. + """ + try: + source_state: JointState = self.blackboard_get("source_joint_state") + target_names: List[str] = self.blackboard_get("target_joint_names") + + if not isinstance(source_state, JointState): + self.logger.error( + f"[{self.name}] Input 'source_joint_state' is not a JointState message (is type: {type(source_state)})." + ) + return Status.FAILURE + + if not isinstance(target_names, list) or not all( + isinstance(n, str) for n in target_names + ): + self.logger.error( + f"[{self.name}] Input 'target_joint_names' must be a List[str]." + ) + return Status.FAILURE + + if not target_names: + self.logger.warning( + f"[{self.name}] Input 'target_joint_names' is empty. Nothing to extract." 
+ ) + self.blackboard_set("output_joint_names", []) + self.blackboard_set("output_joint_positions", []) + self.blackboard_set("success", True) + return Status.SUCCESS + + # Create a lookup dictionary for efficient access + source_positions_dict = { + name: pos for name, pos in zip(source_state.name, source_state.position) + } + + extracted_names = [] + extracted_positions = [] + all_found = True + + for name in target_names: + if name in source_positions_dict: + extracted_names.append(name) + extracted_positions.append(source_positions_dict[name]) + else: + self.logger.error( + f"[{self.name}] Target joint '{name}' not found in source_joint_state names: {list(source_positions_dict.keys())}" + ) + all_found = False + break + + if all_found: + self.blackboard_set("output_joint_names", extracted_names) + self.blackboard_set("output_joint_positions", extracted_positions) + self.blackboard_set("success", True) + self.logger.info( + f"[{self.name}] Successfully extracted {len(extracted_names)} joints." + ) + return Status.SUCCESS + else: + self.blackboard_set("output_joint_names", None) + self.blackboard_set("output_joint_positions", None) + self.blackboard_set("success", False) + return Status.FAILURE + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + self.blackboard_set("success", False) + return Status.FAILURE + except Exception as e: + self.logger.error( + f"[{self.name}] Unexpected error during joint extraction: {e}", + exc_info=True, + ) + self.blackboard_set("success", False) + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/state/extract_pose_components.py b/ada_feeding/ada_feeding/behaviors/state/extract_pose_components.py new file mode 100644 index 00000000..b8f61954 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/extract_pose_components.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# (Add appropriate Copyright/License if desired) + +""" +Defines the ExtractPoseComponents behavior, which separates Pose/PoseStamped +into its constituent parts. +""" + +import rclpy +from rclpy.node import Node +from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion +from std_msgs.msg import Header # For outputting header if PoseStamped +from typing import Union, Optional, Dict, Any + +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status + +# Local imports (adjust paths as needed) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class ExtractPoseComponents(BlackboardBehavior): + """ + Extracts Position and Orientation components from an input Pose or PoseStamped + object stored on the blackboard. Optionally outputs the Header if the input + is PoseStamped. + + Returns SUCCESS if extraction is possible, FAILURE otherwise. + """ + + def blackboard_inputs( + self, + input_pose_object: Union[BlackboardKey, Pose, PoseStamped], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + input_pose_object: The blackboard key resolving to the Pose or PoseStamped message + from which to extract components. 
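+
+        Notes
+        -----
+        Purely illustrative sketch of the split update() performs for a
+        PoseStamped input:
+
+            from geometry_msgs.msg import PoseStamped
+
+            ps = PoseStamped()
+            position = ps.pose.position        # geometry_msgs/Point
+            orientation = ps.pose.orientation  # geometry_msgs/Quaternion
+            header = ps.header                 # std_msgs/Header (None for a bare Pose)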
+ """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + output_position: Optional[BlackboardKey] = None, # -> Optional[Point] + output_orientation: Optional[BlackboardKey] = None, # -> Optional[Quaternion] + output_header: Optional[BlackboardKey] = None, # -> Optional[Header] + success: Optional[BlackboardKey] = None, # -> bool + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + output_position: Blackboard key to write the extracted geometry_msgs/Point. + output_orientation: Blackboard key to write the extracted geometry_msgs/Quaternion. + output_header: Optional key to write the std_msgs/Header if input was PoseStamped. + If input was Pose, this key will be set to None. + success: Optional key to write boolean success flag (True if extraction succeeded). + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + """Get the node if needed (e.g., for logging).""" + # pylint: disable=attribute-defined-outside-init + try: + # Node access is useful for logging via self.logger + self.node: Node = kwargs['node'] + except KeyError: + # Fallback logger if node isn't passed (though BlackboardBehavior likely handles this) + self.logger.warning(f"[{self.name}] Node not explicitly provided via setup kwargs.") + self.node = None + + @override + def initialise(self) -> None: + """Clear output keys on the blackboard before execution.""" + self.logger.debug(f"[{self.name}] Initializing and clearing outputs.") + # Use blackboard_set helper which handles key existence checks based on blackboard_outputs registration + self.blackboard_set("output_position", None) + self.blackboard_set("output_orientation", None) + self.blackboard_set("output_header", None) + self.blackboard_set("success", False) + + + @override + def update(self) -> Status: + """Read input, extract components based on type, write outputs.""" + extracted_pos: Optional[Point] = None + extracted_ori: Optional[Quaternion] = None + extracted_header: Optional[Header] = None + op_success = False # Assume failure initially + + try: + # Read the input object from the blackboard + pose_obj = self.blackboard_get("input_pose_object") + + # Check the type and extract components + if isinstance(pose_obj, PoseStamped): + self.logger.debug(f"[{self.name}] Input is PoseStamped. Extracting pose and header.") + extracted_pos = pose_obj.pose.position + extracted_ori = pose_obj.pose.orientation + extracted_header = pose_obj.header # Get the header + op_success = True + elif isinstance(pose_obj, Pose): + self.logger.debug(f"[{self.name}] Input is Pose. Extracting components.") + extracted_pos = pose_obj.position + extracted_ori = pose_obj.orientation + extracted_header = None # No header available for Pose type + op_success = True + else: + # Log error if input is neither Pose nor PoseStamped + self.logger.error(f"[{self.name}] Input 'input_pose_object' is not Pose or PoseStamped (type: {type(pose_obj)}). 
Cannot extract components.") + op_success = False + + except KeyError as e: + # Error if the input blackboard key doesn't exist + self.logger.error(f"[{self.name}] Blackboard key error reading input: {e}") + op_success = False + except AttributeError as e: + # Error if the object exists but doesn't have expected fields (e.g., None value) + self.logger.error(f"[{self.name}] Attribute error accessing pose components (is input object None?): {e}") + op_success = False + except Exception as e: + # Catch any other unexpected errors + self.logger.error(f"[{self.name}] Unexpected error extracting pose components: {e}") + op_success = False + + # Write results (or None on failure) to blackboard output keys + # blackboard_set handles checking if the output key was defined + self.blackboard_set("output_position", extracted_pos) + self.blackboard_set("output_orientation", extracted_ori) + self.blackboard_set("output_header", extracted_header) + self.blackboard_set("success", op_success) + + # Return SUCCESS or FAILURE based on whether extraction worked + return Status.SUCCESS if op_success else Status.FAILURE + + @override + def terminate(self, new_status: Status) -> None: + """Log termination.""" + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_poses_by_link.py b/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_poses_by_link.py new file mode 100644 index 00000000..a4c4ee4a --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_poses_by_link.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# (Add appropriate Copyright/License if desired) + +""" +Defines the ExtractPoseFromPosesByLink behavior, which finds and extracts +a specific PoseStamped from a list based on its associated link name. +""" + +# Standard imports +from typing import Union, Optional, Dict, Any, List + +# Third-party imports +from geometry_msgs.msg import PoseStamped +from sensor_msgs.msg import JointState +from overrides import override +import py_trees +import py_trees.blackboard +from py_trees.common import Access, Status +import rclpy.node + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class ExtractPoseFromPosesByLink(BlackboardBehavior): + """ + Extracts a specific PoseStamped message from blackboard data which could be + either a single PoseStamped or a list of PoseStamped messages. + + If the input is a list, it requires a corresponding list of link names (which + should be the same list passed to the FK behavior that produced the pose list) + and a target link name to identify which pose to extract. + + If the input is a single PoseStamped, it checks if the target link name matches + expectations (either the single link name provided to FK or the default EE). + """ + + def blackboard_inputs( + self, + fk_poses: Union[BlackboardKey, PoseStamped, List[PoseStamped]], + target_link_name: Union[BlackboardKey, str], + requested_link_names: Optional[Union[BlackboardKey, List[str]]] = None, + default_ee_link_name: Optional[Union[BlackboardKey, str]] = None, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + fk_poses: BlackboardKey resolving to the output of MoveIt2ComputeFK + (either PoseStamped or List[PoseStamped]). + requested_link_names: BlackboardKey resolving to the List[str] that was passed + to MoveIt2ComputeFK's fk_link_names input. This is + REQUIRED if fk_poses is a List. 
Ignored if fk_poses + is a single PoseStamped. + target_link_name: BlackboardKey resolving to the string name of the link whose + pose should be extracted. + default_ee_link_name: Optional: BlackboardKey resolving to the default EE link name. + Used for validation when fk_poses is a single PoseStamped + and requested_link_names was empty/None. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key not in ["self", "kwargs"]} + ) + + def blackboard_outputs( + self, + extracted_pose: Optional[BlackboardKey], # -> Optional[PoseStamped] + success: Optional[BlackboardKey], # -> bool + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + extracted_pose: BlackboardKey where the single extracted PoseStamped will be written. + success: Boolean flag indicating successful extraction. + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key not in ["self", "kwargs"]} + ) + + @override + def initialise(self) -> None: + """Clear output keys.""" + self.logger.debug(f"[{self.name}] Initializing.") + self.blackboard_set("extracted_pose", None) + self.blackboard_set("success", False) + + @override + def update(self) -> Status: + """Extract the pose corresponding to the target link name.""" + try: + fk_data = self.blackboard_get("fk_poses") + target_link = self.blackboard_get("target_link_name") + req_link_names = self.blackboard_get("requested_link_names") + default_ee = self.blackboard_get("default_ee_link_name") + + extracted_pose_result: Optional[PoseStamped] = None + found = False + + # Case 1: FK result is a single PoseStamped + if isinstance(fk_data, PoseStamped): + self.logger.debug(f"[{self.name}] FK result is single PoseStamped.") + # Check if the target link matches expectations + if req_link_names is None or (isinstance(req_link_names, list) and len(req_link_names) == 0): + # FK was likely called for the default EE + if default_ee is not None and target_link == default_ee: + extracted_pose_result = fk_data + found = True + elif default_ee is None: + self.logger.warning(f"[{self.name}] Input fk_poses is single PoseStamped, but default_ee_link_name not provided for validation.") + # Assume it's correct if only one link could have been requested + extracted_pose_result = fk_data + found = True + else: + self.logger.error(f"[{self.name}] Target link '{target_link}' does not match default EE '{default_ee}' for single FK result.") + elif isinstance(req_link_names, list) and len(req_link_names) == 1: + # FK was called for one specific link + if target_link == req_link_names[0]: + extracted_pose_result = fk_data + found = True + else: + self.logger.error(f"[{self.name}] Target link '{target_link}' does not match requested link '{req_link_names[0]}' for single FK result.") + else: + self.logger.error(f"[{self.name}] Input fk_poses is single PoseStamped, but requested_link_names is ambiguous: {req_link_names}") + + + # Case 2: FK result is a List of PoseStamped + elif isinstance(fk_data, list): + self.logger.debug(f"[{self.name}] FK result is a list.") + if not isinstance(req_link_names, list) or len(fk_data) != len(req_link_names): + self.logger.error(f"[{self.name}] 'fk_poses' is a list, but 'requested_link_names' is missing, not a list, or lengths mismatch (Poses: {len(fk_data)}, Names: {len(req_link_names or [])}).") + return self._handle_failure() + + # Find the index matching the target link name + try: + target_index = req_link_names.index(target_link) + if isinstance(fk_data[target_index], PoseStamped): + 
extracted_pose_result = fk_data[target_index] + found = True + else: + self.logger.error(f"[{self.name}] Data at index {target_index} for link '{target_link}' is not a PoseStamped.") + except ValueError: + self.logger.error(f"[{self.name}] Target link '{target_link}' not found in requested_link_names: {req_link_names}") + except IndexError: + self.logger.error(f"[{self.name}] Index mismatch error after finding target link '{target_link}'.") + + # Case 3: FK result is None or unexpected type + else: + self.logger.error(f"[{self.name}] Input 'fk_poses' is None or unexpected type ({type(fk_data)}). FK likely failed.") + return self._handle_failure() + + + # Write results and return status + if found and extracted_pose_result is not None: + self.blackboard_set("extracted_pose", extracted_pose_result) + self.blackboard_set("success", True) + self.logger.info(f"[{self.name}] Successfully extracted pose for link '{target_link}'.") + return Status.SUCCESS + else: + # Error logged within logic above + return self._handle_failure() + + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return self._handle_failure() + except Exception as e: + self.logger.error(f"[{self.name}] Unexpected error extracting pose: {e}") + return self._handle_failure() + + + def _handle_failure(self) -> Status: + """Helper to set outputs on failure.""" + self.blackboard_set("extracted_pose", None) + self.blackboard_set("success", False) + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_transform_stamped.py b/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_transform_stamped.py new file mode 100644 index 00000000..1f13f791 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/extract_pose_from_transform_stamped.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the ExtractPoseFromTransformStamped behavior. +""" + +# Standard imports +from typing import Union, Optional + +# Third-party imports +from geometry_msgs.msg import Pose, TransformStamped +from overrides import override +import py_trees +from py_trees.common import Status + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class ExtractPoseFromTransformStamped(BlackboardBehavior): + """ + Extracts the geometry_msgs/Pose from a geometry_msgs/TransformStamped message. + + This is a utility behavior to bridge components that output TransformStamped + (like a TF lookup) with components that expect a Pose as input. + """ + + def blackboard_inputs( + self, + input_transform_stamped: Union[BlackboardKey, TransformStamped], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + input_transform_stamped: The source TransformStamped message. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + output_pose: Optional[BlackboardKey], # -> Optional[Pose] + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + output_pose: The extracted Pose message. + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def update(self) -> Status: + """ + Performs the extraction and writes the result to the blackboard. 
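+
+        The output Pose reuses the transform's translation and rotation verbatim,
+        so it describes the transform's child_frame_id expressed in its
+        header.frame_id. A purely illustrative sketch of the field mapping:
+
+            from geometry_msgs.msg import Pose, TransformStamped
+
+            ts = TransformStamped()
+            pose = Pose()
+            pose.position.x = ts.transform.translation.x
+            pose.position.y = ts.transform.translation.y
+            pose.position.z = ts.transform.translation.z
+            pose.orientation = ts.transform.rotation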
+ """ + try: + transform_stamped_in = self.blackboard_get("input_transform_stamped") + + if not isinstance(transform_stamped_in, TransformStamped): + self.logger.error( + f"[{self.name}] Input 'input_transform_stamped' is not a " + f"TransformStamped, but {type(transform_stamped_in)}." + ) + return Status.FAILURE + + # The transform contains a translation (Vector3) and rotation (Quaternion) + # which together form a Pose. + extracted_pose = Pose() + extracted_pose.position.x = transform_stamped_in.transform.translation.x + extracted_pose.position.y = transform_stamped_in.transform.translation.y + extracted_pose.position.z = transform_stamped_in.transform.translation.z + extracted_pose.orientation = transform_stamped_in.transform.rotation + + self.blackboard_set("output_pose", extracted_pose) + + self.logger.info( + f"[{self.name}] Successfully extracted Pose from TransformStamped." + ) + return Status.SUCCESS + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return Status.FAILURE + except Exception as e: + self.logger.error(f"[{self.name}] Unexpected error: {e}") + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/state/get_joint_states.py b/ada_feeding/ada_feeding/behaviors/state/get_joint_states.py new file mode 100644 index 00000000..7ab6e04a --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/get_joint_states.py @@ -0,0 +1,112 @@ +# Copyright (c) 2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +from rclpy.node import Node +from sensor_msgs.msg import JointState +import py_trees + +from typing import Any, Dict, List, Optional, Union +from ada_feeding.behaviors import BlackboardBehavior +from ada_feeding.helpers import BlackboardKey + + +class GetJointStates(BlackboardBehavior): + def __init__( + self, + name: str, + node=None, + ns: str = "/", + inputs: Optional[Dict[str, Union[BlackboardKey, Any]]] = None, + outputs: Optional[Dict[str, Optional[BlackboardKey]]] = None, + ): + super().__init__(name, ns=ns, inputs=inputs, outputs=outputs) + self.ns = ns + self.node = node + self.joint_states_sub = None + self.latest_joint_states = {} + self.received_all_joints = False + + def blackboard_inputs( + self, + joint_names: Union[BlackboardKey, Optional[List[str]]] = None, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + constraints: previous set of constraints to append to + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + joint_state: Union[BlackboardKey, JointState] = None, + joint_positions: Union[BlackboardKey, List[float]] = None, + joint_names: Union[BlackboardKey, Optional[List[str]]] = None, + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + constraints: list of constraints to send to MoveIt2Plan + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. 
+        super().blackboard_outputs(
+            **{key: value for key, value in locals().items() if key != "self"}
+        )
+
+    def setup(self, **kwargs):
+        # Initialize the ROS 2 node. Record whether this behavior created the
+        # node so that terminate() only destroys a node it actually owns.
+        self._owns_node = self.node is None
+        if self.node is None:
+            self.node = Node(self.name + "_node", namespace=self.ns)
+        # Subscribe to JointState topic
+        self.joint_states_sub = self.node.create_subscription(
+            JointState, "/joint_states", self.joint_states_callback, 10
+        )
+
+    def joint_states_callback(self, msg: JointState):
+        joint_names = self.blackboard_get("joint_names")
+        for i, name in enumerate(msg.name):
+            if name in joint_names:
+                self.latest_joint_states[name] = msg.position[i]
+
+    def update(self):
+        # Check if we have the joint states we need
+        if all(
+            name in self.latest_joint_states
+            for name in self.blackboard_get("joint_names")
+        ):
+            joint_names = list(self.latest_joint_states.keys())
+            joint_positions = list(self.latest_joint_states.values())
+
+            # Create the JointState message object
+            joint_state_msg = JointState()
+            joint_state_msg.header.stamp = self.node.get_clock().now().to_msg()
+
+            # Add frame_id if known/relevant, e.g., joint_state_msg.header.frame_id = "base_link"
+            joint_state_msg.name = joint_names
+            joint_state_msg.position = joint_positions
+
+            self.node.get_logger().info("Received all joint states")
+            self.node.get_logger().info(f"Joint Names: {joint_names}")
+            self.node.get_logger().info(f"Joint Positions: {joint_positions}")
+            self.blackboard_set("joint_state", joint_state_msg)
+            self.blackboard_set("joint_names", joint_names)
+            self.blackboard_set("joint_positions", joint_positions)
+            return py_trees.common.Status.SUCCESS
+        else:
+            return py_trees.common.Status.RUNNING
+
+    def terminate(self, new_status):
+        # Destroy the node only if this behavior created it in setup(); always
+        # drop the subscription handle and the cached joint states.
+        if getattr(self, "_owns_node", False) and self.node is not None:
+            self.node.destroy_node()
+            self.node = None
+        self.joint_states_sub = None
+        self.latest_joint_states.clear()
diff --git a/ada_feeding/ada_feeding/behaviors/state/load_pinocchio_model.py b/ada_feeding/ada_feeding/behaviors/state/load_pinocchio_model.py
new file mode 100644
index 00000000..72c439f7
--- /dev/null
+++ b/ada_feeding/ada_feeding/behaviors/state/load_pinocchio_model.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
+"""
+Defines the LoadPinocchioModel behavior, which loads the robot model
+using Pinocchio and places the model, data, and key kinematic information
+onto the blackboard for use by other behaviors.
+"""
+
+# Standard imports
+import os
+import subprocess
+import tempfile
+from typing import Union, Optional, List, Dict, Any
+from overrides import override
+
+# Third-party imports
+import pinocchio as pin
+import py_trees
+import py_trees.blackboard
+from py_trees.common import Access, Status
+import rclpy.node
+
+# Local imports
+from ada_feeding.helpers import BlackboardKey
+from ada_feeding.behaviors import BlackboardBehavior
+
+
+class LoadPinocchioModel(BlackboardBehavior):
+    """
+    Loads the robot's kinematic model using Pinocchio from a URDF/XACRO file.
+    It writes the Pinocchio model, data, and optionally pre-identified
+    joint velocity indices and frame IDs to the blackboard.
+    This behavior should typically run once at the start of a sequence
+    requiring Pinocchio-based calculations. It will only attempt to load
+    the model once per instantiation unless explicitly reset or re-set up.
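+
+    Illustrative sketch (using a toy sample model rather than the URDF this
+    behavior actually loads) of how downstream behaviors are expected to use
+    the model/data placed on the blackboard:
+
+        import pinocchio as pin
+
+        model = pin.buildSampleModelManipulator()
+        data = model.createData()
+        q = pin.neutral(model)
+        pin.forwardKinematics(model, data, q)
+        pin.updateFramePlacements(model, data)
+        frame_id = model.getFrameId(model.frames[-1].name)
+        placement = data.oMf[frame_id]  # pin.SE3 pose of that frame in the world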
+ """ + + def blackboard_inputs( + self, + urdf_file_path: Union[BlackboardKey, str], + jaco_joint_names: Optional[Union[BlackboardKey, List[str]]] = None, + articutool_joint_names: Optional[Union[BlackboardKey, List[str]]] = None, + jaco_end_effector_link_name: Optional[Union[BlackboardKey, str]] = None, + tool_tip_link_name: Optional[Union[BlackboardKey, str]] = None, + # Add other relevant link names if needed, e.g., articutool_base_link + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + urdf_file_path: Path to the robot's URDF or XACRO file. + jaco_joint_names: Optional. List of Jaco arm joint names. If provided, their + velocity indices in the Pinocchio model will be stored. + articutool_joint_names: Optional. List of Articutool joint names. If provided, + their velocity indices will be stored. + jaco_end_effector_link_name: Optional. Name of the Jaco end-effector link. + If provided, its frame ID will be stored. + tool_tip_link_name: Optional. Name of the tool tip link. If provided, + its frame ID will be stored. + """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + pinocchio_model: Optional[BlackboardKey], # -> Optional[pin.Model] + pinocchio_data: Optional[BlackboardKey], # -> Optional[pin.Data] + jaco_vel_indices_pin: Optional[BlackboardKey] = None, # -> Optional[List[int]] + articutool_vel_indices_pin: Optional[ + BlackboardKey + ] = None, # -> Optional[List[int]] + jaco_ee_frame_id_pin: Optional[BlackboardKey] = None, # -> Optional[int] + tool_tip_frame_id_pin: Optional[BlackboardKey] = None, # -> Optional[int] + # Add other relevant frame IDs or indices as outputs if needed + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + pinocchio_model: The loaded Pinocchio model object. + pinocchio_data: The Pinocchio data object associated with the model. + jaco_vel_indices_pin: List of velocity indices for Jaco joints in the model. + articutool_vel_indices_pin: List of velocity indices for Articutool joints. + jaco_ee_frame_id_pin: Frame ID for the Jaco end-effector link. + tool_tip_frame_id_pin: Frame ID for the tool tip link. + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, **kwargs) + self.node: Optional[rclpy.node.Node] = None + self._model_loaded_successfully = False # Flag to prevent re-loading + + @override + def setup(self, **kwargs): + """Gets the ROS2 node from arguments.""" + try: + self.node = kwargs["node"] + except KeyError as e: + self.logger.error( + f"[{self.name}] Behaviour expects 'node' in setup kwargs. 
{e}" + ) + # No return here, update will handle node being None + + def _resolve_package_path(self, path_str: str) -> Optional[str]: + """Resolves package:// paths to absolute paths.""" + if path_str.startswith("package://"): + try: + parts = path_str.split("package://", 1)[1].split("/", 1) + package_name = parts[0] + relative_path = parts[1] if len(parts) > 1 else "" + from ament_index_python.packages import get_package_share_directory + + package_share_directory = get_package_share_directory(package_name) + return os.path.join(package_share_directory, relative_path) + except Exception as e: + self.logger.error( + f"[{self.name}] Could not resolve package path '{path_str}': {e}" + ) + return None + return path_str + + def _get_vel_indices_for_joints( + self, model: pin.Model, joint_names: List[str], group_name_log: str + ) -> Optional[List[int]]: + """Helper to get velocity indices for a list of joint names.""" + vel_indices = [] + for joint_name in joint_names: + if model.existJointName(joint_name): + joint_id = model.getJointId(joint_name) + if model.joints[joint_id].nv == 1: # Assuming 1 DoF in velocity space + vel_indices.append(model.joints[joint_id].idx_v) + else: + self.logger.error( + f"[{self.name}] {group_name_log} joint '{joint_name}' has nv={model.joints[joint_id].nv} != 1. Cannot directly get single velocity index." + ) + return None + else: + self.logger.error( + f"[{self.name}] {group_name_log} joint '{joint_name}' not found in Pinocchio model." + ) + return None + if ( + not vel_indices and joint_names + ): # If names were provided but no indices found + self.logger.error( + f"[{self.name}] No velocity indices determined for {group_name_log} joints: {joint_names}" + ) + return None + return vel_indices + + @override + def update(self) -> Status: + """ + Loads the Pinocchio model if not already loaded and writes outputs to blackboard. + Returns SUCCESS if model is loaded (or was already loaded), FAILURE otherwise. + """ + if not self.node: + self.feedback_message = "Node not initialized in setup." + self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + if self._model_loaded_successfully: + self.feedback_message = "Pinocchio model already loaded and on blackboard." + self.logger.debug(f"[{self.name}] {self.feedback_message}") + return Status.SUCCESS # Already done, no need to reload + + try: + urdf_path_str: str = self.blackboard_get("urdf_file_path") + if not urdf_path_str: + self.feedback_message = "'urdf_file_path' not provided or is empty." 
+ self.logger.error(f"[{self.name}] {self.feedback_message}") + return Status.FAILURE + + resolved_urdf_path = self._resolve_package_path(urdf_path_str) + if not resolved_urdf_path: + self.feedback_message = f"Failed to resolve URDF path: {urdf_path_str}" + # _resolve_package_path already logged the error + return Status.FAILURE + + processed_urdf_path = resolved_urdf_path + temp_urdf_file_name = None + + if resolved_urdf_path.endswith(".xacro"): + self.logger.info( + f"[{self.name}] Processing XACRO file: {resolved_urdf_path}" + ) + with tempfile.NamedTemporaryFile( + mode="w", suffix=".urdf", delete=False + ) as temp_file: + temp_urdf_file_name = temp_file.name + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", resolved_urdf_path], + check=True, + capture_output=True, + text=True, + ) + temp_file.write(process.stdout) + processed_urdf_path = temp_urdf_file_name + + self.logger.info(f"[{self.name}] Loading URDF from: {processed_urdf_path}") + # Important: If your URDF has a floating base (e.g. for a mobile manipulator), + # you might need to load it with pin.buildModelFromUrdf(..., pin.JointModelFreeFlyer()) + # or ensure the URDF correctly specifies the root joint. + # For a fixed-base manipulator like Jaco on a table, this is usually fine. + model = pin.buildModelFromUrdf(processed_urdf_path) + data = model.createData() + self.logger.info( + f"[{self.name}] Pinocchio model loaded: {model.name}, Nq={model.nq}, Nv={model.nv}" + ) + + self.blackboard_set("pinocchio_model", model) + self.blackboard_set("pinocchio_data", data) + + # Optionally get and store Jaco joint velocity indices + jaco_joint_names_list = self.blackboard_get("jaco_joint_names") + if jaco_joint_names_list: + jaco_vel_indices = self._get_vel_indices_for_joints( + model, jaco_joint_names_list, "Jaco" + ) + if jaco_vel_indices is not None: + self.blackboard_set("jaco_vel_indices_pin", jaco_vel_indices) + self.logger.info( + f"[{self.name}] Stored Jaco velocity indices: {jaco_vel_indices}" + ) + # else: return Status.FAILURE # Fail if requested but not found + + # Optionally get and store Articutool joint velocity indices + articutool_joint_names_list = self.blackboard_get("articutool_joint_names") + if articutool_joint_names_list: + articutool_vel_indices = self._get_vel_indices_for_joints( + model, articutool_joint_names_list, "Articutool" + ) + if articutool_vel_indices is not None: + self.blackboard_set( + "articutool_vel_indices_pin", articutool_vel_indices + ) + self.logger.info( + f"[{self.name}] Stored Articutool velocity indices: {articutool_vel_indices}" + ) + # else: return Status.FAILURE + + # Optionally get and store Jaco EE frame ID + jaco_ee_link_name_str = self.blackboard_get("jaco_end_effector_link_name") + if jaco_ee_link_name_str: + if model.existFrame(jaco_ee_link_name_str): + jaco_ee_id = model.getFrameId(jaco_ee_link_name_str) + self.blackboard_set("jaco_ee_frame_id_pin", jaco_ee_id) + self.logger.info( + f"[{self.name}] Stored Jaco EE frame ID for '{jaco_ee_link_name_str}': {jaco_ee_id}" + ) + else: + self.logger.warn( + f"[{self.name}] Jaco EE link '{jaco_ee_link_name_str}' not found in Pinocchio model." 
+ ) + # Not failing here, as it's optional + + # Optionally get and store tool tip frame ID + tool_tip_link_name_str = self.blackboard_get("tool_tip_link_name") + if tool_tip_link_name_str: + if model.existFrame(tool_tip_link_name_str): + tool_tip_id = model.getFrameId(tool_tip_link_name_str) + self.blackboard_set("tool_tip_frame_id_pin", tool_tip_id) + self.logger.info( + f"[{self.name}] Stored tool tip frame ID for '{tool_tip_link_name_str}': {tool_tip_id}" + ) + else: + self.logger.warn( + f"[{self.name}] Tool tip link '{tool_tip_link_name_str}' not found in Pinocchio model." + ) + + self._model_loaded_successfully = True + self.feedback_message = "Pinocchio model loaded successfully." + self.logger.info(f"[{self.name}] {self.feedback_message}") + return Status.SUCCESS + + except FileNotFoundError as e: + self.feedback_message = f"URDF/XACRO file not found: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + except subprocess.CalledProcessError as e: + self.feedback_message = f"XACRO processing failed: {e}\nOutput:\n{e.stderr}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + except KeyError as e: # For blackboard_get if a required key is missing + self.feedback_message = ( + f"Blackboard key error during setup/input reading: {e}" + ) + self.logger.error(f"[{self.name}] {self.feedback_message}") + except Exception as e: + self.feedback_message = f"Failed to load Pinocchio model: {e}" + self.logger.error(f"[{self.name}] {self.feedback_message}") + finally: + if temp_urdf_file_name and os.path.exists(temp_urdf_file_name): + try: + os.unlink(temp_urdf_file_name) + except OSError as e_unlink: + self.logger.error( + f"[{self.name}] Failed to delete temp URDF file: {e_unlink}" + ) + + return Status.FAILURE # Reached if any exception occurred + + @override + def terminate(self, new_status: Status) -> None: + self.logger.debug(f"[{self.name}] Terminating with status {new_status}.") diff --git a/ada_feeding/ada_feeding/behaviors/state/offset_position_from_pose.py b/ada_feeding/ada_feeding/behaviors/state/offset_position_from_pose.py new file mode 100644 index 00000000..0eb89e70 --- /dev/null +++ b/ada_feeding/ada_feeding/behaviors/state/offset_position_from_pose.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +Defines the OffsetPositionFromPose behavior, which calculates a new 3D point +by applying a vector offset to the position component of an input pose. +""" + +# Standard imports +from typing import Union, Optional + +# Third-party imports +from geometry_msgs.msg import Pose, PoseStamped, Point, Vector3 +import numpy as np +from overrides import override +import py_trees +from py_trees.common import Status +import ros2_numpy + +# Local imports +from ada_feeding.helpers import BlackboardKey +from ada_feeding.behaviors import BlackboardBehavior + + +class OffsetPositionFromPose(BlackboardBehavior): + """ + Calculates a new position by applying a Vector3 offset to the position + of an input Pose or PoseStamped. This is useful for creating a target + point that is relative to another object's frame. + """ + + def blackboard_inputs( + self, + input_pose: Union[BlackboardKey, Pose, PoseStamped], + offset: Union[BlackboardKey, Vector3], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + input_pose: The source pose from which to take the initial position. + offset: The Vector3 offset to add to the pose's position. 
+ """ + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + output_position: Optional[BlackboardKey], # -> Optional[Point] + ) -> None: + """ + Blackboard Outputs + + Parameters + ---------- + output_position: The resulting position (as a geometry_msgs/Point) + after applying the offset. + """ + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def update(self) -> Status: + """ + Performs the position extraction, offset addition, and writes + the result to the blackboard. + """ + try: + pose_in = self.blackboard_get("input_pose") + offset_in = self.blackboard_get("offset") + + # Extract the position part of the input pose + if isinstance(pose_in, PoseStamped): + position_np = ros2_numpy.numpify(pose_in.pose.position) + elif isinstance(pose_in, Pose): + position_np = ros2_numpy.numpify(pose_in.position) + else: + self.logger.error( + f"[{self.name}] Input 'input_pose' is not a Pose or PoseStamped, but {type(pose_in)}." + ) + return Status.FAILURE + + # Convert offset to numpy array + offset_np = ros2_numpy.numpify(offset_in) + + # Calculate the new position + new_position_np = position_np + offset_np + + # Convert back to a ROS message and write to blackboard + new_position_msg = ros2_numpy.msgify(Point, new_position_np) + self.blackboard_set("output_position", new_position_msg) + + self.logger.info(f"[{self.name}] Successfully calculated offset position.") + return Status.SUCCESS + + except KeyError as e: + self.logger.error(f"[{self.name}] Blackboard key error: {e}") + return Status.FAILURE + except Exception as e: + self.logger.error(f"[{self.name}] Unexpected error: {e}") + return Status.FAILURE diff --git a/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py b/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py index ea6b7bad..8c503ee2 100644 --- a/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py +++ b/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py @@ -49,7 +49,7 @@ def blackboard_inputs( world_frame: Union[BlackboardKey, str] = "root", # +z will match this frame frame_to_orient_towards: Union[ BlackboardKey, str - ] = "forkTip", # +x will point towards this frame + ] = "tool_tip", # +x will point towards this frame ) -> None: """ Blackboard Inputs diff --git a/ada_feeding/ada_feeding/helpers.py b/ada_feeding/ada_feeding/helpers.py index 38573ba3..4a832312 100644 --- a/ada_feeding/ada_feeding/helpers.py +++ b/ada_feeding/ada_feeding/helpers.py @@ -302,6 +302,7 @@ def get_tf_object( def get_moveit2_object( blackboard: py_trees.blackboard.Client, node: Optional[Node] = None, + group_name: str = "jaco_arm_with_articutool", ) -> Tuple[MoveIt2, Lock]: """ Gets the MoveIt2 object and its corresponding lock from the blackboard. @@ -315,6 +316,7 @@ def get_moveit2_object( node: The ROS2 node that the MoveIt2 object should be associated with, if we need to create it from scratch. If None, this function will not create the MoveIt2 object if it doesn't exist, and will instead raise a KeyError. + group_name: The name of the MoveIt2 planning group Returns ------- @@ -329,8 +331,8 @@ def get_moveit2_object( # and its corresponding lock. Note that it is important that these keys start with # a "/" because to indicate it is an absolute path, so all behaviors can access # the same object. 
- moveit2_blackboard_key = "/moveit2" - moveit2_lock_blackboard_key = "/moveit2_lock" + moveit2_blackboard_key = f"/moveit2/{group_name}" + moveit2_lock_blackboard_key = f"/moveit2_lock/{group_name}" # First, register the MoveIt2 object and its corresponding lock for READ access if not blackboard.is_registered(moveit2_blackboard_key, Access.READ): @@ -353,22 +355,42 @@ def get_moveit2_object( node.get_logger().info( "MoveIt2 object and lock do not exist on the blackboard. Creating them now." ) + + if group_name == "jaco_arm": + _joint_names = kinova.joint_names() + _base_link = kinova.base_link_name() + _end_effector = "j2n6s200_end_effector" + elif group_name == "articutool": + _joint_names = ["atool_joint1", "atool_joint2"] + _base_link = "atool_base" + _end_effector = "tool_tip" + elif group_name == "jaco_arm_with_articutool": + _joint_names = kinova.joint_names() + ["atool_joint1", "atool_joint2"] + _base_link = kinova.base_link_name() + _end_effector = "tool_tip" + else: + raise ValueError(f"Unknown planning group name provided: {group_name}") + blackboard.register_key(moveit2_blackboard_key, Access.WRITE) blackboard.register_key(moveit2_lock_blackboard_key, Access.WRITE) # TODO: Assess whether ReentrantCallbackGroup is necessary for MoveIt2. callback_group = ReentrantCallbackGroup() moveit2 = MoveIt2( node=node, - joint_names=kinova.joint_names(), - base_link_name=kinova.base_link_name(), - end_effector_name="forkTip", - group_name="jaco_arm", + joint_names=_joint_names, + base_link_name=_base_link, + end_effector_name=_end_effector, + group_name=group_name, callback_group=callback_group, ) lock = Lock() blackboard.set(moveit2_blackboard_key, moveit2) blackboard.set(moveit2_lock_blackboard_key, lock) + node.get_logger().info( + f"Successfully created and stored MoveIt2 object and lock for group '{group_name}'." 
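As a usage sketch for the group-scoped accessor (the blackboard client and node are assumed to exist in the calling behavior; they are not shown in this hunk):

from ada_feeding.helpers import get_moveit2_object

# Each planning group now gets its own MoveIt2 instance and lock on the blackboard.
arm_moveit2, arm_lock = get_moveit2_object(blackboard, node, group_name="jaco_arm")
tool_moveit2, tool_lock = get_moveit2_object(blackboard, node, group_name="articutool")

with arm_lock:
    # Calls that mutate the shared jaco_arm MoveIt2 object belong inside the lock.
    ...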
+ ) + return moveit2, lock diff --git a/ada_feeding/ada_feeding/idioms/servo_until.py b/ada_feeding/ada_feeding/idioms/servo_until.py index 04d0d6ec..3e373695 100644 --- a/ada_feeding/ada_feeding/idioms/servo_until.py +++ b/ada_feeding/ada_feeding/idioms/servo_until.py @@ -174,7 +174,7 @@ def servo_until_pose( name: str, ns: str, target_pose_stamped_key: BlackboardKey, - end_effector_frame: str = "forkTip", + end_effector_frame: str = "tool_tip", tolerance_position: float = 0.005, tolerance_orientation: Union[float, Tuple[float, float, float]] = 0.09, relaxed_tolerance_position: float = 0.005, diff --git a/ada_feeding/ada_feeding/trees/acquire_food_tree.py b/ada_feeding/ada_feeding/trees/acquire_food_tree.py index 1bea2c2e..87e9ac2a 100644 --- a/ada_feeding/ada_feeding/trees/acquire_food_tree.py +++ b/ada_feeding/ada_feeding/trees/acquire_food_tree.py @@ -9,6 +9,7 @@ # Standard imports import pickle +import operator from typing import List, Optional # Third-party imports @@ -17,6 +18,7 @@ from overrides import override import py_trees from py_trees.blackboard import Blackboard +from py_trees.common import Status, ComparisonExpression from py_trees.behaviours import Success import py_trees_ros from rcl_interfaces.srv import SetParameters @@ -33,6 +35,8 @@ ComputeFoodFrame, ComputeActionConstraints, ComputeActionTwist, + RotateLocalApproachPoses, + ConditionallyRotateFoodFrame, ) from ada_feeding.behaviors.moveit2 import ( MoveIt2JointConstraint, @@ -41,9 +45,32 @@ MoveIt2PositionOffsetConstraint, MoveIt2Plan, MoveIt2Execute, + MoveIt2ComputeIK, + MoveIt2ComputeFK, ServoMove, ToggleCollisionObject, ) +from ada_feeding.behaviors.state import ( + GetJointStates, + ExtractJointsFromState, + CombineJointStates, + ExtractPoseFromPosesByLink, + ExtractPoseComponents, + CheckJacoDirectionalManipulability, + CheckArticutoolPathOrientationFeasibility, + CheckArticutoolPathLevelingFeasibility, + LoadPinocchioModel, +) +from ada_feeding.behaviors.ros.msgs import StampPoseFromPose +from ada_feeding.behaviors.ros.tf import ApplyTransform +from ada_feeding.behaviors.articutool import ( + ExecuteArticutoolTrajectory, + CallSetOrientationControl, + SwitchArticutoolControllers, + ComputeArticutoolLevelingJoints, + TriggerArticutoolCalibration, + ExecuteNamedPrimitive, +) from ada_feeding.helpers import BlackboardKey from ada_feeding.idioms import ( pre_moveto_config, @@ -92,7 +119,7 @@ def __init__( max_velocity_scaling_to_resting_configuration: Optional[float] = 0.8, max_acceleration_scaling_to_resting_configuration: Optional[float] = 0.8, pickle_goal_path: Optional[str] = None, - allowed_planning_time_for_move_above: float = 0.5, + allowed_planning_time_for_move_above: float = 1.0, allowed_planning_time_for_move_into: float = 0.5, allowed_planning_time_to_resting_configuration: float = 0.5, allowed_planning_time_for_recovery: float = 0.5, @@ -162,7 +189,7 @@ def create_tree( # to reduce swivels. 
max_path_len_joint = { "j2n6s200_joint_1": np.pi * 5.0 / 6.0, - "j2n6s200_joint_2": np.pi / 2.0, + "j2n6s200_joint_2": np.pi / 4.0, } # Get the base lin to publish servo commands in @@ -216,16 +243,50 @@ def create_tree( "max_velocity_scale": self.max_velocity_scaling_to_resting_configuration, "max_acceleration_scale": self.max_acceleration_scaling_to_resting_configuration, "allowed_planning_time": self.allowed_planning_time_to_resting_configuration, + "group_name": "jaco_arm", }, outputs={ "trajectory": BlackboardKey("resting_trajectory") }, ), ), + CheckArticutoolPathLevelingFeasibility( + name="CheckArticutoolLevelingFeasibilityForResting", + ns=name, + inputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_joint_names_pin": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + "jaco_ee_frame_id_pin": BlackboardKey( + "jaco_ee_frame_id_pin" + ), + "jaco_trajectory": BlackboardKey( + "move_into_jaco_arm_trajectory" + ), + "articutool_pitch_limits_rad": (-np.pi / 2, np.pi / 2), + "articutool_roll_limits_rad": (-np.pi, np.pi), + "num_trajectory_points_to_check": 20, + }, + outputs={ + "articutool_is_leveling_feasible": BlackboardKey( + "articutool_can_maintain_leveling" + ) + }, + ), MoveIt2Execute( name="Resting", ns=name, - inputs={"trajectory": BlackboardKey("resting_trajectory")}, + inputs={ + "trajectory": BlackboardKey("resting_trajectory"), + "group_name": "jaco_arm", + }, outputs={}, ), ], @@ -344,6 +405,7 @@ def create_tree( "cartesian_max_step": 0.001, "cartesian_fraction_threshold": 0.92, "allowed_planning_time": self.allowed_planning_time_for_recovery, + "group_name": "jaco_arm", }, outputs={ "trajectory": BlackboardKey( @@ -358,7 +420,8 @@ def create_tree( inputs={ "trajectory": BlackboardKey( "recovery_trajectory" - ) + ), + "group_name": "jaco_arm", }, outputs={}, ), @@ -369,12 +432,195 @@ def create_tree( ], # End RecoverySequence.children ) # End RecoverySequence - def move_above_plan( + def post_acquisition_sequence() -> py_trees.behaviour.Behaviour: + return py_trees.composites.Sequence( + name="PreAcquisitionSequence", + memory=True, + children=[ + CallSetOrientationControl( + name="DisableArticutoolOrientation", + ns=name, + inputs={ + "control_mode": 0, + }, + outputs={}, + ), + GetJointStates( + name="GetJacoArmStateForLeveling", + ns=name, + node=self._node, + inputs={ + "joint_names": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + }, + outputs={ + "joint_state": BlackboardKey( + "current_jaco_arm_state_for_leveling_fk" + ), + "joint_positions": None, + "joint_names": None, + }, + ), + MoveIt2ComputeFK( + name="GetJacoEEPoseForLeveling", + ns=name, + inputs={ + "group_name": "jaco_arm", + "joint_state": BlackboardKey( + "current_jaco_arm_state_for_leveling_fk" + ), + "fk_link_names": ["j2n6s200_end_effector"], + }, + outputs={ + "fk_poses": BlackboardKey("current_jaco_arm_fk_poses"), + "success": None, + }, + ), + ExtractPoseFromPosesByLink( + name="ExtractJacoEEPoseForLeveling", + ns=name, + inputs={ + "fk_poses": BlackboardKey("current_jaco_arm_fk_poses"), + "target_link_name": "j2n6s200_end_effector", + "requested_link_names": ["j2n6s200_end_effector"], + }, + outputs={ + "extracted_pose": BlackboardKey( + "current_jaco_ee_world_pose_stamped" + ), + "success": None, + }, + ), + ComputeArticutoolLevelingJoints( + 
name="ComputeLevelingAngles", + ns=name, + inputs={ + "jaco_ee_world_pose": BlackboardKey( + "current_jaco_ee_world_pose_stamped" + ), + }, + outputs={ + "articutool_joint_positions": BlackboardKey( + "articutool_joint_positions" + ), + "articutool_leveling_ik_found": BlackboardKey( + "leveling_ik_success" + ), + }, + ), + MoveIt2JointConstraint( + name="SetLevelingJointGoal", + ns=name, + inputs={ + "joint_positions": BlackboardKey( + "articutool_joint_positions" + ), + }, + outputs={ + "constraints": BlackboardKey( + "articutool_leveling_constraints" + ) + }, + ), + py_trees.decorators.Timeout( + name="PlanToLevelArticutoolTimeout", + # Increase allowed_planning_time to account for ROS2 overhead and MoveIt2 setup and such + duration=10.0 + * self.allowed_planning_time_to_resting_configuration, + child=MoveIt2Plan( + name="PlanToLevelArticutool", + ns=name, + inputs={ + "goal_constraints": BlackboardKey( + "articutool_leveling_constraints" + ), + "group_name": "articutool", + }, + outputs={ + "trajectory": BlackboardKey( + "level_articutool_trajectory" + ) + }, + ), + ), + SwitchArticutoolControllers( + name="SwitchArticutoolToJointTrajectory", + ns=name, + inputs={ + "controllers_to_activate": ["joint_trajectory_controller"], + "controllers_to_deactivate": ["velocity_controller"], + }, + outputs={ + "switch_call_succeeded": None, + "switch_response_ok": None, + }, + ), + ExecuteArticutoolTrajectory( + name="LevelArticutool", + ns=name, + inputs={ + "trajectory": BlackboardKey("level_articutool_trajectory"), + }, + outputs={ + "action_goal_accepted": BlackboardKey("tool_goal_accepted"), + "action_result_code": BlackboardKey( + "tool_exec_result_code" + ), + "action_status": BlackboardKey("tool_action_status"), + }, + ), + SwitchArticutoolControllers( + name="SwitchArticutoolToVelocity", + ns=name, + inputs={ + "controllers_to_activate": ["velocity_controller"], + "controllers_to_deactivate": [ + "joint_trajectory_controller" + ], + }, + outputs={ + "switch_call_succeeded": None, + "switch_response_ok": None, + }, + ), + ExecuteNamedPrimitive( + name="RunPostAcquisitionPrimitive", + ns=name, + inputs={ + "primitive_name": BlackboardKey( + "post_acquisition_action_name" + ), + "primitive_params": BlackboardKey( + "post_acquisition_action_params" + ), + }, + outputs={ + "primitive_status": None, + }, + ), + CallSetOrientationControl( + name="SetArticutoolOrientation", + ns=name, + inputs={ + "control_mode": 1, + }, + outputs={}, + ), + ], + ) + + def pre_acquisition_sequence( flip_food_frame: bool = False, action: Optional[BlackboardKey] = None, ) -> py_trees.behaviour.Behaviour: return py_trees.composites.Sequence( - name="MoveAbovePlanningSeq", + name="PreAcquisitionSequence", memory=True, children=[ # Compute Food Frame @@ -394,12 +640,13 @@ def move_above_plan( # Default food_frame_id = "food" # Default world_frame = "world" "flip_food_frame": flip_food_frame, + "align_to_robot_base": False, }, outputs={ "action_select_request": BlackboardKey( "action_request" ), - "food_frame": None, + "food_frame": BlackboardKey("initial_food_frame"), }, ), ), @@ -435,91 +682,596 @@ def move_above_plan( # Default approach_frame_id = "approach" }, outputs={ - "move_above_pose": BlackboardKey("move_above_pose"), - "move_into_pose": BlackboardKey("move_into_pose"), + "move_above_pose": BlackboardKey( + "move_above_pose_food_frame" + ), + "move_into_pose": BlackboardKey( + "move_into_pose_food_frame" + ), "approach_thresh": BlackboardKey("approach_thresh"), "grasp_thresh": BlackboardKey("grasp_thresh"), 
"ext_thresh": BlackboardKey("ext_thresh"), "action": BlackboardKey("action"), "action_index": BlackboardKey("action_index"), + "post_move_into_primitive_name": BlackboardKey( + "post_move_into_action_name" + ), + "post_move_into_primitive_params": BlackboardKey( + "post_move_into_action_params" + ), + "post_acquisition_primitive_name": BlackboardKey( + "post_acquisition_action_name" + ), + "post_acquisition_primitive_params": BlackboardKey( + "post_acquisition_action_params" + ), + "should_align_to_base": BlackboardKey( + "should_align_to_base" + ), }, ), ), + ConditionallyRotateFoodFrame( + name="ConditionallyRotateFoodFrame", + ns=name, + inputs={ + "initial_food_frame": BlackboardKey("initial_food_frame"), + "should_align_to_base": BlackboardKey( + "should_align_to_base" + ), + }, + outputs={ + "food_frame_updated": None, + }, + ), # Re-Tare FT Sensor and default to 4N threshold pre_moveto_config(name="PreAcquireFTTare"), - ### Move Above Food - MoveIt2PoseConstraint( - name="MoveAbovePose", + # --- Prepare MoveAbove Pose for IK --- + StampPoseFromPose( + name="StampMoveAbovePoseFood", ns=name, inputs={ - "pose": BlackboardKey("move_above_pose"), + "input_pose": BlackboardKey("move_above_pose_food_frame"), "frame_id": "food", - "tolerance_orientation": [ - 0.01, - 0.01, - 0.01, - ], # x, y, z rotvec - "parameterization": 1, }, outputs={ - "constraints": BlackboardKey("goal_constraints"), + "output_pose_stamped": BlackboardKey( + "move_above_pose_stamped_food" + ) + }, + ), + ApplyTransform( + name="TransformMoveAbovePoseToWorld", + ns=name, + inputs={ + "stamped_msg": BlackboardKey( + "move_above_pose_stamped_food" + ), + "target_frame": "j2n6s200_link_base", + }, + outputs={ + "transformed_msg": BlackboardKey( + "tool_tip_move_above_pose_world" + ) + }, + ), + StampPoseFromPose( + name="StampMoveIntoPoseFood", + ns=name, + inputs={ + "input_pose": BlackboardKey("move_into_pose_food_frame"), + "frame_id": "food", + }, + outputs={ + "output_pose_stamped": BlackboardKey( + "move_into_pose_stamped_food" + ) + }, + ), + ApplyTransform( + name="TransformMoveIntoPoseToWorld", + ns=name, + inputs={ + "stamped_msg": BlackboardKey("move_into_pose_stamped_food"), + "target_frame": "j2n6s200_link_base", + }, + outputs={ + "transformed_msg": BlackboardKey( + "tool_tip_move_into_pose_world" + ) + }, + ), + LoadPinocchioModel( + name="LoadPinocchioModel", + ns=name, + inputs={ + "urdf_file_path": "package://ada_moveit/config/ada.urdf.xacro", + "jaco_joint_names": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + "articutool_joint_names": ["atool_joint1", "atool_joint2"], + "jaco_end_effector_link_name": "j2n6s200_end_effector", + "tool_tip_link_name": "tool_tip", + }, + outputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_vel_indices_pin": BlackboardKey( + "jaco_vel_indices_pin" + ), + "articutool_vel_indices_pin": BlackboardKey( + "articutool_vel_indices_pin" + ), + "jaco_ee_frame_id_pin": BlackboardKey( + "jaco_ee_frame_id_pin" + ), + "tool_tip_frame_id_pin": BlackboardKey( + "tool_tip_frame_id_pin" + ), + }, + ), + ], + ) + + def move_above_sequence() -> py_trees.behaviour.Behaviour: + return py_trees.composites.Sequence( + name="MoveAboveSequence", + memory=True, + children=[ + GetJointStates( + name="GetFullCurrentJointStateForIKSeed", + ns=name, + node=self._node, + inputs={ + "joint_names": [ + "j2n6s200_joint_1", + 
"j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + "atool_joint1", + "atool_joint2", + ] + }, + outputs={ + "joint_state": BlackboardKey( + "current_full_joint_state_for_ik" + ) + }, + ), + MoveIt2JointConstraint( + name="CreateMoveAboveArticutoolJointConstraint", + ns=name, + inputs={ + "joint_names": ["atool_joint1", "atool_joint2"], + "joint_positions": [0.0, 0.0], + "tolerance": 0.785, + }, + outputs={ + "constraints": BlackboardKey("move_above_ik_constraints") + }, + ), + MoveIt2ComputeIK( + name="ComputeIKForMoveAbove", + ns=name, + inputs={ + "target_pose": BlackboardKey( + "tool_tip_move_above_pose_world" + ), + "group_name": "jaco_arm_with_articutool", + "start_joint_state": BlackboardKey( + "current_full_joint_state_for_ik" + ), + "constraints": BlackboardKey("move_above_ik_constraints"), + }, + outputs={ + "ik_solution_joint_state": BlackboardKey( + "move_above_ik_solution_8dof" + ), + "success": BlackboardKey("move_above_ik_success"), + }, + ), + # Check Jaco Directional Manipulability + CheckJacoDirectionalManipulability( + name="CheckJacoManipulabilityForMoveInto", + ns=name, + inputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_vel_indices_pin": BlackboardKey( + "jaco_vel_indices_pin" + ), + "jaco_ee_frame_id_pin": BlackboardKey( + "jaco_ee_frame_id_pin" + ), + "current_full_robot_joint_state_MA": BlackboardKey( + "move_above_ik_solution_8dof" + ), + "tool_tip_move_above_pose_world": BlackboardKey( + "tool_tip_move_above_pose_world" + ), + "tool_tip_move_into_pose_world": BlackboardKey( + "tool_tip_move_into_pose_world" + ), + "directional_manipulability_threshold": 0.01, + }, + outputs={ + "jaco_directional_manipulability_score": BlackboardKey( + "jaco_manip_score" + ), + "jaco_is_manipulable_for_direction": BlackboardKey( + "jaco_can_move_into" + ), + }, + ), + ExtractJointsFromState( + name="ExtractJacoArmJointsForMoveAbove", + ns=name, + inputs={ + "source_joint_state": BlackboardKey( + "move_above_ik_solution_8dof" + ), + "target_joint_names": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + }, + outputs={ + "output_joint_names": BlackboardKey( + "move_above_jaco_arm_joint_names" + ), + "output_joint_positions": BlackboardKey( + "move_above_jaco_arm_joint_positions" + ), + "success": None, + }, + ), + ExtractJointsFromState( + name="ExtractArticutoolJointsForMoveAbove", + ns=name, + inputs={ + "source_joint_state": BlackboardKey( + "move_above_ik_solution_8dof" + ), + "target_joint_names": ["atool_joint1", "atool_joint2"], + }, + outputs={ + "output_joint_names": BlackboardKey( + "move_above_articutool_joint_names" + ), + "output_joint_positions": BlackboardKey( + "move_above_articutool_joint_positions" + ), + "success": None, + }, + ), + MoveIt2JointConstraint( + name="SetJacoArmJointConstraintForMoveAbove", + ns=name, + inputs={ + "joint_positions": BlackboardKey( + "move_above_jaco_arm_joint_positions" + ), + "joint_names": BlackboardKey( + "move_above_jaco_arm_joint_names" + ), + }, + outputs={ + "constraints": BlackboardKey( + "move_above_jaco_arm_constraints" + ) + }, + ), + MoveIt2JointConstraint( + name="SetArticutoolJointConstraintForMoveAbove", + ns=name, + inputs={ + "joint_positions": BlackboardKey( + "move_above_articutool_joint_positions" + ), + "joint_names": BlackboardKey( + "move_above_articutool_joint_names" + ), + }, + outputs={ + 
"constraints": BlackboardKey( + "move_above_articutool_constraints" + ) + }, + ), + MoveIt2OrientationConstraint( + name="SetJacoArmPathConstraintForMoveAbove", + ns=name, + inputs={ + "constraints": None, + "quat_xyzw": (0.707, 0.0, 0.0, 0.707), + "tolerance": ( + np.pi, + 2.0 * np.pi, + 2.0 * np.pi, + ), + }, + outputs={ + "constraints": BlackboardKey( + "move_above_jaco_arm_path_constraints" + ), }, ), py_trees.decorators.Timeout( - name="MoveAbovePlanTimeout", - # Increase allowed_planning_time to account for ROS2 overhead and MoveIt2 setup and such + name="MoveAboveJacoArmPlanTimeout", duration=10.0 * self.allowed_planning_time_for_move_above, child=MoveIt2Plan( - name="MoveAbovePlan", + name="MoveAboveJacoArmPlan", ns=name, inputs={ - "goal_constraints": BlackboardKey("goal_constraints"), + "goal_constraints": BlackboardKey( + "move_above_jaco_arm_constraints" + ), + "path_constraints": BlackboardKey( + "move_above_jaco_arm_path_constraints" + ), "max_velocity_scale": self.max_velocity_scaling_move_above, "max_acceleration_scale": self.max_acceleration_scaling_move_above, "allowed_planning_time": self.allowed_planning_time_for_move_above, - "max_path_len_joint": max_path_len_joint, + "group_name": "jaco_arm", }, outputs={ - "trajectory": BlackboardKey("move_above_trajectory"), - "end_joint_state": BlackboardKey("test_into_joints"), + "trajectory": BlackboardKey( + "move_above_jaco_arm_trajectory" + ), + "end_joint_state": BlackboardKey( + "move_above_jaco_arm_end_joint_state" + ), }, ), ), - ### Test MoveIntoFood + py_trees.decorators.Timeout( + name="MoveAboveArticutoolPlanTimeout", + duration=10.0 * self.allowed_planning_time_for_move_above, + child=MoveIt2Plan( + name="MoveAboveArticutoolPlan", + ns=name, + inputs={ + "goal_constraints": BlackboardKey( + "move_above_articutool_constraints" + ), + "max_velocity_scale": self.max_velocity_scaling_move_above, + "max_acceleration_scale": self.max_acceleration_scaling_move_above, + "allowed_planning_time": self.allowed_planning_time_for_move_above, + "group_name": "articutool", + }, + outputs={ + "trajectory": BlackboardKey( + "move_above_articutool_trajectory" + ), + "end_joint_state": BlackboardKey( + "move_above_articutool_end_joint_state" + ), + }, + ), + ), + CombineJointStates( + name="CombineJacoArmAndArticutoolJoints", + ns=name, + inputs={ + "joint_state_1": BlackboardKey( + "move_above_jaco_arm_end_joint_state" + ), + "joint_state_2": BlackboardKey( + "move_above_articutool_end_joint_state" + ), + "full_joint_names": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + "atool_joint1", + "atool_joint2", + ], + }, + outputs={ + "combined_joint_state": BlackboardKey( + "move_above_end_joint_state" + ), + }, + ), + ], + ) + + def move_into_sequence() -> py_trees.behaviour.Behaviour: + return py_trees.composites.Sequence( + name="MoveIntoSequence", + memory=True, + children=[ + # Compute IK for the target pose using the full jaco_arm_with_articutool planning group + MoveIt2ComputeIK( + name="ComputeIKForMoveInto", + ns=name, + inputs={ + "target_pose": BlackboardKey( + "tool_tip_move_into_pose_world" + ), + "group_name": "jaco_arm_with_articutool", + "start_joint_state": BlackboardKey( + "move_above_end_joint_state" + ), + }, + outputs={ + "ik_solution_joint_state": BlackboardKey( + "move_into_ik_solution_8dof" + ), + "success": BlackboardKey("move_into_ik_success"), + }, + ), + MoveIt2ComputeFK( + name="ComputeMoveIntoPoses", + ns=name, + inputs={ + 
"group_name": "jaco_arm_with_articutool", + "joint_state": BlackboardKey("move_into_ik_solution_8dof"), + "fk_link_names": ["j2n6s200_end_effector", "tool_tip"], + }, + outputs={ + "fk_poses": BlackboardKey("move_into_poses"), + "success": None, + }, + ), + ExtractPoseFromPosesByLink( + name="GetMoveIntoJacoArmEEPose", + ns=name, + inputs={ + "fk_poses": BlackboardKey("move_into_poses"), + "target_link_name": "j2n6s200_end_effector", + "requested_link_names": [ + "j2n6s200_end_effector", + "tool_tip", + ], + }, + outputs={ + "extracted_pose": BlackboardKey( + "move_into_jaco_arm_ee_pose" + ), + "success": None, + }, + ), + ExtractPoseFromPosesByLink( + name="GetMoveIntoToolTipPose", + ns=name, + inputs={ + "fk_poses": BlackboardKey("move_into_poses"), + "target_link_name": "tool_tip", + "requested_link_names": [ + "j2n6s200_end_effector", + "tool_tip", + ], + }, + outputs={ + "extracted_pose": BlackboardKey("move_into_tool_tip_pose"), + "success": None, + }, + ), + ComputeArticutoolLevelingJoints( + name="ComputeLevelingAngles", + ns=name, + inputs={ + "jaco_ee_world_pose": BlackboardKey( + "move_into_jaco_arm_ee_pose" + ), + }, + outputs={ + "articutool_joint_positions": None, + "articutool_leveling_ik_found": BlackboardKey( + "move_into_leveling_ik_success" + ), + }, + ), MoveIt2PoseConstraint( - name="MoveIntoPose", + name="MoveIntoJacoArmEEPoseConstraint", ns=name, inputs={ - "pose": BlackboardKey("move_into_pose"), - "frame_id": "food", + "pose": BlackboardKey("move_into_jaco_arm_ee_pose"), + "frame_id": "j2n6s200_link_base", }, outputs={ - "constraints": BlackboardKey("goal_constraints"), + "constraints": BlackboardKey("move_into_goal_constraints"), }, ), py_trees.decorators.Timeout( - name="MoveIntoPlanTimeout", + name="MoveIntoJacoArmEEPlanTimeout", # Increase allowed_planning_time to account for ROS2 overhead and MoveIt2 setup and such duration=10.0 * self.allowed_planning_time_for_move_into, child=MoveIt2Plan( - name="MoveIntoPlan", + name="MoveIntoJacoArmEEPlan", ns=name, inputs={ - "goal_constraints": BlackboardKey("goal_constraints"), + "goal_constraints": BlackboardKey( + "move_into_goal_constraints" + ), "max_velocity_scale": self.max_velocity_scaling_move_into, "max_acceleration_scale": self.max_acceleration_scaling_move_into, "cartesian": True, "cartesian_max_step": 0.001, "cartesian_fraction_threshold": 0.92, - "start_joint_state": BlackboardKey("test_into_joints"), + "start_joint_state": BlackboardKey( + "move_above_jaco_arm_end_joint_state" + ), "max_path_len_joint": max_path_len_joint, "allowed_planning_time": self.allowed_planning_time_for_move_into, + "group_name": "jaco_arm", }, outputs={ - "trajectory": BlackboardKey("move_into_trajectory") + "trajectory": BlackboardKey( + "move_into_jaco_arm_trajectory" + ) }, ), ), + ExtractPoseComponents( + name="ExtractMoveIntoPoseComponents", + ns=name, + inputs={ + "input_pose_object": BlackboardKey( + "move_into_tool_tip_pose" + ), + }, + outputs={ + "output_position": None, + "output_orientation": BlackboardKey( + "move_into_tool_tip_orientation" + ), + "output_header": None, + "success": None, + }, + ), + CheckArticutoolPathOrientationFeasibility( + name="CheckArticutoolFeasibilityForMoveInto", + ns=name, + inputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_joint_names_pin": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + "jaco_ee_frame_id_pin": 
BlackboardKey( + "jaco_ee_frame_id_pin" + ), + "articutool_joint_names_pin": [ + "atool_joint1", + "atool_joint2", + ], + "jaco_trajectory": BlackboardKey( + "move_into_jaco_arm_trajectory" + ), + "desired_tool_tip_world_orientation": BlackboardKey( + "move_into_tool_tip_orientation" + ), + "articutool_pitch_limits_rad": (-np.pi / 2, np.pi / 2), + "articutool_roll_limits_rad": (-np.pi, np.pi), + "num_trajectory_points_to_check": 20, + }, + outputs={ + "articutool_is_orientation_feasible": BlackboardKey( + "articutool_can_maintain_scoop_angle" + ) + }, + ), ], ) @@ -619,17 +1371,76 @@ def move_above_plan( name="BackupFlipFoodFrameSel", memory=True, children=[ - move_above_plan(True), - move_above_plan(False, BlackboardKey("action")), + pre_acquisition_sequence(True), + pre_acquisition_sequence( + False, BlackboardKey("action") + ), ], ), + py_trees.decorators.Retry( + name="PlanAcquisitionSequenceRetry", + num_failures=10, + child=py_trees.composites.Sequence( + name="PlanAcquisitionSequence", + memory=True, + children=[ + move_above_sequence(), + move_into_sequence(), + ], + ), + ), + CallSetOrientationControl( + name="DisableArticutoolOrientation", + ns=name, + inputs={ + "control_mode": 0, + }, + outputs={}, + ), + SwitchArticutoolControllers( + name="SwitchArticutoolToJointTrajectory", + ns=name, + inputs={ + "controllers_to_activate": [ + "joint_trajectory_controller" + ], + "controllers_to_deactivate": [ + "velocity_controller" + ], + }, + outputs={ + "switch_call_succeeded": None, + "switch_response_ok": None, + }, + ), + ExecuteArticutoolTrajectory( + name="MoveAboveArticutool", + ns=name, + inputs={ + "trajectory": BlackboardKey( + "move_above_articutool_trajectory" + ), + }, + outputs={ + "action_goal_accepted": BlackboardKey( + "tool_goal_accepted" + ), + "action_result_code": BlackboardKey( + "tool_exec_result_code" + ), + "action_status": BlackboardKey( + "tool_action_status" + ), + }, + ), MoveIt2Execute( - name="MoveAbove", + name="MoveAboveJacoArm", ns=name, inputs={ "trajectory": BlackboardKey( - "move_above_trajectory" - ) + "move_above_jaco_arm_trajectory" + ), + "group_name": "jaco_arm", }, outputs={}, ), @@ -689,66 +1500,80 @@ def move_above_plan( # Starts a new Sequence w/ Memory internally workers=[ ### Move Into Food - MoveIt2PoseConstraint( - name="MoveIntoPose", + SwitchArticutoolControllers( + name="SwitchArticutoolToVelocity", ns=name, inputs={ - "pose": BlackboardKey("move_into_pose"), - "frame_id": "food", + "controllers_to_activate": [ + "velocity_controller" + ], + "controllers_to_deactivate": [ + "joint_trajectory_controller" + ], }, outputs={ - "constraints": BlackboardKey( - "goal_constraints" - ), + "switch_call_succeeded": None, + "switch_response_ok": None, }, ), - # If this fails - # Auto-fallback to precomputed MoveInto - # From move_above_plan() - py_trees.decorators.FailureIsSuccess( - name="MoveIntoPlanFallbackPrecomputed", - child=py_trees.decorators.Timeout( - name="MoveIntoPlanTimeout", - # Increase allowed_planning_time to account for ROS2 overhead and MoveIt2 setup and such - duration=10.0 - * self.allowed_planning_time_for_move_into, - child=MoveIt2Plan( - name="MoveIntoPlan", - ns=name, - inputs={ - "goal_constraints": BlackboardKey( - "goal_constraints" - ), - "max_velocity_scale": self.max_velocity_scaling_move_into, - "max_acceleration_scale": self.max_acceleration_scaling_move_into, - "cartesian": True, - "cartesian_max_step": 0.001, - "cartesian_fraction_threshold": 0.92, - "max_path_len_joint": max_path_len_joint, - 
"allowed_planning_time": self.allowed_planning_time_for_move_into, - }, - outputs={ - "trajectory": BlackboardKey( - "move_into_trajectory" - ) - }, + py_trees.timers.Timer( + name="WaitForIMUToSettle", + duration=2.0, + ), + TriggerArticutoolCalibration( + name="TriggerArticutoolCalibration", + ns=name, + inputs={}, + outputs={}, + ), + CallSetOrientationControl( + name="SetArticutoolOrientation", + ns=name, + inputs={ + "control_mode": 2, + "target_orientation_robot_base_quat": BlackboardKey( + "move_into_tool_tip_orientation" ), - ), + }, + outputs={}, ), # MoveInto expect F/T failure py_trees.decorators.FailureIsSuccess( - name="MoveIntoExecuteSucceed", + name="MoveIntoJacoArmExecuteSucceed", child=MoveIt2Execute( - name="MoveInto", + name="MoveIntoJacoArm", ns=name, inputs={ "trajectory": BlackboardKey( - "move_into_trajectory" + "move_into_jaco_arm_trajectory" ) }, outputs={}, ), ), + CallSetOrientationControl( + name="SetArticutoolOrientation", + ns=name, + inputs={ + "control_mode": 0, + }, + outputs={}, + ), + ExecuteNamedPrimitive( + name="RunPostMoveIntoPrimitive", + ns=name, + inputs={ + "primitive_name": BlackboardKey( + "post_move_into_action_name" + ), + "primitive_params": BlackboardKey( + "post_move_into_action_params" + ), + }, + outputs={ + "primitive_status": None, + }, + ), ### Scoped Behavior for Moveit2_Servo scoped_behavior( name="MoveIt2Servo", @@ -953,6 +1778,7 @@ def move_above_plan( ), # End MoveIt2Servo ], # End SafeFTPreempt.workers ), # End SafeFTPreempt + post_acquisition_sequence(), ], # End OctomapAndTableCollision.workers ), # OctomapAndTableCollision ] diff --git a/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py b/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py index ab98c495..06e2ccec 100644 --- a/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py @@ -22,6 +22,7 @@ MoveIt2Execute, MoveIt2JointConstraint, ) +from ada_feeding.behaviors.state import GetJointStates from ada_feeding.helpers import BlackboardKey from ada_feeding.idioms import pre_moveto_config, scoped_behavior from ada_feeding.idioms.bite_transfer import get_toggle_watchdog_listener_behavior @@ -188,6 +189,7 @@ def create_tree( "allowed_planning_time": self.allowed_planning_time, "max_velocity_scale": self.max_velocity_scaling_factor, "max_acceleration_scale": self.max_acceleration_scaling_factor, + "group_name": "jaco_arm", }, outputs={"trajectory": BlackboardKey("trajectory")}, ), @@ -195,7 +197,10 @@ def create_tree( MoveIt2Execute( name="MoveToConfigurationExecute", ns=name, - inputs={"trajectory": BlackboardKey("trajectory")}, + inputs={ + "trajectory": BlackboardKey("trajectory"), + "group_name": "jaco_arm", + }, outputs={}, ), ], diff --git a/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py index 35d51970..6440c3e3 100644 --- a/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py @@ -27,6 +27,11 @@ MoveIt2JointConstraint, MoveIt2OrientationConstraint, ) +from ada_feeding.behaviors.state import ( + GetJointStates, + CheckArticutoolPathLevelingFeasibility, + LoadPinocchioModel, +) from ada_feeding.helpers import BlackboardKey from ada_feeding.idioms import pre_moveto_config, scoped_behavior 
from ada_feeding.idioms.bite_transfer import ( @@ -37,6 +42,8 @@ MoveToTree, ) +import numpy as np + class MoveToConfigurationWithWheelchairWallTree(MoveToTree): """ @@ -135,6 +142,7 @@ def create_tree( name="KeepForkStraightPathConstraint", ns=name, inputs={ + "constraints": BlackboardKey("goal_constraints"), "quat_xyzw": self.orientation_constraint_quaternion, "tolerance": self.orientation_constraint_tolerances, "parameterization": 1, # Rotation vector @@ -150,6 +158,34 @@ def create_tree( name=name, memory=True, children=[ + LoadPinocchioModel( + name="LoadPinocchioModel", + ns=name, + inputs={ + "urdf_file_path": "package://ada_moveit/config/ada.urdf.xacro", + "jaco_joint_names": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + "articutool_joint_names": ["atool_joint1", "atool_joint2"], + "jaco_end_effector_link_name": "j2n6s200_end_effector", + "tool_tip_link_name": "tool_tip", + }, + outputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_vel_indices_pin": BlackboardKey("jaco_vel_indices_pin"), + "articutool_vel_indices_pin": BlackboardKey( + "articutool_vel_indices_pin" + ), + "jaco_ee_frame_id_pin": BlackboardKey("jaco_ee_frame_id_pin"), + "tool_tip_frame_id_pin": BlackboardKey("tool_tip_frame_id_pin"), + }, + ), # Retare the F/T sensor and set the F/T Thresholds pre_moveto_config( name=name + "PreMoveToConfig", @@ -190,15 +226,47 @@ def create_tree( "allowed_planning_time": self.allowed_planning_time, "max_velocity_scale": self.max_velocity_scaling_factor, "ignore_violated_path_constraints": True, + "group_name": "jaco_arm", }, outputs={"trajectory": BlackboardKey("trajectory")}, ), ), + CheckArticutoolPathLevelingFeasibility( + name="CheckArticutoolLevelingFeasibilityForConfiguration", + ns=name, + inputs={ + "pinocchio_model": BlackboardKey("pinocchio_model"), + "pinocchio_data": BlackboardKey("pinocchio_data"), + "jaco_joint_names_pin": [ + "j2n6s200_joint_1", + "j2n6s200_joint_2", + "j2n6s200_joint_3", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ], + "jaco_ee_frame_id_pin": BlackboardKey( + "jaco_ee_frame_id_pin" + ), + "jaco_trajectory": BlackboardKey("trajectory"), + "articutool_pitch_limits_rad": (-np.pi / 2, np.pi / 2), + "articutool_roll_limits_rad": (-np.pi, np.pi), + "num_trajectory_points_to_check": 20, + }, + outputs={ + "articutool_is_leveling_feasible": BlackboardKey( + "articutool_can_maintain_leveling" + ) + }, + ), # Execute MoveIt2Execute( name="MoveToStagingConfigurationExecute", ns=name, - inputs={"trajectory": BlackboardKey("trajectory")}, + inputs={ + "trajectory": BlackboardKey("trajectory"), + "group_name": "jaco_arm", + }, outputs={}, ), ], diff --git a/ada_feeding/config/ada_feeding_action_servers_custom.yaml b/ada_feeding/config/ada_feeding_action_servers_custom.yaml index 2c463282..4a271efc 100644 --- a/ada_feeding/config/ada_feeding_action_servers_custom.yaml +++ b/ada_feeding/config/ada_feeding_action_servers_custom.yaml @@ -118,12 +118,12 @@ ada_feeding_action_servers: - -2.0289974534007733 - -3.1847696853949206 MoveAbovePlate.tree_kwargs.joint_positions: - - 6.794832881070045 - - 3.031285852819256 - - 4.479355460783922 - - 7.187106922258792 - - -1.9369287787234262 - - -3.5496749526931417 + - -2.4538579336877304 + - 3.07974419938212 + - 1.8320725365979 + - 4.096143890468605 + - -2.003422584820525 + - -3.2123560395465063 
MoveFromMouth.tree_kwargs.staging_configuration_position: - -0.6081959669757366 - 0.07835060871598665 diff --git a/ada_feeding/config/ada_feeding_action_servers_default.yaml b/ada_feeding/config/ada_feeding_action_servers_default.yaml index ce333b10..ead904b7 100644 --- a/ada_feeding/config/ada_feeding_action_servers_default.yaml +++ b/ada_feeding/config/ada_feeding_action_servers_default.yaml @@ -39,12 +39,12 @@ ada_feeding_action_servers: - max_velocity_scaling_factor tree_kwargs: # optional joint_positions: # required - - -2.3149168248766614 # j2n6s200_joint_1 - - 3.1444595465032634 # j2n6s200_joint_2 - - 1.7332586075115999 # j2n6s200_joint_3 - - -2.3609596843308234 # j2n6s200_joint_4 - - 4.43936623280362 # j2n6s200_joint_5 - - 3.06866544924739 # j2n6s200_joint_6 + - -2.4538579336877304 # j2n6s200_joint_1 + - 3.07974419938212 # j2n6s200_joint_2 + - 1.8320725365979 # j2n6s200_joint_3 + - 4.096143890468605 # j2n6s200_joint_4 + - -2.003422584820525 # j2n6s200_joint_5 + - -3.2123560395465063 # j2n6s200_joint_6 toggle_watchdog_listener: false # optional, default: true f_mag: 4.0 # N max_velocity_scaling_factor: 1.0 # optional in (0.0, 1.0], default: 0.1 diff --git a/ada_feeding/launch/ada_feeding_launch.xml b/ada_feeding/launch/ada_feeding_launch.xml index 6e89a401..59ca77be 100644 --- a/ada_feeding/launch/ada_feeding_launch.xml +++ b/ada_feeding/launch/ada_feeding_launch.xml @@ -9,6 +9,7 @@ + @@ -19,17 +20,18 @@ - + + - - + + diff --git a/ada_feeding/scripts/analyze_benchmark.py b/ada_feeding/scripts/analyze_benchmark.py new file mode 100644 index 00000000..15796cd0 --- /dev/null +++ b/ada_feeding/scripts/analyze_benchmark.py @@ -0,0 +1,310 @@ +import argparse +import json +import os +import pandas as pd +import plotly.graph_objects as go +from plotly.subplots import make_subplots + + +def generate_visualizations(df_trials: pd.DataFrame, file_path: str): + """ + Generates interactive Plotly visualizations with a context-aware dropdown + for stage-specific analysis. + """ + if df_trials.empty: + print("Skipping visualization, no trial data to plot.") + return + + # --- 1. Load and Prepare Granular Stage Data --- + try: + with open(file_path, "r") as f: + full_data = [json.loads(line) for line in f if line.strip()] + except (FileNotFoundError, ValueError) as e: + print(f"Error reading or parsing file for visualization: {e}") + return + + # Create a DataFrame with one row per stage to get granular status + stages_records = [] + for trial in full_data: + for stage in trial.get("stages", []): + stages_records.append( + { + "trial_id": trial["trial_id"], + "stage_name": stage["stage_name"], + "status": ( + 1 if stage["status"] == "Success" else 0 + ), # Use 1 for success, 0 for failure + } + ) + df_stages = pd.DataFrame(stages_records) + + # Pivot the data so each trial has a column for each stage's status + df_stage_status = df_stages.pivot( + index="trial_id", columns="stage_name", values="status" + ) + # Fill NaN for trials that didn't reach certain stages + df_stage_status = df_stage_status.fillna(0) + df_stage_status.columns = [f"status_{col}" for col in df_stage_status.columns] + + # --- 2. 
Prepare Data for 3D Workspace Plot --- + plot_data = [] + for trial in full_data: + trial_id = trial.get("trial_id") + # Merge trial characteristics and stage statuses + characteristics = df_trials[df_trials["trial_id"] == trial_id] + if characteristics.empty: + continue + + trial_info = characteristics.iloc[0].to_dict() + if trial_id in df_stage_status.index: + trial_info.update(df_stage_status.loc[trial_id].to_dict()) + + for pose_name, pose_data in trial.get("scene_poses", {}).items(): + if pose_name in ["food_pose", "mouth_pose", "resting_pose"]: + pos = pose_data.get("position") + if not isinstance(pos, list) or len(pos) != 3: + continue + + # Add all info to the record for plotting + record = {"x": pos[0], "y": pos[1], "z": pos[2], "pose_name": pose_name} + record.update(trial_info) + plot_data.append(record) + + if not plot_data: + print("No valid pose data found to generate visualizations.") + return + vis_df = pd.DataFrame(plot_data) + + # --- 3. Create Figure and Six Persistent Traces --- + fig = go.Figure() + symbols = {"food_pose": "circle", "mouth_pose": "square", "resting_pose": "diamond"} + colors = {"Success": "green", "Failure": "red"} + + # Define the default view + default_status_name = "End-to-End" + default_status_col = "end_to_end_success" + relevance_map = { + "End-to-End": ["food_pose", "mouth_pose", "resting_pose"], + "Stage: HomeToAbovePlate": ["food_pose"], + "Stage: AbovePlateToAboveFood": ["food_pose"], + "Stage: AboveFoodToInFood": ["food_pose"], + "Stage: LevelArticutool": ["food_pose"], + "Stage: Resting": ["food_pose", "resting_pose"], + } + + # Create the 6 persistent traces, populating them with data for the default view + for pose_name, symbol in symbols.items(): + for outcome, color in colors.items(): + success_val = 1 if outcome == "Success" else 0 + + # Filter data for this trace's default view + df_subset = vis_df[ + (vis_df["pose_name"] == pose_name) + & (vis_df[default_status_col] == success_val) + ] + + x_data, y_data, z_data, custom_data, hover_template = ( + [], + [], + [], + None, + "none", + ) + if pose_name in relevance_map.get(default_status_name, []): + x_data = df_subset["x"] + y_data = df_subset["y"] + z_data = df_subset["z"] + custom_data = df_subset.to_dict("records") + hover_template = ( + "Trial ID: %{customdata.trial_id}
<br>Pose: %{customdata.pose_name}<br>Status: " + + outcome + + "<br>
Distance: %{customdata.food_mouth_distance_m:.2f}m" + ) + + fig.add_trace( + go.Scatter3d( + x=x_data, + y=y_data, + z=z_data, + customdata=custom_data, + hovertemplate=hover_template, + mode="markers", + marker=dict(color=color, symbol=symbol, size=5, opacity=0.7), + name=f"{outcome} ({pose_name.replace('_pose', '')})", + ) + ) + + # --- 4. Create the Dropdown Menu with 'restyle' Logic --- + status_cols = {"End-to-End": "end_to_end_success"} + stage_names = sorted([c.replace("status_", "") for c in df_stage_status.columns]) + for stage_name in stage_names: + status_cols[f"Stage: {stage_name}"] = f"status_{stage_name}" + buttons = [] + + # Create all traces upfront + for status_name, status_col in status_cols.items(): + update_args = {"x": [], "y": [], "z": [], "customdata": [], "hovertemplate": []} + + for pose_name in symbols.keys(): + for outcome in ["Success", "Failure"]: + success_val = 1 if outcome == "Success" else 0 + df_subset = vis_df[ + (vis_df["pose_name"] == pose_name) + & (vis_df[status_col] == success_val) + ] + + if pose_name in relevance_map.get(status_name, []): + update_args["x"].append(df_subset["x"]) + update_args["y"].append(df_subset["y"]) + update_args["z"].append(df_subset["z"]) + update_args["customdata"].append(df_subset.to_dict("records")) + update_args["hovertemplate"].append( + "Trial ID: %{customdata.trial_id}
<br>Pose: %{customdata.pose_name}<br>Status: " + + outcome + + "<br>
Distance: %{customdata.food_mouth_distance_m:.2f}m" + ) + else: + for key in ["x", "y", "z", "customdata", "hovertemplate"]: + update_args[key].append(None) # Use None to clear data + + buttons.append(dict(label=status_name, method="restyle", args=[update_args])) + + fig.update_layout( + title="3D Workspace Visualization", + updatemenus=[ + dict( + active=0, + buttons=buttons, + direction="down", + pad={"r": 10, "t": 10}, + showactive=True, + x=0.05, + xanchor="left", + y=1.1, + yanchor="top", + ) + ], + margin=dict(l=0, r=0, b=0, t=40), + scene=dict( + xaxis_title="X (m)", + yaxis_title="Y (m)", + zaxis_title="Z (m)", + aspectmode="data", + ), + legend_title="Legend", + ) + + # --- 5. Save to File --- + output_filename = "benchmark_visualization.html" + try: + fig.write_html(output_filename) + print( + f"\nInteractive visualization saved to: {os.path.abspath(output_filename)}" + ) + except Exception as e: + print(f"\nError saving visualization file: {e}") + + +def analyze_benchmark_results(file_path: str): + """ + Loads benchmark data from a .jsonl file, calculates key metrics, prints a summary report, + and generates interactive visualizations. + """ + try: + # Load the .jsonl file directly into a main DataFrame + main_df = pd.read_json(file_path, lines=True) + except (FileNotFoundError, ValueError) as e: + print(f"Error reading or parsing file: {e}") + return + + if main_df.empty: + print("No data found in the benchmark file.") + return + + # --- Properly flatten the nested data into two clean DataFrames --- + + # 1. Create the trials DataFrame by flattening 'scene_characteristics' + characteristics_df = pd.json_normalize(main_df["scene_characteristics"]) + df_trials = pd.concat( + [ + main_df.drop( + columns=[ + "scene_characteristics", + "stages", + "scene_poses", + "parameters", + ], + errors="ignore", + ), + characteristics_df, + ], + axis=1, + ) + + # --- Print High-Level Summary --- + num_trials = len(df_trials) + successful_trials = df_trials["end_to_end_success"].sum() + overall_success_rate = ( + (successful_trials / num_trials) * 100 if num_trials > 0 else 0 + ) + + print("\n" + "=" * 60) + print(" BENCHMARK ANALYSIS REPORT") + print("=" * 60) + print(f"File: {file_path}") + print(f"Total Trials: {num_trials}") + print( + f"End-to-End Success Rate: {overall_success_rate:.1f}% ({successful_trials}/{num_trials})" + ) + print("-" * 60) + + # 2. 
Create the stages DataFrame by "exploding" the list of stages + if "stages" in main_df.columns and not main_df["stages"].isnull().all(): + df_stages = ( + main_df[["trial_id", "stages"]].explode("stages").reset_index(drop=True) + ) + # Filter out rows where stages might be null after exploding + df_stages.dropna(subset=["stages"], inplace=True) + stage_details = pd.json_normalize(df_stages["stages"]) + df_stages = pd.concat( + [df_stages.drop(columns=["stages"]), stage_details], axis=1 + ) + + if not df_stages.empty: + failed_stages_df = df_stages[df_stages["status"] != "Success"] + stage_failure_counts = failed_stages_df["stage_name"].value_counts() + print("\nFailure Count by Stage:") + if not stage_failure_counts.empty: + print(stage_failure_counts.to_string()) + else: + print("No stage failures recorded.") + print("-" * 60) + + # --- Failure analysis by TYPE for the most problematic stage --- + if not stage_failure_counts.empty: + most_problematic_stage = stage_failure_counts.index[0] + print( + f"\nFailure Breakdown for Most Problematic Stage ('{most_problematic_stage}'):" + ) + problem_stage_df = failed_stages_df[ + failed_stages_df["stage_name"] == most_problematic_stage + ] + status_counts = problem_stage_df["status"].value_counts() + print(status_counts.to_string()) + print("=" * 60) + + # --- Generate Visualizations --- + # Call the new function to generate and save the plots + generate_visualizations(df_trials, file_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Analyze end-to-end benchmark results and generate visualizations." + ) + parser.add_argument( + "benchmark_file", type=str, help="Path to the benchmark JSONL output file." + ) + args = parser.parse_args() + + analyze_benchmark_results(args.benchmark_file) diff --git a/ada_feeding/scripts/articutool_feasibility_benchmark.py b/ada_feeding/scripts/articutool_feasibility_benchmark.py new file mode 100644 index 00000000..4fc9a7b6 --- /dev/null +++ b/ada_feeding/scripts/articutool_feasibility_benchmark.py @@ -0,0 +1,572 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script runs a simulation-based benchmark to evaluate the "plan-then-verify" +control methodology for the Articutool, as described in the accompanying paper. + +It operates in a simulation-only mode: +1. Generates a specified number of random start and goal joint configurations for + the Jaco arm. +2. For each pair, it uses MoveIt2 to plan a trajectory. +3. If a plan is found, it performs a post-hoc feasibility analysis on the + trajectory. For each waypoint, it calculates the required Articutool pitch and + roll angles to keep the end-effector level with respect to gravity. +4. It checks if these required angles are within the Articutool's joint limits. +5. It saves an "enhanced" trajectory file (.json) containing the per-waypoint + Articutool solutions, which can then be used by `visualize_trajectory.py`. 
+""" + +# Standard imports +from collections import namedtuple +from datetime import datetime +import os +import time +from threading import Thread +from typing import Optional, List, Dict, Tuple, Any +import json +import math +import subprocess +import sys + +# Third-party imports +import numpy as np +from pymoveit2 import MoveIt2 +from pymoveit2.robots import kinova +import rclpy +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.node import Node +from trajectory_msgs.msg import JointTrajectory +import moveit_msgs.msg +from scipy.spatial.transform import Rotation as R +import pinocchio as pin + +# --- Constants and Named Tuples --- +LOGGER = rclpy.logging.get_logger("articutool_benchmark_sim") +PLANNING_GROUP = "jaco_arm" +JOINT_NAMES = kinova.joint_names() +BASE_LINK = kinova.base_link_name() +END_EFFECTOR_LINK = kinova.end_effector_name() +EPSILON = 1e-6 + +# Define Articutool joint limits in radians +ARTICUTOOL_PITCH_LIMITS_RAD = (-math.pi / 2, math.pi / 2) # (-90, 90) degrees +ARTICUTOOL_ROLL_LIMITS_RAD = (-math.pi, math.pi) # (-180, 180) degrees + +# Define the world's "up" vector (positive Z axis) +WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) + +# --- Data Structures for Clarity --- +PlanningTask = namedtuple("PlanningTask", ["start_config", "goal_config"]) +ArticutoolWaypointSolution = namedtuple( + "ArticutoolWaypointSolution", + ["waypoint_feasible", "pitch_solution_rad", "roll_solution_rad"], +) +ArticutoolMetrics = namedtuple( + "ArticutoolMetrics", + [ + "path_feasible", + "num_infeasible_points", + "pitch_range_used_percent", + "roll_range_used_percent", + "avg_pitch_abs_rad", + "avg_roll_abs_rad", + ], +) + + +class ArticutoolBenchmarkSimulator: + """Manages the simulation-based benchmarking process.""" + + def __init__( + self, + node: Node, + moveit2: MoveIt2, + xacro_file_path: str, + num_tasks: int = 100, + planning_timeout: float = 5.0, + planner_id: str = "RRTConnectkConfigDefault", + trajectory_save_dir: Optional[str] = None, + ): + self.node = node + self.moveit2 = moveit2 + self.xacro_file_path = xacro_file_path + self.num_tasks = num_tasks + self.planning_timeout = planning_timeout + self.planner_id = planner_id + self.trajectory_save_dir = trajectory_save_dir + self.results: List[Dict[str, Any]] = [] + + # Pinocchio model for forward kinematics + self.pinocchio_model: Optional[pin.Model] = None + self.pinocchio_data: Optional[pin.Data] = None + self.jaco_ee_frame_id_pin: Optional[int] = None + self.joint_name_to_pinocchio_id: Dict[str, int] = {} + self._initialize_pinocchio() + + self.joint_limits = self._get_joint_limits() + + LOGGER.info("Benchmark simulator initialized.") + LOGGER.info(f"Testing with planner: {self.planner_id}") + LOGGER.info(f"Number of random tasks to generate: {self.num_tasks}") + if self.trajectory_save_dir: + os.makedirs(self.trajectory_save_dir, exist_ok=True) + LOGGER.info( + f"Enhanced trajectories will be saved to: {self.trajectory_save_dir}" + ) + + def _initialize_pinocchio(self): + """ + Loads the robot model from a XACRO file into Pinocchio. + This is a more robust method that explicitly handles XACRO to URDF conversion + and ensures Pinocchio can find all mesh files. 
+ """ + LOGGER.info(f"Processing Xacro file: {self.xacro_file_path}") + try: + # Convert XACRO to a URDF string + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", self.xacro_file_path], + check=True, + capture_output=True, + text=True, + ) + urdf_xml_string = process.stdout + except (FileNotFoundError, subprocess.CalledProcessError) as e: + LOGGER.error(f"Failed to process XACRO file: {e}") + LOGGER.error( + "Make sure ROS 2 environment is sourced and xacro is installed." + ) + self.pinocchio_model = None + return + + # Load the model from the URDF string + try: + self.pinocchio_model = pin.buildModelFromXML(urdf_xml_string) + self.pinocchio_data = self.pinocchio_model.createData() + self.jaco_ee_frame_id_pin = self.pinocchio_model.getFrameId( + END_EFFECTOR_LINK + ) + # Map joint names to their Pinocchio model IDs for quick access + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + self.joint_name_to_pinocchio_id[name] = ( + self.pinocchio_model.getJointId(name) + ) + LOGGER.info("Pinocchio model loaded successfully.") + except Exception as e: + LOGGER.error(f"Failed to build Pinocchio model from URDF string: {e}") + self.pinocchio_model = None + + def _get_joint_limits(self) -> List[Tuple[float, float]]: + """Retrieves joint limits from the Pinocchio model in the correct order.""" + if self.pinocchio_model is None: + LOGGER.warn("Pinocchio model not available, using default joint limits.") + return [(-math.pi, math.pi)] * len(JOINT_NAMES) + + limits = [] + for joint_name in JOINT_NAMES: + if self.pinocchio_model.existJointName(joint_name): + joint_id = self.pinocchio_model.getJointId(joint_name) + joint = self.pinocchio_model.joints[joint_id] + # Get the index in the configuration vector 'q' for this joint + limit_idx = joint.idx_q + lower = self.pinocchio_model.lowerPositionLimit[limit_idx] + upper = self.pinocchio_model.upperPositionLimit[limit_idx] + limits.append((lower, upper)) + else: + # Fallback for safety, though this shouldn't happen with a correct URDF + limits.append((-math.pi, math.pi)) + return limits + + def generate_random_config(self) -> List[float]: + """Generates a random joint configuration within the robot's limits.""" + return [ + ( + np.random.uniform(low, high) + if np.isfinite(low) and np.isfinite(high) + else np.random.uniform(-np.pi, np.pi) + ) + for low, high in self.joint_limits + ] + + def _normalize_angle(self, angle: float) -> float: + """Normalizes an angle to the range [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _solve_articutool_ik_for_leveling( + self, target_y_axis_in_atool_base: np.ndarray + ) -> List[Tuple[float, float]]: + """ + Analytical Inverse Kinematics (IK) solver for the 2-DOF Articutool. 
+ """ + vx, vy, vz = target_y_axis_in_atool_base + solutions: List[Tuple[float, float]] = [] + + # From the kinematic model: sin(theta_r) = -vx + asin_arg_for_tr = -vx + if not (-1.0 - EPSILON <= asin_arg_for_tr <= 1.0 + EPSILON): + return [] + + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = self._normalize_angle(math.pi - theta_r_sol1) + + candidate_thetas_r = list(set([theta_r_sol1, theta_r_sol2])) + + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + if math.isclose(cos_theta_r, 0.0, abs_tol=EPSILON): + if math.isclose(vy, 0.0, abs_tol=EPSILON) and math.isclose( + vz, 0.0, abs_tol=EPSILON + ): + solutions.append((0.0, self._normalize_angle(theta_r))) + continue + + theta_p_sol = math.atan2(vz, vy) + solutions.append( + (self._normalize_angle(theta_p_sol), self._normalize_angle(theta_r)) + ) + return solutions + + def _analyze_trajectory_for_articutool_leveling( + self, trajectory: JointTrajectory + ) -> Tuple[ArticutoolMetrics, List[ArticutoolWaypointSolution]]: + """ + Performs the core "plan-then-verify" analysis on a trajectory. + """ + if self.pinocchio_model is None: + LOGGER.error( + "Cannot perform feasibility check: Pinocchio model not loaded." + ) + return ( + ArticutoolMetrics( + False, len(trajectory.points), np.nan, np.nan, np.nan, np.nan + ), + [], + ) + + required_pitches, required_rolls = [], [] + num_infeasible_wps = 0 + per_waypoint_solutions: List[ArticutoolWaypointSolution] = [] + q = pin.neutral(self.pinocchio_model) + + for point in trajectory.points: + # ----- KINEMATIC FIX START ----- + # Correctly populate the Pinocchio configuration vector 'q'. + # Some joints (especially revolute) are represented by Pinocchio + # using 2 values (cos(theta), sin(theta)) in the 'q' vector for a + # 1-DOF joint. A simple assignment q[idx] = theta is incorrect. + # We must check the model's representation for each joint. + for i, name in enumerate(JOINT_NAMES): + if name in self.joint_name_to_pinocchio_id: + joint_id = self.joint_name_to_pinocchio_id[name] + joint_obj = self.pinocchio_model.joints[joint_id] + theta = point.positions[i] + + # nq is the size of the joint's representation in 'q' + # nv is the number of degrees of freedom (velocity size) + if joint_obj.nq == 2 and joint_obj.nv == 1: + # This is a revolute joint represented by (cos, sin) + q[joint_obj.idx_q] = math.cos(theta) + q[joint_obj.idx_q + 1] = math.sin(theta) + elif joint_obj.nq == 1 and joint_obj.nv == 1: + # This is a simple 1-to-1 mapping (e.g., prismatic or simple revolute) + q[joint_obj.idx_q] = theta + else: + # Fallback for other joint types, though not expected for Jaco + q[joint_obj.idx_q] = theta + # ----- KINEMATIC FIX END ----- + + pin.forwardKinematics(self.pinocchio_model, self.pinocchio_data, q) + pin.updateFramePlacements(self.pinocchio_model, self.pinocchio_data) + + ee_transform: pin.SE3 = self.pinocchio_data.oMf[self.jaco_ee_frame_id_pin] + R_world_ee = R.from_matrix(ee_transform.rotation) + + target_up_in_ee_frame = R_world_ee.inv().apply(WORLD_UP_VECTOR) + + # Call the IK solver with the correctly calculated target vector. 
+ ik_solutions = self._solve_articutool_ik_for_leveling(target_up_in_ee_frame) + + valid_solutions = [ + s + for s in ik_solutions + if ( + ARTICUTOOL_PITCH_LIMITS_RAD[0] - EPSILON + <= s[0] + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + EPSILON + and ARTICUTOOL_ROLL_LIMITS_RAD[0] - EPSILON + <= s[1] + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + EPSILON + ) + ] + + if not valid_solutions: + num_infeasible_wps += 1 + per_waypoint_solutions.append( + ArticutoolWaypointSolution(False, None, None) + ) + else: + best_sol = min(valid_solutions, key=lambda s: s[0] ** 2 + s[1] ** 2) + required_pitches.append(best_sol[0]) + required_rolls.append(best_sol[1]) + per_waypoint_solutions.append( + ArticutoolWaypointSolution(True, best_sol[0], best_sol[1]) + ) + + path_feasible = num_infeasible_wps == 0 + p_range = ( + (np.max(required_pitches) - np.min(required_pitches)) + if required_pitches + else 0 + ) + r_range = ( + (np.max(required_rolls) - np.min(required_rolls)) if required_rolls else 0 + ) + total_p_range = ARTICUTOOL_PITCH_LIMITS_RAD[1] - ARTICUTOOL_PITCH_LIMITS_RAD[0] + total_r_range = ARTICUTOOL_ROLL_LIMITS_RAD[1] - ARTICUTOOL_ROLL_LIMITS_RAD[0] + + return ( + ArticutoolMetrics( + path_feasible=path_feasible, + num_infeasible_points=num_infeasible_wps, + pitch_range_used_percent=( + (p_range / total_p_range * 100) if total_p_range > EPSILON else 0 + ), + roll_range_used_percent=( + (r_range / total_r_range * 100) if total_r_range > EPSILON else 0 + ), + avg_pitch_abs_rad=( + np.mean(np.abs(required_pitches)) if required_pitches else 0.0 + ), + avg_roll_abs_rad=( + np.mean(np.abs(required_rolls)) if required_rolls else 0.0 + ), + ), + per_waypoint_solutions, + ) + + def _save_enhanced_trajectory_to_file( + self, + original_trajectory: JointTrajectory, + articutool_solutions: List[ArticutoolWaypointSolution], + task_id: int, + ) -> Optional[str]: + """Saves the trajectory data along with Articutool solutions to a JSON file.""" + if not self.trajectory_save_dir: + return None + try: + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S-%f") + filename = f"task_{task_id}_{self.planner_id}_{timestamp}.json" + filepath = os.path.join(self.trajectory_save_dir, filename) + + waypoints_data = [] + for i, jaco_point in enumerate(original_trajectory.points): + at_solution = ( + articutool_solutions[i] + if i < len(articutool_solutions) + else ArticutoolWaypointSolution(False, None, None) + ) + waypoints_data.append( + { + "time_from_start_sec": jaco_point.time_from_start.sec + + jaco_point.time_from_start.nanosec * 1e-9, + "jaco_positions_rad": list(jaco_point.positions), + "articutool_waypoint_feasible": at_solution.waypoint_feasible, + "articutool_pitch_solution_rad": at_solution.pitch_solution_rad, + "articutool_roll_solution_rad": at_solution.roll_solution_rad, + } + ) + + enhanced_data = { + "jaco_joint_names": list(original_trajectory.joint_names), + "waypoints": waypoints_data, + } + + with open(filepath, "w") as f: + json.dump(enhanced_data, f, indent=2) + return filename + except Exception as e: + LOGGER.error(f"Failed to save enhanced trajectory for task {task_id}: {e}") + return None + + def run(self): + """Main benchmark execution loop.""" + if not self.pinocchio_model: + LOGGER.error("Aborting benchmark: Pinocchio model failed to initialize.") + return + + LOGGER.info("Generating random planning tasks...") + tasks = [ + PlanningTask(self.generate_random_config(), self.generate_random_config()) + for _ in range(self.num_tasks) + ] + LOGGER.info(f"Successfully generated {len(tasks)} tasks.") + + for i, task in 
enumerate(tasks): + total_task_start_time = time.perf_counter() + LOGGER.info(f"--- Running Task {i + 1}/{len(tasks)} ---") + + start_state_msg = moveit_msgs.msg.RobotState() + start_state_msg.joint_state.name = JOINT_NAMES + start_state_msg.joint_state.position = list(task.start_config) + self.moveit2._MoveIt2__move_action_goal.request.start_state = ( + start_state_msg + ) + + self.moveit2.set_joint_goal(list(task.goal_config)) + self.moveit2.planner_id = self.planner_id + self.moveit2.allowed_planning_time = self.planning_timeout + + planning_start_time = time.perf_counter() + plan_future = self.moveit2.plan_async() + + while rclpy.ok() and not plan_future.done(): + time.sleep(0.01) + + trajectory = self.moveit2.get_trajectory(plan_future) + planning_time = time.perf_counter() - planning_start_time + + plan_success = trajectory is not None and bool(trajectory.points) + articutool_metrics = None + feasibility_check_time = 0.0 + trajectory_filename = None + + if plan_success: + LOGGER.info(f" Jaco plan SUCCEEDED in {planning_time:.4f}s.") + feasibility_start_time = time.perf_counter() + articutool_metrics, solutions = ( + self._analyze_trajectory_for_articutool_leveling(trajectory) + ) + feasibility_check_time = time.perf_counter() - feasibility_start_time + LOGGER.info( + f" Articutool Leveling Feasibility: {articutool_metrics.path_feasible} " + f"({articutool_metrics.num_infeasible_points} infeasible points) " + f"(checked in {feasibility_check_time:.4f}s)" + ) + trajectory_filename = self._save_enhanced_trajectory_to_file( + trajectory, solutions, i + ) + else: + LOGGER.warn(f" Jaco plan FAILED in {planning_time:.4f}s.") + + total_task_time = time.perf_counter() - total_task_start_time + self.results.append( + { + "task_id": i, + "jaco_plan_success": plan_success, + "planning_time_s": planning_time, + "feasibility_check_time_s": feasibility_check_time, + "total_task_time_s": total_task_time, + "trajectory_filename": trajectory_filename, + **(articutool_metrics._asdict() if articutool_metrics else {}), + } + ) + + def save_results(self, output_dir: str): + """Saves the benchmark results to a JSON file and prints a summary.""" + os.makedirs(output_dir, exist_ok=True) + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + filename = os.path.join(output_dir, f"benchmark_results_{timestamp}.json") + + with open(filename, "w") as f: + json.dump(self.results, f, indent=2) + LOGGER.info(f"Benchmark results saved to {filename}") + + total_tasks = len(self.results) + jaco_successes = sum(1 for r in self.results if r["jaco_plan_success"]) + articutool_successes = sum( + 1 for r in self.results if r.get("path_feasible", False) + ) + + jaco_success_rate = ( + (jaco_successes / total_tasks * 100) if total_tasks > 0 else 0 + ) + rejection_rate = ( + ((jaco_successes - articutool_successes) / jaco_successes * 100) + if jaco_successes > 0 + else 0 + ) + + LOGGER.info("\n--- BENCHMARK SUMMARY ---") + LOGGER.info(f"Total Tasks Attempted: {total_tasks}") + LOGGER.info( + f"Jaco Planner Success Rate: {jaco_success_rate:.2f}% ({jaco_successes}/{total_tasks})" + ) + LOGGER.info( + f"Articutool Feasibility Rejection Rate (of successful Jaco plans): {rejection_rate:.2f}%" + ) + + feasible_results = [r for r in self.results if r.get("path_feasible")] + if feasible_results: + avg_plan_time = np.mean([r["planning_time_s"] for r in feasible_results]) + avg_check_time = np.mean( + [r["feasibility_check_time_s"] for r in feasible_results] + ) + LOGGER.info(f"Avg. 
Planning Time (on feasible paths): {avg_plan_time:.4f}s") + LOGGER.info( + f"Avg. Feasibility Check Time (on feasible paths): {avg_check_time:.4f}s" + ) + LOGGER.info("-------------------------\n") + + +def main(): + if len(sys.argv) < 2: + print( + "Usage: python3 articutool_feasibility_benchmark.py /path/to/your/robot.urdf.xacro" + ) + sys.exit(1) + xacro_file_arg = sys.argv[1] + if not os.path.exists(xacro_file_arg): + print(f"Error: XACRO file not found at '{xacro_file_arg}'") + sys.exit(1) + + rclpy.init() + node = Node("articutool_benchmark_node") + + executor = rclpy.executors.MultiThreadedExecutor() + executor.add_node(node) + executor_thread = Thread(target=executor.spin, daemon=True) + executor_thread.start() + + moveit2 = MoveIt2( + node=node, + joint_names=JOINT_NAMES, + base_link_name=kinova.base_link_name(), + end_effector_name=END_EFFECTOR_LINK, + group_name=PLANNING_GROUP, + callback_group=ReentrantCallbackGroup(), + ) + + NUM_TASKS = 200 + PLANNING_TIMEOUT = 5.0 + PLANNER_ID = "RRTConnectkConfigDefault" + OUTPUT_DIR = os.path.join(os.getcwd(), "articutool_benchmark_output") + + benchmark = ArticutoolBenchmarkSimulator( + node=node, + moveit2=moveit2, + xacro_file_path=xacro_file_arg, + num_tasks=NUM_TASKS, + planning_timeout=PLANNING_TIMEOUT, + planner_id=PLANNER_ID, + trajectory_save_dir=OUTPUT_DIR, + ) + + try: + if benchmark.pinocchio_model is not None: + benchmark.run() + benchmark.save_results(OUTPUT_DIR) + else: + LOGGER.error("Benchmark cannot run because Pinocchio model failed to load.") + except Exception as e: + LOGGER.error(f"An error occurred during the benchmark: {e}") + finally: + LOGGER.info("Shutting down.") + rclpy.shutdown() + executor_thread.join() + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/benchmark_analysis_and_plotting.py b/ada_feeding/scripts/benchmark_analysis_and_plotting.py new file mode 100644 index 00000000..c9b415d7 --- /dev/null +++ b/ada_feeding/scripts/benchmark_analysis_and_plotting.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script loads the comprehensive JSON results file generated by +`constrained_task_space_benchmark.py`, performs a thorough analysis, +and generates a series of high-quality, interactive plots using Plotly. + +The generated plots are saved as HTML files, suitable for inclusion in +publications and presentations. 
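+
+The expected input is the results file written by `constrained_task_space_benchmark.py`
+(named `benchmark_results_<mode>_<timestamp>.json`). A typical invocation, with an
+illustrative filename:
+
+    python3 benchmark_analysis_and_plotting.py benchmark_results_pos_only_20250101-120000.json \
+        --output_dir benchmark_plots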
+ +Requires: pandas, plotly +Install with: pip install pandas plotly +""" + +import argparse +import json +import os +from typing import List, Dict, Any + +import numpy as np +import pandas as pd +import plotly.express as px +import plotly.graph_objects as go + + +def load_data(json_path: str) -> pd.DataFrame: + """Loads the benchmark results from a JSON file into a pandas DataFrame.""" + print(f"Loading data from {json_path}...") + try: + with open(json_path, "r") as f: + data = json.load(f) + # Normalize the nested dictionary structure into a flat DataFrame + df = pd.json_normalize(data, sep="_") + print(f"Successfully loaded {len(df)} trials.") + return df + except (FileNotFoundError, json.JSONDecodeError, KeyError) as e: + print(f"Error loading or parsing JSON file: {e}") + return pd.DataFrame() + + +def plot_success_rates(df: pd.DataFrame, output_dir: str): + """Plots planner success rate and verification success rate by planning mode.""" + if df.empty: + return + + print("Generating success rate plot...") + # Group by planning mode and calculate mean success rates + summary = ( + df.groupby("planning_mode") + .agg( + planner_success_rate=("plan_success", "mean"), + verification_success_rate=("verification_success", "mean"), + ) + .reset_index() + ) + summary["planner_success_rate"] *= 100 + summary["verification_success_rate"] *= 100 + + # --- FIX: Convert data from wide format to long format for robust plotting --- + # This resolves the "columns of different type" error by restructuring the data. + summary_long = summary.melt( + id_vars="planning_mode", + value_vars=["planner_success_rate", "verification_success_rate"], + var_name="Metric", + value_name="Success Rate (%)", + ) + + fig = px.bar( + summary_long, + x="planning_mode", + y="Success Rate (%)", + color="Metric", # Use color to differentiate the metrics + title="Planner and Verification Success Rates by Planning Mode", + labels={ + "Success Rate (%)": "Success Rate (%)", + "planning_mode": "Planning Mode", + }, + barmode="group", + text_auto=".2f", + ) + # --- END FIX --- + + fig.update_layout(yaxis_range=[0, 105]) + + filename = os.path.join(output_dir, "success_rates.html") + fig.write_html(filename) + print(f" - Saved to {filename}") + + +def plot_planning_times(df: pd.DataFrame, output_dir: str): + """Plots the distribution of planning times for successful plans.""" + if df.empty: + return + + print("Generating planning time distribution plot...") + successful_plans = df[df["plan_success"] == True].copy() + + fig = px.box( + successful_plans, + x="planning_mode", + y="planning_time_s", + color="planning_mode", + title="Distribution of Planning Times for Successful Plans", + labels={ + "planning_time_s": "Planning Time (s)", + "planning_mode": "Planning Mode", + }, + points="all", # Show all data points + ) + + filename = os.path.join(output_dir, "planning_times.html") + fig.write_html(filename) + print(f" - Saved to {filename}") + + +def plot_3d_trajectories(df: pd.DataFrame, output_dir: str, num_examples: int = 5): + """Plots start/end points and example trajectories in 3D space.""" + if df.empty or "trajectory_metrics_waypoints_data" not in df.columns: + return + + print("Generating 3D trajectory plot...") + successful_plans = df[df["plan_success"] == True].copy() + if successful_plans.empty: + print(" - No successful plans to plot.") + return + + # Extract start and end points + starts = np.array(successful_plans["start_joint_positions"].tolist()) + + # We need to compute FK for start and end points + # For 
simplicity in this offline script, we'll plot the target position + # and the first waypoint's EE position. + + end_points = [] + start_points = [] + for _, row in successful_plans.iterrows(): + waypoints = row["trajectory_metrics_waypoints_data"] + if waypoints and isinstance(waypoints, list) and len(waypoints) > 0: + start_points.append(waypoints[0]["ee_pose_world"]["position"]) + end_points.append(waypoints[-1]["ee_pose_world"]["position"]) + + fig = go.Figure() + + # Add scatter plot for start and end points + fig.add_trace( + go.Scatter3d( + x=[p[0] for p in start_points], + y=[p[1] for p in start_points], + z=[p[2] for p in start_points], + mode="markers", + marker=dict(color="blue", size=4, opacity=0.6), + name="Start EE Position", + ) + ) + fig.add_trace( + go.Scatter3d( + x=[p[0] for p in end_points], + y=[p[1] for p in end_points], + z=[p[2] for p in end_points], + mode="markers", + marker=dict(color="red", size=4, opacity=0.6), + name="End EE Position", + ) + ) + + # Add line plots for a few example trajectories + example_indices = np.random.choice( + successful_plans.index, min(num_examples, len(successful_plans)), replace=False + ) + for i, index in enumerate(example_indices): + waypoints = successful_plans.loc[index, "trajectory_metrics_waypoints_data"] + path_x = [wp["ee_pose_world"]["position"][0] for wp in waypoints] + path_y = [wp["ee_pose_world"]["position"][1] for wp in waypoints] + path_z = [wp["ee_pose_world"]["position"][2] for wp in waypoints] + fig.add_trace( + go.Scatter3d( + x=path_x, + y=path_y, + z=path_z, + mode="lines", + line=dict(width=5), + name=f"Example Traj {i + 1}", + ) + ) + + fig.update_layout( + title="3D Visualization of End-Effector Trajectories", + scene=dict( + xaxis_title="X (m)", + yaxis_title="Y (m)", + zaxis_title="Z (m)", + aspectmode="data", # Ensure aspect ratio is realistic + ), + margin=dict(l=0, r=0, b=0, t=40), + ) + + filename = os.path.join(output_dir, "3d_trajectories.html") + fig.write_html(filename) + print(f" - Saved to {filename}") + + +def plot_articutool_velocities(df: pd.DataFrame, output_dir: str): + """Plots the required Articutool joint velocities for all successful waypoints.""" + if df.empty or "trajectory_metrics_waypoints_data" not in df.columns: + return + + print("Generating Articutool velocity plot...") + successful_plans = df[df["plan_success"] == True].copy() + + all_pitch_vels = [] + all_roll_vels = [] + + for _, row in successful_plans.iterrows(): + waypoints = row["trajectory_metrics_waypoints_data"] + if waypoints and isinstance(waypoints, list): + for wp in waypoints: + vels = wp.get("articutool_velocities_rad_per_sec") + if vels and isinstance(vels, dict): + all_pitch_vels.append(vels.get("pitch_vel")) + all_roll_vels.append(vels.get("roll_vel")) + + if not all_pitch_vels: + print(" - No velocity data found to plot.") + return + + fig = px.scatter( + x=all_pitch_vels, + y=all_roll_vels, + title="Required Articutool Joint Velocities for Leveling", + labels={"x": "Pitch Velocity (rad/s)", "y": "Roll Velocity (rad/s)"}, + opacity=0.5, + ) + fig.update_traces(marker=dict(size=5)) + + filename = os.path.join(output_dir, "articutool_velocities.html") + fig.write_html(filename) + print(f" - Saved to {filename}") + + +def main(): + """Main function to load data and generate all plots.""" + parser = argparse.ArgumentParser( + description="Analyze benchmark results and generate plots." 
+ ) + parser.add_argument( + "json_file", + type=str, + help="Path to the comprehensive benchmark results JSON file.", + ) + parser.add_argument( + "--output_dir", + type=str, + default="benchmark_plots", + help="Directory to save the generated plot HTML files.", + ) + args = parser.parse_args() + + # Create output directory if it doesn't exist + os.makedirs(args.output_dir, exist_ok=True) + + # Load data + df = load_data(args.json_file) + if df.empty: + return + + # Generate plots + plot_success_rates(df, args.output_dir) + plot_planning_times(df, args.output_dir) + plot_3d_trajectories(df, args.output_dir) + plot_articutool_velocities(df, args.output_dir) + + print("\nAnalysis complete. Plots saved in:", args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/constrained_task_space_benchmark.py b/ada_feeding/scripts/constrained_task_space_benchmark.py new file mode 100644 index 00000000..cb1e4a7b --- /dev/null +++ b/ada_feeding/scripts/constrained_task_space_benchmark.py @@ -0,0 +1,604 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script runs a benchmark to evaluate a "constraint-driven planning" +methodology for the Articutool system. It features comprehensive, single-file +logging designed for easy data analysis and plotting for publications. + +This version is a PURE SIMULATION. It does not move the robot. Each planning +task starts from the end-state of the previously computed plan. + +It can be run in two modes: +1. Position-Only Goals (default): Plans to a random (x, y, z) position. +2. Position + Yaw Goals (`--constrain-goal-yaw`): Plans to a random + (x, y, z) position AND a random final yaw orientation. + +In both modes, a "smart" orientation constraint (lenient pitch, free yaw, +strict roll) is applied to the ENTIRE PATH to ensure the Articutool can +always maintain a level configuration. 
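+
+Typical invocations, following the usage message in main():
+
+    python3 constrained_task_space_benchmark.py /path/to/robot.urdf.xacro
+    python3 constrained_task_space_benchmark.py /path/to/robot.urdf.xacro --constrain-goal-yaw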
+""" + +# Standard imports +from collections import namedtuple +from datetime import datetime +import os +import time +from threading import Thread +from typing import Optional, List, Dict, Tuple, Any +import json +import math +import subprocess +import sys + +# Third-party imports +import numpy as np +from pymoveit2 import MoveIt2 +from pymoveit2.robots import kinova +import rclpy +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.node import Node +from trajectory_msgs.msg import JointTrajectory +import moveit_msgs.msg +from geometry_msgs.msg import Quaternion, Point +from scipy.spatial.transform import Rotation as R +import pinocchio as pin + +# --- Constants and Named Tuples --- +LOGGER = rclpy.logging.get_logger("constrained_task_space_benchmark") +PLANNING_GROUP = "jaco_arm" +JOINT_NAMES = kinova.joint_names() +BASE_LINK = kinova.base_link_name() +END_EFFECTOR_LINK = kinova.end_effector_name() +EPSILON = 1e-6 + +# Define Articutool joint limits in radians +ARTICUTOOL_PITCH_LIMITS_RAD = (-math.pi / 2, math.pi / 2) +ARTICUTOOL_ROLL_LIMITS_RAD = (-math.pi, math.pi) +WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) + +# --- Parameters for the "Smart" PATH Orientation Constraint --- +PATH_CONSTRAINT_QUAT_XYZW = (0.707, 0.0, 0.0, 0.707) +PATH_CONSTRAINT_TOLERANCE_XYZ_RAD = (1.5, 3.14, 0.8) + +# --- Parameters for the optional GOAL Yaw Orientation Constraint --- +GOAL_YAW_CONSTRAINT_TOLERANCE_XYZ_RAD = (math.pi, math.pi, 0.1) + +# --- Define a reasonable workspace for sampling random positions --- +WORKSPACE_OUTER_RADIUS = 0.7 +WORKSPACE_INNER_RADIUS = 0.3 + +# --- Data Structures for Clarity --- +TrajectoryMetrics = namedtuple( + "TrajectoryMetrics", + [ + "duration_s", + "joint_space_path_length_rad", + "final_joint_positions", + "articutool_pitch_stats_rad", + "articutool_roll_stats_rad", + "waypoints_data", + ], +) + + +class ConstrainedTaskSpaceBenchmark: + """Manages the task-space benchmark planning process.""" + + def __init__( + self, + node: Node, + moveit2: MoveIt2, + xacro_file_path: str, + num_tasks: int = 100, + planning_timeout: float = 5.0, + planner_id: str = "RRTConnectkConfigDefault", + output_dir: Optional[str] = None, + constrain_goal_yaw: bool = False, + ): + self.node = node + self.moveit2 = moveit2 + self.xacro_file_path = xacro_file_path + self.num_tasks = num_tasks + self.planning_timeout = planning_timeout + self.planner_id = planner_id + self.output_dir = output_dir + self.constrain_goal_yaw = constrain_goal_yaw + self.results: List[Dict[str, Any]] = [] + + self.pinocchio_model: Optional[pin.Model] = None + self.pinocchio_data: Optional[pin.Data] = None + self.jaco_ee_frame_id_pin: Optional[int] = None + self.joint_name_to_pinocchio_id: Dict[str, int] = {} + self.jaco_vel_indices: List[int] = [] + self._initialize_pinocchio() + self.joint_limits = self._get_joint_limits() + self.debug_printed = False # Flag to print debug logs only once + + LOGGER.info("Benchmark simulator initialized.") + if self.constrain_goal_yaw: + LOGGER.info("Planning Mode: Position + Yaw Goals.") + else: + LOGGER.info("Planning Mode: Position-Only Goals.") + LOGGER.info(f"Applying 'smart' orientation constraint to all paths.") + + if self.output_dir: + os.makedirs(self.output_dir, exist_ok=True) + LOGGER.info(f"Comprehensive results will be saved to: {self.output_dir}") + + def _initialize_pinocchio(self): + """Loads the robot model from a XACRO file into Pinocchio.""" + try: + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", self.xacro_file_path], + check=True, + 
capture_output=True, + text=True, + ) + urdf_xml_string = process.stdout + self.pinocchio_model = pin.buildModelFromXML(urdf_xml_string) + self.pinocchio_data = self.pinocchio_model.createData() + self.jaco_ee_frame_id_pin = self.pinocchio_model.getFrameId( + END_EFFECTOR_LINK + ) + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + self.joint_name_to_pinocchio_id[name] = joint_id + # Store the velocity index for this joint + self.jaco_vel_indices.append( + self.pinocchio_model.joints[joint_id].idx_v + ) + + LOGGER.info("Pinocchio model loaded successfully.") + except (FileNotFoundError, subprocess.CalledProcessError, Exception) as e: + LOGGER.error(f"Failed to initialize Pinocchio model: {e}") + self.pinocchio_model = None + + def _get_joint_limits(self) -> List[Tuple[float, float]]: + """Retrieves joint limits from the Pinocchio model.""" + if self.pinocchio_model is None: + return [(-math.pi, math.pi)] * len(JOINT_NAMES) + limits = [] + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + idx = self.pinocchio_model.joints[joint_id].idx_q + limits.append( + ( + self.pinocchio_model.lowerPositionLimit[idx], + self.pinocchio_model.upperPositionLimit[idx], + ) + ) + return limits + + def generate_random_position_in_workspace(self) -> np.ndarray: + """Generates a random (x, y, z) position from a half-spherical shell.""" + r = ( + np.random.uniform(WORKSPACE_INNER_RADIUS**3, WORKSPACE_OUTER_RADIUS**3) + ) ** (1 / 3) + theta = np.random.uniform(0, 2 * math.pi) + cos_phi = np.random.uniform(0, 1) + phi = math.acos(cos_phi) + x = r * math.sin(phi) * math.cos(theta) + y = r * math.sin(phi) * math.sin(theta) + z = r * cos_phi + return np.array([x, y, z]) + + def generate_random_yaw_quaternion(self) -> Tuple[float, float, float, float]: + """Generates a quaternion representing a random yaw.""" + random_yaw_angle = np.random.uniform(-math.pi, math.pi) + quat_xyzw = R.from_euler("z", random_yaw_angle).as_quat() + return tuple(quat_xyzw) + + def _get_articutool_jacobian(self, pitch: float, roll: float) -> np.ndarray: + """ + Computes the analytical Jacobian for the Articutool's 'up' vector (y-axis). + This relates Articutool joint velocities to the angular velocity of its y-axis. 
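+
+        With the tool's up direction modelled (as in the code below) as
+
+            y(θp, θr) = [-sin(θr), cos(θp)·cos(θr), sin(θp)·cos(θr)],
+
+        the returned 3x2 matrix stacks its partial derivatives: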
+ J = [∂y/∂θp, ∂y/∂θr] + """ + cp, sp = math.cos(pitch), math.sin(pitch) + cr, sr = math.cos(roll), math.sin(roll) + # Partial derivatives of the y-axis vector [-sr, cp*cr, sp*cr] + dydp = np.array([0, -sp * cr, cp * cr]) + dydr = np.array([-cr, -cp * sr, -sp * sr]) + return np.vstack([dydp, dydr]).T + + def _solve_articutool_ik_for_leveling( + self, target_y_axis_in_atool_base: np.ndarray + ) -> List[Tuple[float, float]]: + """Analytical IK solver for the Articutool.""" + vx, vy, vz = target_y_axis_in_atool_base + solutions: List[Tuple[float, float]] = [] + asin_arg_for_tr = -vx + if not (-1.0 - EPSILON <= asin_arg_for_tr <= 1.0 + EPSILON): + return [] + asin_arg_for_tr_clipped = np.clip(asin_arg_for_tr, -1.0, 1.0) + theta_r_sol1 = math.asin(asin_arg_for_tr_clipped) + theta_r_sol2 = (math.pi - theta_r_sol1 + math.pi) % (2 * math.pi) - math.pi + candidate_thetas_r = list(set([theta_r_sol1, theta_r_sol2])) + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + if math.isclose(cos_theta_r, 0.0, abs_tol=EPSILON): + if math.isclose(vy, 0.0, abs_tol=EPSILON) and math.isclose( + vz, 0.0, abs_tol=EPSILON + ): + solutions.append( + (0.0, (theta_r + math.pi) % (2 * math.pi) - math.pi) + ) + continue + theta_p_sol = math.atan2(vz, vy) + solutions.append( + ( + (theta_p_sol + math.pi) % (2 * math.pi) - math.pi, + (theta_r + math.pi) % (2 * math.pi) - math.pi, + ) + ) + return solutions + + def _calculate_trajectory_metrics( + self, trajectory: JointTrajectory + ) -> TrajectoryMetrics: + """Calculates detailed metrics for a given trajectory.""" + if self.pinocchio_model is None or not trajectory.points: + return TrajectoryMetrics(0.0, 0.0, [], {}, {}, []) + + joint_space_path_length = 0.0 + waypoints_data = [] + q = pin.neutral(self.pinocchio_model) + prev_positions = np.array(trajectory.points[0].positions) + all_pitches, all_rolls = [], [] + + for i, point in enumerate(trajectory.points): + # Update Pinocchio model for current waypoint + for j, name in enumerate(JOINT_NAMES): + if name in self.joint_name_to_pinocchio_id: + joint_id = self.joint_name_to_pinocchio_id[name] + joint_obj = self.pinocchio_model.joints[joint_id] + theta = point.positions[j] + if joint_obj.nq == 2: + q[joint_obj.idx_q : joint_obj.idx_q + 2] = [ + math.cos(theta), + math.sin(theta), + ] + else: + q[joint_obj.idx_q] = theta + + pin.computeAllTerms( + self.pinocchio_model, + self.pinocchio_data, + q, + np.zeros(self.pinocchio_model.nv), + ) + pin.updateFramePlacements(self.pinocchio_model, self.pinocchio_data) + + # Get EE Pose and Articutool IK solution + ee_transform = self.pinocchio_data.oMf[self.jaco_ee_frame_id_pin] + R_world_ee = R.from_matrix(ee_transform.rotation) + target_up = R_world_ee.inv().apply(WORLD_UP_VECTOR) + solutions = self._solve_articutool_ik_for_leveling(target_up) + + at_solution, at_velocities = None, None + if solutions: + valid_sols = [ + s + for s in solutions + if ARTICUTOOL_PITCH_LIMITS_RAD[0] + <= s[0] + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] + <= s[1] + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + ] + if valid_sols: + pitch, roll = min(valid_sols, key=lambda s: s[0] ** 2 + s[1] ** 2) + at_solution = {"pitch": pitch, "roll": roll} + all_pitches.append(pitch) + all_rolls.append(roll) + + # Calculate required Articutool velocities + if i > 0: + dt = ( + point.time_from_start.sec + + point.time_from_start.nanosec * 1e-9 + ) - ( + trajectory.points[i - 1].time_from_start.sec + + trajectory.points[i - 1].time_from_start.nanosec * 1e-9 + ) + if dt > EPSILON: + 
q_dot_jaco = ( + np.array(point.positions) + - np.array(trajectory.points[i - 1].positions) + ) / dt + + J_jaco_full = pin.getFrameJacobian( + self.pinocchio_model, + self.pinocchio_data, + self.jaco_ee_frame_id_pin, + pin.ReferenceFrame.LOCAL, + ) + J_jaco_arm = J_jaco_full[:, self.jaco_vel_indices] + + v_ee = J_jaco_arm @ q_dot_jaco + omega_disturbance_local = v_ee[ + 3: + ] # Angular velocity portion + + J_atool = self._get_articutool_jacobian(pitch, roll) + J_atool_inv = np.linalg.pinv(J_atool) + q_dot_atool = -J_atool_inv @ omega_disturbance_local + at_velocities = { + "pitch_vel": q_dot_atool[0], + "roll_vel": q_dot_atool[1], + } + + # --- START DEBUG LOGS --- + if not self.debug_printed and i < 5: + LOGGER.info(f"--- DEBUG WP {i} ---") + LOGGER.info(f" dt: {dt:.4f}s") + LOGGER.info(f" q_dot_jaco: {np.round(q_dot_jaco, 3)}") + LOGGER.info(f" J_jaco_full shape: {J_jaco_full.shape}") + LOGGER.info(f" J_jaco_arm shape: {J_jaco_arm.shape}") + LOGGER.info(f" v_ee (local): {np.round(v_ee, 3)}") + LOGGER.info( + f" omega_disturbance_local: {np.round(omega_disturbance_local, 3)}" + ) + LOGGER.info(f" J_atool:\n{np.round(J_atool, 3)}") + LOGGER.info( + f" q_dot_atool (req'd): {np.round(q_dot_atool, 3)}" + ) + # --- END DEBUG LOGS --- + + waypoints_data.append( + { + "time_from_start_sec": point.time_from_start.sec + + point.time_from_start.nanosec * 1e-9, + "jaco_positions_rad": list(point.positions), + "ee_pose_world": { + "position": ee_transform.translation.tolist(), + "quat_xyzw": R.from_matrix(ee_transform.rotation) + .as_quat() + .tolist(), + }, + "articutool_solution_rad": at_solution, + "articutool_velocities_rad_per_sec": at_velocities, + } + ) + joint_space_path_length += np.linalg.norm( + np.array(point.positions) - prev_positions + ) + prev_positions = np.array(point.positions) + + self.debug_printed = ( + True # Ensure debug logs only print for the first trajectory + ) + duration = ( + trajectory.points[-1].time_from_start.sec + + trajectory.points[-1].time_from_start.nanosec * 1e-9 + ) + pitch_stats = ( + { + "min": min(all_pitches), + "max": max(all_pitches), + "mean": np.mean(all_pitches), + "std_dev": np.std(all_pitches), + } + if all_pitches + else {} + ) + roll_stats = ( + { + "min": min(all_rolls), + "max": max(all_rolls), + "mean": np.mean(all_rolls), + "std_dev": np.std(all_rolls), + } + if all_rolls + else {} + ) + + return TrajectoryMetrics( + duration, + joint_space_path_length, + list(trajectory.points[-1].positions), + pitch_stats, + roll_stats, + waypoints_data, + ) + + def run(self): + """Main benchmark execution loop.""" + if not self.pinocchio_model: + LOGGER.error("Aborting benchmark: Pinocchio model failed to initialize.") + return + + for i in range(self.num_tasks): + LOGGER.info(f"--- Running Task {i + 1}/{self.num_tasks} ---") + + # Get the starting joint state for this trial from the robot's current position + start_joint_positions = self.moveit2.joint_state.position + + # Generate task goals + target_position = self.generate_random_position_in_workspace() + target_quat_xyzw = ( + self.generate_random_yaw_quaternion() + if self.constrain_goal_yaw + else None + ) + + # Set up and execute planning request + self.moveit2.clear_goal_constraints() + self.moveit2.clear_path_constraints() + self.moveit2.set_position_goal( + position=target_position.tolist(), + frame_id=BASE_LINK, + target_link=END_EFFECTOR_LINK, + tolerance=0.01, + ) + if self.constrain_goal_yaw: + self.moveit2.set_orientation_goal( + quat_xyzw=Quaternion( + x=target_quat_xyzw[0], + 
y=target_quat_xyzw[1], + z=target_quat_xyzw[2], + w=target_quat_xyzw[3], + ), + target_link=END_EFFECTOR_LINK, + tolerance=GOAL_YAW_CONSTRAINT_TOLERANCE_XYZ_RAD, + parameterization=1, + ) + self.moveit2.set_path_orientation_constraint( + quat_xyzw=Quaternion( + x=PATH_CONSTRAINT_QUAT_XYZW[0], + y=PATH_CONSTRAINT_QUAT_XYZW[1], + z=PATH_CONSTRAINT_QUAT_XYZW[2], + w=PATH_CONSTRAINT_QUAT_XYZW[3], + ), + frame_id=BASE_LINK, + target_link=END_EFFECTOR_LINK, + tolerance=PATH_CONSTRAINT_TOLERANCE_XYZ_RAD, + weight=1.0, + parameterization=1, + ) + self.moveit2.planner_id = self.planner_id + self.moveit2.allowed_planning_time = self.planning_timeout + + planning_start_time = time.perf_counter() + plan_future = self.moveit2.plan_async() + while rclpy.ok() and not plan_future.done(): + time.sleep(0.01) + trajectory = self.moveit2.get_trajectory(plan_future) + planning_time = time.perf_counter() - planning_start_time + + plan_success = trajectory is not None and bool(trajectory.points) + trial_data = { + "task_id": i, + "planning_mode": "pos_yaw" if self.constrain_goal_yaw else "pos_only", + "start_joint_positions": list(start_joint_positions), + "target_position": target_position.tolist(), + "target_yaw_quat": target_quat_xyzw, + "plan_success": plan_success, + "planning_time_s": planning_time, + "verification_success": None, + "trajectory_metrics": None, + } + + if plan_success: + LOGGER.info(f" Jaco plan SUCCEEDED in {planning_time:.4f}s.") + metrics = self._calculate_trajectory_metrics(trajectory) + is_feasible = all( + wp["articutool_solution_rad"] is not None + for wp in metrics.waypoints_data + ) + trial_data["verification_success"] = is_feasible + trial_data["trajectory_metrics"] = metrics._asdict() + LOGGER.info(f" Verification Result: Path Feasible = {is_feasible}") + else: + LOGGER.warn(f" Jaco plan FAILED in {planning_time:.4f}s.") + + self.results.append(trial_data) + + def save_results(self): + """Saves the comprehensive benchmark results to a single JSON file.""" + if not self.output_dir: + return + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + mode = "pos_yaw" if self.constrain_goal_yaw else "pos_only" + filename = os.path.join( + self.output_dir, f"benchmark_results_{mode}_{timestamp}.json" + ) + try: + with open(filename, "w") as f: + json.dump(self.results, f, indent=2) + LOGGER.info(f"Comprehensive benchmark results saved to {filename}") + except Exception as e: + LOGGER.error(f"Failed to save results: {e}") + total_tasks, jaco_successes, verified_successes = ( + len(self.results), + sum(1 for r in self.results if r["plan_success"]), + sum(1 for r in self.results if r.get("verification_success", False)), + ) + jaco_success_rate = ( + (jaco_successes / total_tasks * 100) if total_tasks > 0 else 0 + ) + LOGGER.info(f"\n--- TASK-SPACE BENCHMARK SUMMARY ({mode.upper()}) ---") + LOGGER.info(f"Total Tasks Attempted: {total_tasks}") + LOGGER.info( + f"Jaco Planner Success Rate: {jaco_success_rate:.2f}% ({jaco_successes}/{total_tasks})" + ) + if jaco_successes > 0: + LOGGER.info( + f"Verification Success Rate (of successful plans): {(verified_successes / jaco_successes * 100):.2f}%" + ) + LOGGER.info("-------------------------------------\n") + + +def main(): + if len(sys.argv) < 2: + print( + "Usage: python3 constrained_task_space_benchmark.py /path/to/robot.urdf.xacro [--constrain-goal-yaw]" + ) + sys.exit(1) + xacro_file_arg, constrain_goal_yaw_arg = ( + sys.argv[1], + "--constrain-goal-yaw" in sys.argv, + ) + if not os.path.exists(xacro_file_arg): + print(f"Error: XACRO file 
not found at '{xacro_file_arg}'") + sys.exit(1) + + rclpy.init() + node = Node("constrained_task_space_benchmark_node") + executor = rclpy.executors.MultiThreadedExecutor() + executor.add_node(node) + executor_thread = Thread(target=executor.spin, daemon=True) + executor_thread.start() + + moveit2 = MoveIt2( + node=node, + joint_names=JOINT_NAMES, + base_link_name=BASE_LINK, + end_effector_name=END_EFFECTOR_LINK, + group_name=PLANNING_GROUP, + callback_group=ReentrantCallbackGroup(), + ) + + NUM_TASKS, PLANNING_TIMEOUT, PLANNER_ID, OUTPUT_DIR = ( + 100, + 10.0, + "RRTConnectkConfigDefault", + os.path.join(os.getcwd(), "constrained_task_space_benchmark_output"), + ) + + benchmark = ConstrainedTaskSpaceBenchmark( + node, + moveit2, + xacro_file_arg, + NUM_TASKS, + PLANNING_TIMEOUT, + PLANNER_ID, + OUTPUT_DIR, + constrain_goal_yaw_arg, + ) + + try: + if benchmark.pinocchio_model is not None: + # This benchmark is now a pure simulation. It starts from the robot's + # current state and each subsequent plan starts from the end of the + # previous one, without actually moving the robot. + LOGGER.info("Ready to begin benchmark from current robot configuration.") + benchmark.run() + benchmark.save_results() + else: + LOGGER.error("Benchmark cannot run because Pinocchio model failed to load.") + except Exception as e: + LOGGER.error(f"An error occurred during the benchmark: {e}", exc_info=True) + finally: + LOGGER.info("Shutting down.") + rclpy.shutdown() + executor_thread.join() + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/end_to_end_feeding_benchmark.py b/ada_feeding/scripts/end_to_end_feeding_benchmark.py new file mode 100644 index 00000000..1bf3d265 --- /dev/null +++ b/ada_feeding/scripts/end_to_end_feeding_benchmark.py @@ -0,0 +1,2477 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script implements the End-to-End Assistive Feeding Benchmark. + +It evaluates a set of generalizable motion primitives across a complete, +simulated assistive feeding cycle. The benchmark operates within a variety of +procedurally generated, robot-centric planning scenes to test the +generalization and robustness of different control strategies. 
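+
+Acquisition actions are described semantically by an ActionRecipe; for example,
+the scene generator in this file builds its InFood pose with
+
+    ActionRecipe(AcquisitionStrategy.SKEWER, MotionAxis.VERTICAL, ToolAlignment.PERPENDICULAR)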
+""" + +# Standard imports +from collections import namedtuple +from datetime import datetime +import os +import time +from threading import Thread, Lock +from typing import Optional, List, Dict, Tuple, Any +import json +import math +import subprocess +import sys +import argparse +from enum import Enum, auto +from dataclasses import dataclass, asdict + +# Third-party imports +import numpy as np +from pymoveit2 import MoveIt2 +from pymoveit2.robots import kinova +import rclpy +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.node import Node +from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint +from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped +from sensor_msgs.msg import JointState +from scipy.spatial.transform import Rotation as R +import pinocchio as pin +from moveit_msgs.msg import PlanningScene, AllowedCollisionEntry, AllowedCollisionMatrix +from moveit_msgs.srv import GetPlanningScene +from moveit_msgs.msg import CollisionObject +from shape_msgs.msg import SolidPrimitive +import tf2_ros + +# --- Constants --- +LOGGER = rclpy.logging.get_logger("end_to_end_benchmark") +PLANNING_GROUP_JACO = "jaco_arm" +PLANNING_GROUP_ATOOL = "articutool" +PLANNING_GROUP_FULL = "jaco_arm_with_articutool" +JOINT_NAMES_JACO = [f"j2n6s200_joint_{i + 1}" for i in range(6)] +JOINT_NAMES_ATOOL = [f"atool_joint{i + 1}" for i in range(2)] +JOINT_NAMES_FULL = JOINT_NAMES_JACO + JOINT_NAMES_ATOOL +BASE_LINK_JACO = "j2n6s200_link_base" +BASE_LINK_ATOOL = "atool_link_base" +BASE_LINK_FULL = BASE_LINK_JACO +END_EFFECTOR_LINK_JACO = "j2n6s200_end_effector" +END_EFFECTOR_LINK_ATOOL = "tool_tip" +END_EFFECTOR_LINK_FULL = END_EFFECTOR_LINK_ATOOL +EPSILON = 1e-6 +ARTICUTOOL_PITCH_LIMITS_RAD = (-math.pi / 2, math.pi / 2) +ARTICUTOOL_ROLL_LIMITS_RAD = (-math.pi, math.pi) +WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) +PATH_CONSTRAINT_QUAT_XYZW = (0.707, 0.0, 0.0, 0.707) +PATH_CONSTRAINT_TOLERANCE_XYZ_RAD = (1.5, 3.14, 0.8) +ARTICUTOOL_LENGTH_M = 0.14 + + +# --- Data Structures --- +class TrialStatus(Enum): + SUCCESS = "Success" + IK_FAILURE = "IK Failure" + PLANNER_FAILURE = "Planner Failure" + VERIFICATION_FAILURE = "Path Verification Failure" + SKIPPED = "Skipped" + + +class ExecutionMode(Enum): + """Defines how trajectories for a stage should be interpreted.""" + + SEQUENTIAL = "Sequential" # Jaco and Articutool move one after the other. + SYNCHRONOUS = "Synchronous" # Jaco and Articutool move at the same time. + JACO_ONLY = "Jaco Only" # Only a Jaco trajectory exists for this stage. + ATOOL_ONLY = ( + "Articutool Only" # Only an Articutool trajectory exists for this stage. 
+ ) + + +# --- Semantic Schema for Acquisition Actions --- +class AcquisitionStrategy(Enum): + """High-level choice of physical interaction""" + + SKEWER = auto() + SCOOP = auto() + CUT = auto() + + +class MotionAxis(Enum): + """The primary axis for the arm's linear motion""" + + MAJOR_AXIS = auto() + MINOR_AXIS = auto() + VERTICAL = auto() + + +class ToolAlignment(Enum): + """How the tool is oriented relative to the food's structure""" + + PARALLEL = auto() + PERPENDICULAR = auto() + + +@dataclass +class ActionRecipe: + """Holds the semantic parameters for a single acquisition action""" + + strategy: AcquisitionStrategy + motion_axis: MotionAxis + tool_alignment: ToolAlignment + + def __str__(self): + return ( + f"{self.strategy.name}-{self.motion_axis.name}-{self.tool_alignment.name}" + ) + + +@dataclass +class CylindricalSamplingParams: + """Parameters for sampling a pose within a cylindrical shell.""" + + name: str + inner_radius: float + outer_radius: float + min_height: float + max_height: float + + +@dataclass +class SphericalSamplingParams: + """Parameters for sampling a pose within a spherical shell.""" + + name: str + inner_radius: float + outer_radius: float + # Angles in radians for theta (XY plane) and phi (from Z-axis) + theta_range: Tuple[float, float] + phi_range: Tuple[float, float] + min_height: Optional[float] = None + max_height: Optional[float] = None + + +@dataclass +class SceneGenerationParams: + """Holds all parameters that define the random scene generation.""" + + food_sampling: SphericalSamplingParams = SphericalSamplingParams( + name="food", + inner_radius=0.4, + outer_radius=1.04, # Corresponds to the 8-DOF workspace + theta_range=(0.0, 2 * math.pi), + phi_range=(0.0, math.pi / 2), # Upper hemisphere + min_height=0.1, + max_height=0.4, + ) + mouth_sampling: SphericalSamplingParams = SphericalSamplingParams( + name="mouth", + inner_radius=0.4, + outer_radius=1.04, # Corresponds to the 8-DOF workspace + theta_range=(0.0, 2 * math.pi), + phi_range=(0.0, math.pi / 2), + min_height=0.3, + max_height=0.5, + ) + resting_sampling: SphericalSamplingParams = SphericalSamplingParams( + name="resting", + inner_radius=0.5, + outer_radius=0.9, # Corresponds to the 6-DOF workspace + theta_range=(0.0, 2 * math.pi), + phi_range=(0, math.pi / 2), + min_height=0.2, + max_height=0.4, + ) + above_plate_radial_dist: float = 0.3 + above_plate_polar_angle_rad_max: float = math.pi / 3 + above_plate_yaw_variability_rad: float = math.pi / 8 + skewer_polar_angle_rad_max: float = math.pi / 2 + in_food_tool_roll_angle_deg: float = 180.0 + above_food_offset_dist: float = 0.1 # 10 cm + staging_offset_dist: float = 0.15 # 15 cm + + +# --- Constraint Helpers --- +class MoveIt2ConstraintType(Enum): + JOINT = "joint" + POSITION = "position" + ORIENTATION = "orientation" + POSE = "pose" + + +def create_pose_constraint( + pose: Pose, tolerance_position: float = 0.001, tolerance_orientation: float = 0.001 +) -> Tuple[MoveIt2ConstraintType, Dict]: + """Creates a standard pose goal constraint.""" + return ( + MoveIt2ConstraintType.POSE, + { + "pose": pose, + "tolerance_position": tolerance_position, + "tolerance_orientation": tolerance_orientation, + }, + ) + + +def create_orientation_path_constraint( + quat_xyzw: Tuple, tolerance_rad: Tuple +) -> Tuple[MoveIt2ConstraintType, Dict]: + """Creates an orientation path constraint, useful for keeping the tool level.""" + return ( + MoveIt2ConstraintType.ORIENTATION, + { + "quat_xyzw": Quaternion( + x=quat_xyzw[0], y=quat_xyzw[1], z=quat_xyzw[2], 
w=quat_xyzw[3] + ), + "tolerance": tolerance_rad, + "weight": 1.0, + }, + ) + + +def create_joint_constraint( + joint_positions: List[float], +) -> Tuple[MoveIt2ConstraintType, Dict]: + """Creates a joint goal constraint.""" + return (MoveIt2ConstraintType.JOINT, {"joint_positions": joint_positions}) + + +def create_position_constraint( + position: Point, tolerance_position: float = 0.001 +) -> Tuple[MoveIt2ConstraintType, Dict]: + """Creates a standard position goal constraint.""" + return ( + MoveIt2ConstraintType.POSITION, + { + "position": position, + "tolerance": tolerance_position, + "weight": 1.0, + }, + ) + + +class SceneGenerator: + """A dedicated class for procedurally generating planning scenes.""" + + def __init__(self, params: SceneGenerationParams): + self.params = params + + def generate(self) -> Tuple[Dict[str, Any], Dict[str, float]]: + """ + Generates a randomized scene and returns the scene dict and sampled parameters. + """ + scene = {} + scene_characteristics = {} + + food_position, food_params = self._sample_pose_in_spherical_shell( + self.params.food_sampling + ) + food_orientation = self._calculate_base_facing_orientation(food_position) + scene["food_pose"] = Pose(position=food_position, orientation=food_orientation) + scene_characteristics.update(food_params) + + mouth_position, mouth_params = self._sample_pose_in_spherical_shell( + self.params.mouth_sampling + ) + mouth_orientation = self._calculate_base_facing_orientation(mouth_position) + scene["mouth_pose"] = Pose( + position=mouth_position, orientation=mouth_orientation + ) + scene_characteristics.update(mouth_params) + + resting_position, resting_params = self._sample_pose_in_spherical_shell( + self.params.resting_sampling + ) + resting_orientation = self._calculate_jaco_ee_orientation(resting_position) + scene["resting_pose"] = Pose( + position=resting_position, orientation=resting_orientation + ) + scene_characteristics.update(resting_params) + scene_characteristics.update( + self._characterize_pose(scene["resting_pose"], "resting_pose") + ) + + scene["home_config"] = [-1.47568, 2.92779, 1.00845, -2.0847, 1.43588, 1.32575] + + scene["above_plate_pose"], above_plate_params = ( + self._calculate_above_plate_pose(scene["food_pose"]) + ) + scene_characteristics.update(above_plate_params) + scene_characteristics.update( + self._characterize_pose(scene["above_plate_pose"], "above_plate_pose") + ) + + ( + scene["in_food_pose"], + approach_vector, + in_food_params, + ) = self._calculate_in_food_pose( + food_pose=scene["food_pose"], + recipe=ActionRecipe( + AcquisitionStrategy.SKEWER, + MotionAxis.VERTICAL, + ToolAlignment.PERPENDICULAR, + ), + ) + scene_characteristics.update(in_food_params) + scene_characteristics.update( + self._characterize_pose(scene["in_food_pose"], "in_food_pose") + ) + + scene["above_food_pose"] = self._calculate_above_food_pose( + scene["in_food_pose"], approach_vector + ) + scene_characteristics.update( + self._characterize_pose(scene["above_food_pose"], "above_food_pose") + ) + scene["staging_pose"] = self._calculate_staging_pose(scene["mouth_pose"]) + scene_characteristics.update( + self._characterize_pose(scene["staging_pose"], "staging_pose") + ) + + # Add derived characteristics for analysis + food_pos = scene["food_pose"].position + mouth_pos = scene["mouth_pose"].position + resting_pos = scene["resting_pose"].position + scene_characteristics["food_mouth_distance_m"] = math.sqrt( + (food_pos.x - mouth_pos.x) ** 2 + + (food_pos.y - mouth_pos.y) ** 2 + + (food_pos.z - mouth_pos.z) ** 2 
+ ) + scene_characteristics["food_resting_distance_m"] = math.sqrt( + (food_pos.x - resting_pos.x) ** 2 + + (food_pos.y - resting_pos.y) ** 2 + + (food_pos.z - resting_pos.z) ** 2 + ) + + return scene, scene_characteristics + + def _characterize_pose(self, pose: Pose, pose_name: str) -> Dict[str, float]: + """Extracts key kinematic characteristics from a pose.""" + pos = np.array([pose.position.x, pose.position.y, pose.position.z]) + rot = R.from_quat( + [ + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ] + ) + + # For a Jaco EE frame, the Z-axis is "forward" + forward_vec = rot.apply([0.0, 0.0, 1.0]) + + characteristics = { + f"{pose_name}_dist_3d": np.linalg.norm(pos), + f"{pose_name}_dist_2d": np.linalg.norm(pos[:2]), + f"{pose_name}_global_yaw_rad": np.arctan2(forward_vec[1], forward_vec[0]), + f"{pose_name}_global_pitch_rad": np.arcsin(forward_vec[2]), + } + return characteristics + + def _sample_pose_in_cylindrical_shell( + self, sampling_params: CylindricalSamplingParams + ) -> Tuple[Point, Dict[str, float]]: + """ + A general function to sample a POSITION within the given cylindrical parameters. + Returns a Point and the sampled characteristic values. + """ + radius = np.sqrt( + np.random.uniform( + sampling_params.inner_radius**2, sampling_params.outer_radius**2 + ) + ) + theta = np.random.uniform(0, 2 * np.pi) + z = np.random.uniform(sampling_params.min_height, sampling_params.max_height) + + x = radius * np.cos(theta) + y = radius * np.sin(theta) + + position = Point(x=x, y=y, z=z) + + prefix = sampling_params.name + sampled_values = { + f"{prefix}_sampled_radius": radius, + f"{prefix}_sampled_theta_rad": theta, + f"{prefix}_sampled_z": z, + } + return position, sampled_values + + def _sample_pose_in_spherical_shell( + self, sampling_params: SphericalSamplingParams + ) -> Tuple[Point, Dict[str, float]]: + """ + A general function to sample a POSITION within a spherical shell. + Returns a Point and the sampled characteristic values. + Uses rejection sampling if min/max height is specified. + """ + max_attempts = 100 + for _ in range(max_attempts): + r = np.random.uniform( + sampling_params.inner_radius**3, sampling_params.outer_radius**3 + ) ** (1 / 3) + theta = np.random.uniform(*sampling_params.theta_range) + phi = np.random.uniform(*sampling_params.phi_range) + + x = r * np.cos(theta) * np.sin(phi) + y = r * np.sin(theta) * np.sin(phi) + z = r * np.cos(phi) + + if ( + sampling_params.min_height is None or z >= sampling_params.min_height + ) and ( + sampling_params.max_height is None or z <= sampling_params.max_height + ): + position = Point(x=x, y=y, z=z) + prefix = sampling_params.name + sampled_values = { + f"{prefix}_sampled_radius": r, + f"{prefix}_sampled_theta_rad": theta, + f"{prefix}_sampled_phi_rad": phi, + } + return position, sampled_values + + def _calculate_base_facing_orientation(self, position: Point) -> Quaternion: + """ + Calculates an orientation that is upright (Z-up) and has its X-axis + pointing towards the robot base (origin). 
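+
+        A random yaw perturbation of up to ±30 degrees is then applied so the
+        sampled poses do not all face the base exactly (see rand_yaw below).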
+ """ + z_axis = np.array([0.0, 0.0, 1.0]) + look_at_vector = -np.array([position.x, position.y, 0.0]) + if np.linalg.norm(look_at_vector) < 1e-6: + look_at_vector = np.array([1.0, 0.0, 0.0]) + x_axis = look_at_vector / np.linalg.norm(look_at_vector) + y_axis = np.cross(z_axis, x_axis) + + rotation_matrix = np.array([x_axis, y_axis, z_axis]).T + # Add random yaw variability + rand_yaw = np.random.uniform(-np.deg2rad(30), np.deg2rad(30)) + final_rot = R.from_matrix(rotation_matrix) * R.from_euler("z", rand_yaw) + quat = final_rot.as_quat() + + return Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]) + + def _calculate_jaco_ee_orientation(self, position: Point) -> Quaternion: + """ + Calculates an orientation matching the Jaco EE convention (Y-up, Z-forward) + at the given position. + """ + # (Y-up) is the world's Z-axis + target_y_axis = np.array([0.0, 0.0, 1.0]) + + # (Z-forward) points horizontally away from the robot's base + target_z_axis_dir = np.array([position.x, position.y, 0.0]) + if np.linalg.norm(target_z_axis_dir) < 1e-6: + target_z_axis_dir = np.array([1.0, 0.0, 0.0]) + target_z_axis = target_z_axis_dir / np.linalg.norm(target_z_axis_dir) + + # (X-left) is derived from the cross product + target_x_axis = np.cross(target_y_axis, target_z_axis) + + rotation_matrix = np.array([target_x_axis, target_y_axis, target_z_axis]).T + rotation = R.from_matrix(rotation_matrix) + quat = rotation.as_quat() + + return Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]) + + def _calculate_above_plate_pose( + self, food_pose: Pose + ) -> Tuple[Pose, Dict[str, float]]: + """ + Calculates a camera pose for the Jaco end-effector that looks at the food, + with a yaw constraint that keeps the arm aligned with the robot base. + """ + food_position = np.array( + [food_pose.position.x, food_pose.position.y, food_pose.position.z] + ) + + # --- Position Calculation with Yaw Constraint --- + # 1. Determine the base yaw angle from the robot's origin to the food's XY position. + base_yaw_angle = np.arctan2(food_position[1], food_position[0]) + + # 2. Add random variability to this base angle. + yaw_variability = np.random.uniform( + -self.params.above_plate_yaw_variability_rad, + self.params.above_plate_yaw_variability_rad, + ) + final_azimuthal_angle = base_yaw_angle + yaw_variability + np.pi + + # 3. Use spherical coordinates relative to the food pose to find the camera position. + radial_distance = self.params.above_plate_radial_dist + polar_angle = np.random.uniform(0, self.params.above_plate_polar_angle_rad_max) + + # Calculate the offset from the food pose. + x_offset = radial_distance * np.sin(polar_angle) * np.cos(final_azimuthal_angle) + y_offset = radial_distance * np.sin(polar_angle) * np.sin(final_azimuthal_angle) + z_offset = radial_distance * np.cos(polar_angle) + + # The final camera position is the food position plus this offset. 
+ camera_position = food_position + np.array([x_offset, y_offset, z_offset]) + + # --- Orientation Calculation (Look-at with no roll) --- + # (This logic remains the same) + z_axis = food_position - camera_position + z_axis /= np.linalg.norm(z_axis) + world_up = np.array([0.0, 0.0, 1.0]) + if np.abs(np.dot(z_axis, world_up)) > 0.999: + x_axis = np.array([0.0, 1.0, 0.0]) + else: + x_axis = np.cross(world_up, z_axis) + x_axis /= np.linalg.norm(x_axis) + y_axis = np.cross(z_axis, x_axis) + rotation_matrix = np.array([x_axis, y_axis, z_axis]).T + rotation = R.from_matrix(rotation_matrix) + quat = rotation.as_quat() + + # Create and return the final Pose message + final_pose = Pose() + final_pose.position = Point( + x=camera_position[0], y=camera_position[1], z=camera_position[2] + ) + final_pose.orientation = Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]) + + sampled_params = { + "above_plate_sampled_yaw_variability_rad": yaw_variability, + "above_plate_sampled_polar_angle_rad": polar_angle, + } + return final_pose, sampled_params + + def _calculate_in_food_pose( + self, food_pose: Pose, recipe: ActionRecipe + ) -> Tuple[Pose, np.ndarray, Dict[str, float]]: + """ + Calculates the InFood tool tip pose based on a semantic ActionRecipe. + Returns the pose, the approach vector, and sampled parameters. + """ + # Default to identity rotation and a vertical approach vector + final_rotation = R.identity() + approach_vector = np.array([0.0, 0.0, -1.0]) + sampled_polar_angle = 0.0 + + # --- SKEWER Strategy Logic --- + if recipe.strategy == AcquisitionStrategy.SKEWER: + # (Logic for finding food axes remains the same) + food_orientation = R.from_quat( + [ + food_pose.orientation.x, + food_pose.orientation.y, + food_pose.orientation.z, + food_pose.orientation.w, + ] + ) + minor_axis_food = food_orientation.apply([0.0, 1.0, 0.0]) + tool_x_final = minor_axis_food + + # Use parameter from the params object + sampled_polar_angle = np.random.uniform( + 0, self.params.skewer_polar_angle_rad_max + ) + rotation = R.from_rotvec(sampled_polar_angle * tool_x_final) + tool_z_final = rotation.apply(np.array([0.0, 0.0, -1.0])) + + tool_y_final = np.cross(tool_z_final, tool_x_final) + + base_rotation_matrix = np.array( + [tool_x_final, tool_y_final, tool_z_final] + ).T + base_orientation = R.from_matrix(base_rotation_matrix) + roll_rotation = R.from_rotvec( + np.deg2rad(self.params.in_food_tool_roll_angle_deg) * tool_z_final + ) + final_rotation = roll_rotation * base_orientation + approach_vector = tool_z_final + + # --- SCOOP Strategy Logic (Placeholder) --- + elif recipe.strategy == AcquisitionStrategy.SCOOP: + # ... + pass + + # --- Construct the final pose --- + q = final_rotation.as_quat() + in_food_pose = Pose() + in_food_pose.position = food_pose.position + in_food_pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3]) + + sampled_params = {"in_food_sampled_polar_angle_rad": sampled_polar_angle} + return in_food_pose, approach_vector, sampled_params + + def _calculate_above_food_pose( + self, in_food_pose: Pose, approach_vector: np.ndarray + ) -> Pose: + """ + Calculates the AboveFood pose by offsetting from InFood along the + calculated approach vector. + """ + # 1. Inherit the orientation directly from the target pose + final_orientation = in_food_pose.orientation + + # 2. The motion vector for the linear path IS the approach vector (tool's Z-axis) + motion_vector = approach_vector + + # 3. 
Calculate the offset position using parameter from the params object + offset_dist = self.params.above_food_offset_dist + p = in_food_pose.position + offset = motion_vector * -offset_dist + final_position = Point(x=p.x + offset[0], y=p.y + offset[1], z=p.z + offset[2]) + + return Pose(position=final_position, orientation=final_orientation) + + def _calculate_staging_pose(self, mouth_pose: Pose) -> Pose: + """Calculate the staging pose relative to the mouth.""" + offset_dist = self.params.staging_offset_dist + + p = mouth_pose.position + q = mouth_pose.orientation + mouth_rot = R.from_quat([q.x, q.y, q.z, q.w]) + + # Offset is along the mouth's forward-facing X-axis + offset_vec = mouth_rot.apply([offset_dist, 0, 0]) + + staged_pos = Point( + x=p.x - offset_vec[0], y=p.y - offset_vec[1], z=p.z - offset_vec[2] + ) + + return Pose(position=staged_pos, orientation=q) + + def _calculate_resting_pose(self, food_pose: Pose, mouth_pose: Pose) -> Pose: + """ + Calculates a dynamic "Resting" pose for the Jaco end-effector. + + This method implements the "Angular Standoff" strategy. It positions + the resting pose at a fixed radial distance from the robot base, but with + an angular offset from the food's position. This offset is directed + away from the mouth's position, placing the arm in a safe, clear, and + context-aware staging area. + + Args: + food_pose: The 6D pose of the food item. + mouth_pose: The 6D pose of the user's mouth. + + Returns: + The calculated 6D resting pose. + """ + # --- Extract XY positions and create 2D vectors from the origin --- + v_food = np.array([food_pose.position.x, food_pose.position.y]) + v_mouth = np.array([mouth_pose.position.x, mouth_pose.position.y]) + + # --- 1. Determine the direction of angular offset --- + # Use the 2D cross product to find the sign of the angle between vectors. + # This tells us if the mouth is clockwise or counter-clockwise from the food. + cross_product_z = np.cross(v_food, v_mouth) + + # We apply the offset in the direction that moves away from the mouth. + angle = ( + -np.deg2rad(self.params.resting_angular_offset_deg) + if cross_product_z > 0 + else np.deg2rad(self.params.resting_angular_offset_deg) + ) + + # --- 2. Calculate the new position --- + # Normalize the food vector to get its direction + v_food_dir = v_food / np.linalg.norm(v_food) + + # Create a 2D rotation matrix and apply it to the food's direction + c, s = np.cos(angle), np.sin(angle) + rotation_matrix = np.array(((c, -s), (s, c))) + v_rest_dir = rotation_matrix @ v_food_dir + + # Scale the new direction by the fixed radial distance for the final XY position + rest_position_xy = v_rest_dir * self.params.resting_radial_dist + rest_position_z = food_pose.position.z + self.params.resting_vertical_offset + + # --- 3. Calculate the new orientation (upright and facing outward) --- + # The z-axis (forward) points horizontally from the base to the new position + z_axis = np.array([rest_position_xy[0], rest_position_xy[1], 0.0]) + z_axis /= np.linalg.norm(z_axis) + + # The y-axis (up) is aligned with the world's Z-axis + y_axis = np.array([0.0, 0.0, 1.0]) + + # The x-axis (left) is the cross product, forming an orthonormal frame + x_axis = np.cross(y_axis, z_axis) + + # Construct the final rotation matrix and convert to a quaternion + rotation_matrix_3d = np.array([x_axis, y_axis, z_axis]).T + rotation = R.from_matrix(rotation_matrix_3d) + quat = rotation.as_quat() + + # --- 4. 
Assemble and return the final Pose message --- + return Pose( + position=Point( + x=rest_position_xy[0], y=rest_position_xy[1], z=rest_position_z + ), + orientation=Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]), + ) + + +# --- Core Benchmark Classes --- +class MotionPlanner: + """A wrapper for MoveIt2 to provide a seamless, synchronous API for planning.""" + + def __init__( + self, + node: Node, + moveit2_jaco: MoveIt2, + moveit2_atool: MoveIt2, + moveit2_full: MoveIt2, + tf_buffer: tf2_ros.Buffer, + planning_timeout: float, + ): + self._node = node + self._planning_timeout = planning_timeout + + self._tf_buffer = tf_buffer + + # Manages all moveit2 objects and the shared lock + self._moveit2_objects = { + PLANNING_GROUP_JACO: moveit2_jaco, + PLANNING_GROUP_ATOOL: moveit2_atool, + PLANNING_GROUP_FULL: moveit2_full, + } + self._lock = Lock() + + def _get_planner(self, group_name: str) -> MoveIt2: + """Selects the correct moveit2 object based on group name.""" + if group_name not in self._moveit2_objects: + raise ValueError(f"Unknown planning group: {group_name}") + return self._moveit2_objects[group_name] + + @staticmethod + def _scale_cartesian_trajectory_velocity( + traj: JointTrajectory, scale_factor: float + ): + """Scales the velocity of a Cartesian trajectory""" + for point in traj.points: + nsec = (point.time_from_start.sec * 1e9) + point.time_from_start.nanosec + nsec /= scale_factor + sec = int(math.floor(nsec / 1e9)) + point.time_from_start.sec = sec + point.time_from_start.nanosec = int(nsec - (sec * 1e9)) + for i in range(len(point.velocities)): + point.velocities[i] *= scale_factor + for i in range(len(point.accelerations)): + point.accelerations[i] *= scale_factor**2 + + def _transform_goal_to_base_link( + self, planner: MoveIt2, constraint: Tuple[MoveIt2ConstraintType, Dict] + ): + """Transforms a pose-based goal constraint to the robot's base frame.""" + constraint_type, kwargs = constraint + + # Only transform pose constraints and only if a frame_id is specified + if ( + constraint_type != MoveIt2ConstraintType.POSE + or "frame_id" not in kwargs + or kwargs["frame_id"] is None + ): + return + + pose_stamped = PoseStamped() + pose_stamped.header.frame_id = kwargs["frame_id"] + pose_stamped.pose = kwargs["pose"] + + transformed_pose = self._tf_buffer.transform( + pose_stamped, planner.base_link_name + ) + + kwargs["pose"] = transformed_pose.pose + kwargs["frame_id"] = planner.base_link_name + + def compute_ik( + self, group_name: str, target_pose: Pose, start_joint_state: List[float] + ) -> Optional[JointState]: + """ + Computes Inverse Kinematics for a given group and target pose. + + Returns: + A JointState message on success, None on failure. + """ + planner = self._get_planner(group_name) + ik_solution = None + + with self._lock: + ik_solution = planner.compute_ik( + position=target_pose.position, + quat_xyzw=target_pose.orientation, + start_joint_state=start_joint_state, + ) + + return ik_solution + + def compute_fk( + self, group_name: str, joint_state: JointState, fk_link_names: List[str] + ) -> Optional[List[PoseStamped]]: + """ + Computes Forward Kinematics for a given set of links. + + Returns: + A list of PoseStamped messages, one for each requested link. 
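+
+ Example (illustrative only; assumes the module-level constants above and
+ a caller-defined `home_config` joint list):
+ js = JointState(name=JOINT_NAMES_JACO, position=home_config)
+ poses = motion_planner.compute_fk(
+ PLANNING_GROUP_JACO, js, [END_EFFECTOR_LINK_JACO]
+ )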
+ """ + planner = self._get_planner(group_name) + fk_poses = None + + with self._lock: + fk_poses = planner.compute_fk( + joint_state=joint_state, fk_link_names=fk_link_names + ) + + return fk_poses + + def plan( + self, + group_name: str, + start_state: Optional[List[float]], + goal_constraints: List[Tuple[MoveIt2ConstraintType, Dict]], + path_constraints: Optional[List[Tuple[MoveIt2ConstraintType, Dict]]] = None, + cartesian: bool = False, + cartesian_max_step: float = 0.001, + cartesian_jump_threshold: float = 5.0, + cartesian_fraction_threshold: float = 0.92, + ) -> Tuple[TrialStatus, Optional[JointTrajectory], float]: + """ + Plans a trajectory and measures the planning time. + + Returns: + A status, the resulting trajectory, and the planning time in seconds. + """ + planner = self._get_planner(group_name) + future = None + + if not goal_constraints: + LOGGER.error("Planning failed: At least one goal constraint is required.") + return TrialStatus.IK_FAILURE, None, 0.0 + + if cartesian: + planner.cartesian_jump_threshold = cartesian_jump_threshold + # Note: Other properties like prismatic/revolute jump thresholds could also be set here + + with self._lock: + planner.clear_goal_constraints() + planner.clear_path_constraints() + + # If cartesian, transform pose goals to the base link frame + if cartesian: + try: + for constraint in goal_constraints: + self._transform_goal_to_base_link(planner, constraint) + except Exception as e: + LOGGER.error( + f"Failed to transform Cartesian goal to base link: {e}" + ) + return TrialStatus.IK_FAILURE, None, 0.0 + + # --- Process Goal Constraints --- + for constraint_type, kwargs in goal_constraints: + if constraint_type == MoveIt2ConstraintType.JOINT: + planner.set_joint_goal(**kwargs) + elif constraint_type == MoveIt2ConstraintType.POSITION: + planner.set_position_goal(**kwargs) + elif constraint_type == MoveIt2ConstraintType.ORIENTATION: + planner.set_orientation_goal(**kwargs) + elif constraint_type == MoveIt2ConstraintType.POSE: + planner.set_pose_goal(**kwargs) + + # --- Process Path Constraints --- + if path_constraints: + for constraint_type, kwargs in path_constraints: + if constraint_type == MoveIt2ConstraintType.JOINT: + planner.set_path_joint_constraint(**kwargs) + elif constraint_type == MoveIt2ConstraintType.POSITION: + planner.set_path_position_constraint(**kwargs) + elif constraint_type == MoveIt2ConstraintType.ORIENTATION: + planner.set_path_orientation_constraint(**kwargs) + + # --- Initiate Asynchronous Planning --- + future = planner.plan_async( + start_joint_state=start_state, + cartesian=cartesian, + max_step=cartesian_max_step, + ) + + # --- Start timing the planning operation --- + start_time = time.time() + while rclpy.ok() and not future.done(): + if time.time() - start_time > self._planning_timeout: + future.cancel() + return TrialStatus.PLANNER_FAILURE, None, self._planning_timeout + time.sleep(0.1) + planning_time = time.time() - start_time + # --- End timing --- + + traj = planner.get_trajectory( + future, + cartesian=cartesian, + cartesian_fraction_threshold=cartesian_fraction_threshold, + ) + if not traj or not traj.points: + return TrialStatus.PLANNER_FAILURE, None, planning_time + + # Scale velocity for cartesian plans, as pymoveit2 doesn't do this automatically + if cartesian and planner.max_velocity > 0.0: + MotionPlanner._scale_cartesian_trajectory_velocity( + traj, planner.max_velocity + ) + + return TrialStatus.SUCCESS, traj, planning_time + + +class PinocchioModel: + """A wrapper for Pinocchio to provide a 
seamless interface for kinematic queries.""" + + def __init__(self, xacro_file_path: str): + """Loads the robot model from a XACRO file.""" + self.model: Optional[pin.Model] = None + self.data: Optional[pin.Data] = None + self._is_ready = False + + try: + # Convert XACRO to URDF string + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", xacro_file_path], + check=True, + capture_output=True, + text=True, + ) + urdf_xml_string = process.stdout + + # Load model from string + self.model = pin.buildModelFromXML(urdf_xml_string) + self.data = self.model.createData() + self._is_ready = True + LOGGER.info("Pinocchio model loaded successfully.") + except Exception as e: + LOGGER.error(f"Failed to initialize Pinocchio model: {e}", exc_info=True) + + def is_ready(self) -> bool: + """Returns True if the model was loaded successfully.""" + return self._is_ready + + def _update_configuration( + self, jaco_joints: List[float], atool_joints: Optional[List[float]] = None + ) -> np.ndarray: + """ + Populates Pinocchio's configuration vector `q` from joint lists. + Handles the sin/cos representation for revolute joints correctly using the + python `math` library to ensure type compatibility with Pinocchio's bindings. + """ + q = pin.neutral(self.model) + + all_joints = jaco_joints + (atool_joints if atool_joints is not None else []) + all_names = JOINT_NAMES_JACO + ( + JOINT_NAMES_ATOOL if atool_joints is not None else [] + ) + + for i, name in enumerate(all_names): + if self.model.existJointName(name): + joint_id = self.model.getJointId(name) + joint_obj = self.model.joints[joint_id] + angle = all_joints[i] + + idx = joint_obj.idx_q + if joint_obj.nq == 2: + q[idx : idx + 2] = [math.cos(angle), math.sin(angle)] + else: + q[idx] = angle + return q + + def get_frame_transform( + self, + frame_name: str, + jaco_joints: List[float], + atool_joints: Optional[List[float]] = None, + ) -> Optional[pin.SE3]: + """ + Performs Forward Kinematics to get the transform of a specific frame. + + Returns: + A Pinocchio SE3 transform object, or None on failure. + """ + if not self.is_ready(): + return None + + try: + # Update the model's configuration vector + q = self._update_configuration(jaco_joints, atool_joints) + + # Run Forward Kinematics + pin.forwardKinematics(self.model, self.data, q) + pin.updateFramePlacements(self.model, self.data) + + # Get and return the transform + frame_id = self.model.getFrameId(frame_name) + return self.data.oMf[frame_id] + except Exception as e: + LOGGER.error(f"Pinocchio FK failed for frame '{frame_name}': {e}") + return None + + def get_frame_jacobian( + self, + frame_name: str, + jaco_joints: List[float], + atool_joints: Optional[List[float]] = None, + ) -> Optional[np.ndarray]: + """ + Computes the full Jacobian for a specific frame in the world frame. + + Returns: + A 6xN numpy array representing the Jacobian, or None on failure. + """ + if not self.is_ready(): + return None + + try: + q = self._update_configuration(jaco_joints, atool_joints) + frame_id = self.model.getFrameId(frame_name) + + # Compute the Jacobian for the frame. 
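+ # Note (assumption; verify against the installed Pinocchio version): the
+ # Python binding pin.computeFrameJacobian(model, data, q, frame_id) also
+ # returns the 6xN frame Jacobian directly, so capturing that return value
+ # (e.g. `jac = pin.computeFrameJacobian(...)` and returning `jac`) may be
+ # more robust than reading self.data.J afterwards.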
+ # The result is stored in self.data.J + pin.computeFrameJacobian(self.model, self.data, q, frame_id) + + return self.data.J + except Exception as e: + LOGGER.error( + f"Pinocchio Jacobian calculation failed for frame '{frame_name}': {e}" + ) + return None + + +class EndToEndBenchmark: + """Manages the end-to-end benchmark planning process.""" + + def __init__( + self, + node: Node, + moveit2_jaco: MoveIt2, + moveit2_atool: MoveIt2, + moveit2_full: MoveIt2, + xacro_file_path: str, + num_trials: int = 100, + planning_timeout: float = 5.0, + output_dir: Optional[str] = None, + ): + self.node = node + self.num_trials = num_trials + self.output_filename = None + if output_dir: + os.makedirs(output_dir, exist_ok=True) + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + # Use the .jsonl extension for JSON Lines format + self.output_filename = os.path.join( + output_dir, f"end_to_end_benchmark_{timestamp}.jsonl" + ) + + self.motion_planner = MotionPlanner( + node, + moveit2_jaco, + moveit2_atool, + moveit2_full, + tf2_ros.Buffer(), + planning_timeout, + ) + # Pinocchio model for feasibility checks + self.kinematics_model = PinocchioModel(xacro_file_path) + + def add_articutool_bounding_cylinder(self): + """ + Adds a cylindrical collision object to represent the Articutool + and updates the ACM to prevent spurious collisions. + """ + # Create a publisher to the planning scene topic if it doesn't exist + if not hasattr(self, "planning_scene_publisher"): + self.planning_scene_publisher = self.node.create_publisher( + PlanningScene, "/planning_scene", 10 + ) + + # --- Step 1: Define and add the CollisionObject --- + collision_object = CollisionObject() + collision_object.header.frame_id = ( + END_EFFECTOR_LINK_JACO # Attach to Jaco wrist + ) + collision_object.id = "articutool_bounding_cylinder" + collision_object.operation = CollisionObject.ADD + + # Define the cylinder's dimensions and pose relative to the wrist link + cylinder = SolidPrimitive() + cylinder.type = SolidPrimitive.CYLINDER + cylinder.dimensions = [ + 0.04, # height + ARTICUTOOL_LENGTH_M, # radius + ] + cylinder_pose = Pose() + cylinder_pose.position = Point(x=0.0, y=0.0, z=0.132) + cylinder_pose.orientation = Quaternion(x=0.0, y=0.707, z=0.0, w=0.707) + + collision_object.primitives.append(cylinder) + collision_object.primitive_poses.append(cylinder_pose) + + # Create a PlanningScene message to add the object + planning_scene_msg = PlanningScene() + planning_scene_msg.world.collision_objects.append(collision_object) + planning_scene_msg.is_diff = True + self.planning_scene_publisher.publish(planning_scene_msg) + self.node.get_logger().info("Published bounding cylinder to planning scene.") + + # Short delay to ensure the scene is updated + time.sleep(1.0) + + # --- Step 2: Modify the Allowed Collision Matrix (ACM) --- + # Create a client to get the planning scene + scene_client = self.node.create_client(GetPlanningScene, "/get_planning_scene") + while not scene_client.wait_for_service(timeout_sec=1.0): + self.node.get_logger().info( + 'Service "/get_planning_scene" not available, waiting...' 
+ ) + + # Request the full scene, including the ACM + req = GetPlanningScene.Request() + req.components.components = req.components.ALLOWED_COLLISION_MATRIX + future = scene_client.call_async(req) + + # Add a callback to the future, which will be executed by the executor thread + future.add_done_callback(self._update_acm_callback) + + def _update_acm_callback(self, future): + """ + This callback is executed once the GetPlanningScene service returns a result. + """ + try: + result = future.result() + if result is None: + self.node.get_logger().error("Failed to get planning scene") + return + + acm = result.scene.allowed_collision_matrix + object_name = "articutool_bounding_cylinder" + links_to_allow = [ + "j2n6s200_link_6", + "atool_base", + "atool_electronics_holder_bottom_plate", + "atool_electronics_holder_upper_plate", + "atool_electronics_holder_wire_guard", + "atool_ft_adapter", + "atool_handle", + "atool_handle_cover", + "atool_link1", + "atool_link2", + "atool_motor_link", + "atool_u2d2", + "tool", + "ft", + "j2n6s200_link_finger_1", + "j2n6s200_link_finger_2", + "j2n6s200_link_finger_tip_1", + "j2n6s200_link_finger_tip_2", + ] + + if object_name not in acm.entry_names: + for row in acm.entry_values: + row.enabled.append(False) + acm.entry_names.append(object_name) + new_row = AllowedCollisionEntry() + new_row.enabled = [False] * len(acm.entry_names) + acm.entry_values.append(new_row) + + obj_idx = acm.entry_names.index(object_name) + link_indices = [] + for name in links_to_allow: + try: + link_indices.append(acm.entry_names.index(name)) + except ValueError: + self.node.get_logger().warning( + f"Link '{name}' not in ACM, skipping." + ) + + for link_idx in link_indices: + acm.entry_values[obj_idx].enabled[link_idx] = True + acm.entry_values[link_idx].enabled[obj_idx] = True + + scene_update = PlanningScene(is_diff=True) + scene_update.allowed_collision_matrix = acm + self.planning_scene_publisher.publish(scene_update) + self.node.get_logger().info("Successfully updated ACM via callback.") + + except Exception as e: + self.node.get_logger().error(f"Error in ACM callback: {e}") + + # --- Scene Generation --- + def _generate_scene(self) -> Dict[str, Any]: + """Generates a randomized, robot-centric planning scene.""" + scene = {} + + # Sample food and mouth poses + scene["food_pose"] = self._sample_pose_in_cylindrical_shell( + inner_radius=0.4, + outer_radius=0.7, + min_height=0.0, + max_height=0.3, + ) + scene["mouth_pose"] = self._sample_pose_in_cylindrical_shell( + inner_radius=0.3, + outer_radius=0.6, + min_height=0.0, + max_height=0.6, + ) + + # Define derived poses + scene["home_config"] = [-1.47568, 2.92779, 1.00845, -2.0847, 1.43588, 1.32575] + scene["above_plate_pose"] = self._calculate_above_plate_pose(scene["food_pose"]) + scene["in_food_pose"], approach_vector = self._calculate_in_food_pose( + food_pose=scene["food_pose"], + recipe=ActionRecipe( + AcquisitionStrategy.SKEWER, + MotionAxis.VERTICAL, + ToolAlignment.PERPENDICULAR, + ), + ) + scene["above_food_pose"] = self._calculate_above_food_pose( + in_food_pose=scene["in_food_pose"], approach_vector=approach_vector + ) + scene["staging_pose"] = self._calculate_staging_pose(scene["mouth_pose"]) + scene["resting_pose"] = self._calculate_resting_pose( + scene["food_pose"], scene["mouth_pose"] + ) + + return scene + + def _sample_pose_in_cylindrical_shell( + self, + inner_radius: float, + outer_radius: float, + min_height: float, + max_height: float, + ) -> Pose: + """ + Samples a random pose within a cylindrical shell + + The 
frame's Z-axis is constrained to be world up, and its X-axis is + oriented to point towards the robot base with some random variability. + """ + # --- Position Sampling in a Cylindrical Shell --- + # 1. Sample the radius and angle + radius = np.sqrt(np.random.uniform(inner_radius**2, outer_radius**2)) + theta = np.random.uniform(0, 2 * np.pi) + + # 2. Convert to Cartesian coordinates + x = radius * np.cos(theta) + y = radius * np.sin(theta) + z = np.random.uniform(min_height, max_height) + position = Point(x=x, y=y, z=z) + + # --- Orientation Calculation --- + z_axis = np.array([0.0, 0.0, 1.0]) + look_at_vector = -np.array([x, y, 0.0]) # Project to XY plane + if np.linalg.norm(look_at_vector) < 1e-6: + look_at_vector = np.array([1.0, 0.0, 0.0]) + x_axis_direction = look_at_vector / np.linalg.norm(look_at_vector) + + y_axis = np.cross(z_axis, x_axis_direction) + rotation_matrix = np.array([x_axis_direction, y_axis, z_axis]).T + main_rot = R.from_matrix(rotation_matrix) + + rand_yaw_angle = np.random.uniform(-np.deg2rad(30), np.deg2rad(30)) + variability_rot = R.from_euler("z", rand_yaw_angle) + + final_rot = main_rot * variability_rot + quat = final_rot.as_quat() + + return Pose( + position=position, + orientation=Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]), + ) + + def _sample_pose_in_spherical_shell(self) -> Pose: + """ + Samples a random pose within a spherical shell in front of the robot. + The frame's Z-axis is constrained to be world up, and its X-axis is + oriented to point towards the robot base with some random variability. + """ + inner_radius, outer_radius = 0.3, 0.7 + + # Sample position + r = np.random.uniform(inner_radius**3, outer_radius**3) ** (1 / 3) + theta = np.random.uniform(0, 2 * np.pi) + phi = np.random.uniform(0, np.pi / 2) + x = r * np.cos(theta) * np.sin(phi) + y = r * np.sin(theta) * np.sin(phi) + z = r * np.cos(phi) + position = Point(x=x, y=y, z=z) + + # --- Corrected Orientation Logic --- + # 1. Define the primary Z-axis constraint (world up). + z_axis = np.array([0.0, 0.0, 1.0]) + + # 2. Define the vector pointing from the sampled point to the robot base (origin). + look_at_vector = -np.array([x, y, z]) + + # 3. Project the look-at vector onto the XY plane to get the direction for the X-axis. + # This ensures the final X-axis will be perpendicular to the world Z-axis. + x_axis_direction = np.array([look_at_vector[0], look_at_vector[1], 0.0]) + + # Normalize the direction vector. Handle the case where the point is directly above the origin. + if np.linalg.norm(x_axis_direction) < 1e-6: + x_axis_direction = np.array( + [1.0, 0.0, 0.0] + ) # Default to pointing along world X + x_axis_direction /= np.linalg.norm(x_axis_direction) + + # 4. Create a base rotation where the X-axis points in the desired direction and Z is up. + # The Y-axis is derived from the cross product to form a right-handed frame. + y_axis = np.cross(z_axis, x_axis_direction) + rotation_matrix = np.array([x_axis_direction, y_axis, z_axis]).T + main_rot = R.from_matrix(rotation_matrix) + + # 5. Add a small random rotational variability around the Z-axis. + rand_yaw_angle = np.random.uniform(-np.deg2rad(30), np.deg2rad(30)) + variability_rot = R.from_euler("z", rand_yaw_angle) + + # 6. Combine the main rotation with the variability. 
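+ # Right-multiplying applies the random yaw about the frame's own Z-axis;
+ # since the base rotation keeps that axis aligned with world-up, this is
+ # equivalent to a yaw about the world Z-axis.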
+ final_rot = main_rot * variability_rot + quat = final_rot.as_quat() + + return Pose( + position=position, + orientation=Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3]), + ) + + def _normalize_angle(self, angle: float) -> float: + """Normalize an angle to [-pi, pi].""" + return (angle + math.pi) % (2 * math.pi) - math.pi + + def _test_articutool_ik_solver(self): + """ + Performs a round-trip test of the Articutool's analytical IK solver. + """ + LOGGER.info("--- Running Articutool IK Solver Test ---") + + test_cases_deg = { + "Zero": (0, 0), + "Pitch_Positive": (30, 0), + "Pitch_Negative": (-45, 0), + "Roll_Positive": (0, 30), + "Roll_Negative": (0, -45), + "Combined_1": (30, 45), + "Combined_2": (-20, -60), + "Limit_Pitch": (90, 0), # A singularity case + "Limit_Roll": (0, 90), + } + + all_passed = True + for name, (pitch_deg, roll_deg) in test_cases_deg.items(): + # --- 1. START: Convert test case to radians --- + theta_p = np.deg2rad(pitch_deg) + theta_r = np.deg2rad(roll_deg) + + # --- 2. FORWARD KINEMATICS: Calculate the target Y-axis vector --- + # Note the swapped sin/cos for y_y due to kinematic conventions + y_x = np.cos(theta_p) * np.cos(theta_r) + y_y = np.sin(theta_r) + y_z = np.sin(theta_p) * np.cos(theta_r) + target_y_in_wrist_frame = np.array([y_x, y_y, y_z]) + + # --- 3. PRE-ROTATION: Apply the necessary R_z(+pi/2) rotation --- + # This is the crucial step that mimics what our trajectory generator does. + ik_input_vector = np.array( + [ + -target_y_in_wrist_frame[1], + target_y_in_wrist_frame[0], + target_y_in_wrist_frame[2], + ] + ) + + # --- 4. INVERSE KINEMATICS: Call the solver --- + ik_solutions_rad = self._solve_articutool_ik(ik_input_vector) + + # --- 5. VERIFICATION: Check if the original angles are in the solution set --- + found_match = False + for sol_p, sol_r in ik_solutions_rad: + # Check if a solution is close to the original input, accounting for angle wrapping + if np.isclose( + self._normalize_angle(sol_p), self._normalize_angle(theta_p) + ) and np.isclose( + self._normalize_angle(sol_r), self._normalize_angle(theta_r) + ): + found_match = True + break + + status = "✅ SUCCESS" if found_match else "❌ FAILURE" + if not found_match: + all_passed = False + + print(f"\n- Test Case: {name} ({pitch_deg}°, {roll_deg}°)") + print( + f" - FK Target Vector (y_axis): {np.round(target_y_in_wrist_frame, 3)}" + ) + print(f" - IK Input Vector (pre-rotated): {np.round(ik_input_vector, 3)}") + print( + f" - IK Solutions Found (deg): {[(np.rad2deg(p), np.rad2deg(r)) for p, r in ik_solutions_rad]}" + ) + print(f" - Result: {status}") + + LOGGER.info("--- IK Solver Test Finished ---") + if all_passed: + LOGGER.info("✅ All test cases passed!") + else: + LOGGER.error("❌ One or more test cases failed!") + + def _generate_orientation_holding_atool_trajectory( + self, traj_jaco: JointTrajectory, desired_tool_tip_world_orientation: Quaternion + ) -> Optional[JointTrajectory]: + """ + Generates a synchronized Articutool trajectory that maintains a fixed world + orientation, including the necessary kinematic pre-rotation for the IK solver. 
+ """ + if not traj_jaco or not traj_jaco.points: + return None + + R_World_TipTarget = R.from_quat( + [ + desired_tool_tip_world_orientation.x, + desired_tool_tip_world_orientation.y, + desired_tool_tip_world_orientation.z, + desired_tool_tip_world_orientation.w, + ] + ) + y_axis_TipTarget_InWorld = R_World_TipTarget.apply(np.array([0.0, 1.0, 0.0])) + + atool_solutions = [] + last_valid_solution = None + + for point in traj_jaco.points: + jaco_points = list(point.positions) + jaco_wrist_transform = self.kinematics_model.get_frame_transform( + frame_name=END_EFFECTOR_LINK_JACO, jaco_joints=jaco_points + ) + if jaco_wrist_transform is None: + LOGGER.error(" Pinocchio FK failed for a waypoint.") + return None + R_World_JacoEE = R.from_matrix(jaco_wrist_transform.rotation) + + target_y_in_wrist_frame = R_World_JacoEE.inv().apply( + y_axis_TipTarget_InWorld + ) + ik_solutions = self._solve_articutool_ik(target_y_in_wrist_frame) + + valid_solutions = [ + np.array(sol) + for sol in ik_solutions + if ( + ARTICUTOOL_PITCH_LIMITS_RAD[0] + <= sol[0] + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] + <= sol[1] + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + ) + ] + + if not valid_solutions: + LOGGER.warning( + " Failed to find valid IK solution for an Articutool waypoint." + ) + return None + + if last_valid_solution is None: + chosen_solution = valid_solutions[0] + else: + distances = [ + np.linalg.norm(sol - last_valid_solution) for sol in valid_solutions + ] + chosen_solution = valid_solutions[np.argmin(distances)] + + atool_solutions.append(chosen_solution) + last_valid_solution = chosen_solution + + traj_atool = JointTrajectory() + traj_atool.joint_names = JOINT_NAMES_ATOOL + for i, pos in enumerate(atool_solutions): + point = JointTrajectoryPoint() + point.positions = pos.tolist() + point.time_from_start = traj_jaco.points[i].time_from_start + traj_atool.points.append(point) + + return traj_atool + + def _generate_leveling_atool_trajectory( + self, traj_jaco: JointTrajectory + ) -> Optional[JointTrajectory]: + """ + Generates a synchronized Articutool trajectory that maintains a level-to-gravity + orientation during a Jaco arm motion, ensuring solution continuity. + """ + if not traj_jaco or not traj_jaco.points: + return None + + atool_solutions = [] + last_valid_solution = None + + # 1. Iterate through each waypoint of the Jaco trajectory + for point in traj_jaco.points: + jaco_joint_config = list(point.positions) + + # 2. For each arm configuration, find the pose of the wrist using FK + jaco_wrist_transform = self.kinematics_model.get_frame_transform( + frame_name=END_EFFECTOR_LINK_JACO, jaco_joints=jaco_joint_config + ) + if jaco_wrist_transform is None: + LOGGER.error( + " Pinocchio FK failed for a waypoint while generating leveling trajectory." + ) + return None + + p = jaco_wrist_transform.translation + q = R.from_matrix(jaco_wrist_transform.rotation).as_quat() + jaco_wrist_pose = Pose( + position=Point(x=p[0], y=p[1], z=p[2]), + orientation=Quaternion(x=q[0], y=q[1], z=q[2], w=q[3]), + ) + + # 3. Calculate all possible Articutool IK solutions for leveling + R_world_jacoee = R.from_quat([q[0], q[1], q[2], q[3]]) + target_up_in_wrist_frame = R_world_jacoee.inv().apply( + np.array([0.0, 0.0, 1.0]) + ) + ik_solutions = self._solve_articutool_ik(target_up_in_wrist_frame) + + # 4. 
Filter for solutions that are within joint limits + valid_solutions = [ + np.array(sol) + for sol in ik_solutions + if ( + ARTICUTOOL_PITCH_LIMITS_RAD[0] + <= sol[0] + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] + <= sol[1] + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + ) + ] + + if not valid_solutions: + LOGGER.warning( + " Failed to find any valid leveling IK solution for a waypoint." + ) + # As a fallback, try to re-use the last known good solution + if last_valid_solution is not None: + chosen_solution = last_valid_solution + else: + return None # Cannot proceed if the first waypoint has no solution + elif last_valid_solution is None: + # For the first waypoint, just pick the first valid solution + chosen_solution = valid_solutions[0] + else: + # For subsequent waypoints, choose the solution closest to the previous one + distances = [ + np.linalg.norm(sol - last_valid_solution) for sol in valid_solutions + ] + chosen_solution = valid_solutions[np.argmin(distances)] + + atool_solutions.append(chosen_solution) + last_valid_solution = chosen_solution + + # 5. Compile the continuous solutions into a JointTrajectory message + traj_atool = JointTrajectory() + traj_atool.joint_names = JOINT_NAMES_ATOOL + for i, pos in enumerate(atool_solutions): + point_msg = JointTrajectoryPoint() + point_msg.positions = pos.tolist() + point_msg.time_from_start = traj_jaco.points[i].time_from_start + traj_atool.points.append(point_msg) + + return traj_atool + + def _solve_articutool_ik( + self, target_vector: np.ndarray + ) -> List[Tuple[float, float]]: + """Analytical IK solver for the Articutool.""" + vx, vy, vz = target_vector + solutions = [] + asin_arg = -vx + if not (-1.0 - EPSILON <= asin_arg <= 1.0 + EPSILON): + return [] + theta_r1 = math.asin(np.clip(asin_arg, -1.0, 1.0)) + theta_r2 = (math.pi - theta_r1 + math.pi) % (2 * math.pi) - math.pi + for theta_r in list(set([theta_r1, theta_r2])): + cos_tr = math.cos(theta_r) + if math.isclose(cos_tr, 0.0, abs_tol=EPSILON): + if math.isclose(vy, 0.0, abs_tol=EPSILON) and math.isclose( + vz, 0.0, abs_tol=EPSILON + ): + solutions.append( + (0.0, (theta_r + math.pi) % (2 * math.pi) - math.pi) + ) + continue + theta_p = math.atan2(vz, vy) + solutions.append( + ( + (theta_p + math.pi) % (2 * math.pi) - math.pi, + (theta_r + math.pi) % (2 * math.pi) - math.pi, + ) + ) + return solutions + + def _compute_leveling_joints(self, jaco_wrist_pose: Pose) -> Optional[List[float]]: + """ + Calculates the Articutool joint angles required to point the tool's Y-axis up. + This now uses the original `_solve_articutool_ik` method. + """ + # 1. Get the rotation of the Jaco wrist in the world frame + R_world_jacoee = R.from_quat( + [ + jaco_wrist_pose.orientation.x, + jaco_wrist_pose.orientation.y, + jaco_wrist_pose.orientation.z, + jaco_wrist_pose.orientation.w, + ] + ) + + # 2. Transform the world "up" vector into the wrist's frame + world_z_up_vector = np.array([0.0, 0.0, 1.0]) + target_up_in_wrist_frame = R_world_jacoee.inv().apply(world_z_up_vector) + + # 3. Solve the Articutool IK for the transformed vector using the original solver + # The solver's math is correct for this specific leveling task. + ik_solutions = self._solve_articutool_ik(target_up_in_wrist_frame) + + if not ik_solutions: + return None + + # 4. 
Find the first valid solution within joint limits + for theta_p, theta_r in ik_solutions: + if ( + ARTICUTOOL_PITCH_LIMITS_RAD[0] + <= theta_p + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] + <= theta_r + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + ): + LOGGER.info( + f" Found valid leveling solution: Pitch={math.degrees(theta_p):.1f}, Roll={math.degrees(theta_r):.1f}" + ) + return [theta_p, theta_r] + + return None + + def _calculate_cartesian_path_length( + self, trajectory: Optional[JointTrajectory], group_name: str + ) -> float: + """Calculates the Cartesian path length of the end-effector for a trajectory.""" + if ( + not trajectory + or not self.kinematics_model.is_ready() + or len(trajectory.points) < 2 + ): + return 0.0 + + total_length = 0.0 + last_position = None + joint_names = trajectory.joint_names + + # Determine the correct end-effector link for the planning group + if group_name == PLANNING_GROUP_JACO: + ee_link = END_EFFECTOR_LINK_JACO + else: + ee_link = END_EFFECTOR_LINK_FULL + + for point in trajectory.points: + joint_map = dict(zip(joint_names, point.positions)) + jaco_config = [joint_map.get(name, 0.0) for name in JOINT_NAMES_JACO] + atool_config = [joint_map.get(name, 0.0) for name in JOINT_NAMES_ATOOL] + + transform = self.kinematics_model.get_frame_transform( + ee_link, jaco_config, atool_config + ) + if transform is None: + continue + + current_position = transform.translation + if last_position is not None: + total_length += np.linalg.norm(current_position - last_position) + last_position = current_position + + return total_length + + # --- Feasibility Checking --- + def _is_config_kinematically_feasible(self, jaco_joint_config: List[float]) -> bool: + """ + Checks if the Articutool can maintain leveling at a single Jaco configuration + by using the PinocchioModel to perform forward kinematics. + """ + if not self.kinematics_model.is_ready(): + return False + + # Get the Jaco end-effector's transform for the given joint configuration. + # We don't need to specify atool_joints, as they don't affect the Jaco wrist's pose. + ee_transform = self.kinematics_model.get_frame_transform( + frame_name=END_EFFECTOR_LINK_JACO, jaco_joints=jaco_joint_config + ) + + if ee_transform is None: + # The FK calculation failed. 
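+ # Treating an FK failure as infeasible is the conservative choice here:
+ # the waypoint simply counts against the feasibility percentage in
+ # _verify_trajectory instead of aborting the whole verification pass.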
+ return False + + target_up_in_ee_frame = ( + R.from_matrix(ee_transform.rotation).inv().apply(WORLD_UP_VECTOR) + ) + solutions = self._solve_articutool_ik(target_up_in_ee_frame) + + if not solutions: + return False + + return any( + ARTICUTOOL_PITCH_LIMITS_RAD[0] <= pitch <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] <= roll <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + for pitch, roll in solutions + ) + + def _verify_trajectory(self, trajectory: JointTrajectory) -> float: + """Verifies a trajectory and returns the percentage of feasible waypoints.""" + if not trajectory or not trajectory.points: + return 0.0 + + feasible_waypoints = 0 + for point in trajectory.points: + jaco_joint_config = list(point.positions) + if self._is_config_kinematically_feasible(jaco_joint_config): + feasible_waypoints += 1 + + return (feasible_waypoints / len(trajectory.points)) * 100.0 + + def _serialize_pose(self, pose: Pose) -> Dict[str, List[float]]: + """Converts a Pose message to a JSON-serializable dictionary.""" + return { + "position": [pose.position.x, pose.position.y, pose.position.z], + "orientation_xyzw": [ + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ], + } + + def _serialize_trajectory( + self, trajectory: JointTrajectory + ) -> Optional[Dict[str, Any]]: + """Converts a JointTrajectory message to a JSON-serializable dictionary.""" + if not trajectory or not trajectory.points: + return None + + serialized_points = [] + for point in trajectory.points: + serialized_points.append( + { + "positions": list(point.positions), + "velocities": list(point.velocities), + "accelerations": list(point.accelerations), + "time_from_start_sec": point.time_from_start.sec, + "time_from_start_nanosec": point.time_from_start.nanosec, + } + ) + + return { + "joint_names": list(trajectory.joint_names), + "points": serialized_points, + } + + # --- Planning Primitive --- + def _plan_to_above_plate( + self, above_plate_pose: Pose, start_state_jaco: Any + ) -> Tuple[TrialStatus, Optional[JointTrajectory], float]: + LOGGER.info(" Planning to AbovePlate pose...") + goal_constraints = [ + create_pose_constraint(above_plate_pose, tolerance_orientation=0.2) + ] + + return self.motion_planner.plan( + group_name=PLANNING_GROUP_JACO, + start_state=start_state_jaco, + goal_constraints=goal_constraints, + ) + + def _plan_to_above_food( + self, + above_food_pose: Pose, + start_state_jaco: List[float], + start_state_atool: List[float], + ) -> Tuple[ + TrialStatus, Optional[JointTrajectory], Optional[JointTrajectory], float + ]: + LOGGER.info(" Solving 8-DOF IK for AboveFood pose...") + + # 1. Compute IK using the planner + ik_solution = self.motion_planner.compute_ik( + group_name=PLANNING_GROUP_FULL, + target_pose=above_food_pose, + start_joint_state=(start_state_jaco + start_state_atool), + ) + + if not ik_solution: + LOGGER.warning(" IK solution for 8-DOF system not found.") + return TrialStatus.IK_FAILURE, None, None, 0.0 + + LOGGER.info(f" 8-DOF IK solution found. Planning for subgroups.") + solution_map = dict(zip(ik_solution.name, ik_solution.position)) + target_jaco_config = [solution_map[name] for name in JOINT_NAMES_JACO] + target_atool_config = [solution_map[name] for name in JOINT_NAMES_ATOOL] + + # 2. 
Plan for the Jaco arm + jaco_goal_constraints = [create_joint_constraint(target_jaco_config)] + jaco_path_constraints = [ + create_orientation_path_constraint( + quat_xyzw=PATH_CONSTRAINT_QUAT_XYZW, + tolerance_rad=(np.pi, 2 * np.pi, 2 * np.pi), + ) + ] + status_jaco, traj_jaco, time_jaco = self.motion_planner.plan( + group_name=PLANNING_GROUP_JACO, + start_state=start_state_jaco, + goal_constraints=jaco_goal_constraints, + path_constraints=jaco_path_constraints, + ) + + if status_jaco != TrialStatus.SUCCESS: + LOGGER.warning(" Jaco arm planning failed.") + return TrialStatus.PLANNER_FAILURE, None, None, time_jaco + + # 3. Plan for the Articutool + atool_goal_constraints = [create_joint_constraint(target_atool_config)] + status_atool, traj_atool, time_atool = self.motion_planner.plan( + group_name=PLANNING_GROUP_ATOOL, + start_state=start_state_atool, + goal_constraints=atool_goal_constraints, + ) + + total_time = time_jaco + time_atool + if status_atool != TrialStatus.SUCCESS: + LOGGER.warning(" Articutool planning failed.") + return TrialStatus.PLANNER_FAILURE, traj_jaco, None, total_time + + return TrialStatus.SUCCESS, traj_jaco, traj_atool, total_time + + def _plan_to_in_food( + self, + in_food_pose: Pose, + start_state_jaco: List[float], + start_state_atool: List[float], + ) -> Tuple[ + TrialStatus, + Optional[JointTrajectory], + Optional[JointTrajectory], + Optional[Pose], + float, + ]: + LOGGER.info(" Solving 8-DOF IK for InFood pose...") + + # 1. Compute the 8-DOF IK solution for the target tool tip pose + ik_solution = self.motion_planner.compute_ik( + group_name=PLANNING_GROUP_FULL, + target_pose=in_food_pose, + start_joint_state=(start_state_jaco + start_state_atool), + ) + + if not ik_solution: + LOGGER.warning(" IK solution for 8-DOF system not found.") + return TrialStatus.IK_FAILURE, None, None, None, 0.0 + + LOGGER.info(f" 8-DOF IK solution found. Calculating wrist pose via FK.") + + # 2. Use FK to find the Jaco wrist pose from the 8-DOF solution + fk_poses = self.motion_planner.compute_fk( + group_name=PLANNING_GROUP_FULL, + joint_state=ik_solution, + fk_link_names=[END_EFFECTOR_LINK_JACO], + ) + + if not fk_poses: + LOGGER.warning(" FK calculation for Jaco wrist failed.") + return TrialStatus.IK_FAILURE, None, None, None, 0.0 + + # The goal for the Jaco arm is the calculated pose of its wrist + jaco_wrist_goal_pose = fk_poses[0].pose + + # 3. Plan a Cartesian motion for the Jaco arm to the wrist pose + jaco_goal_constraints = [create_pose_constraint(jaco_wrist_goal_pose)] + + # 4. Plan a Cartesian motion for the Jaco arm to the wrist pose + status_jaco, traj_jaco, planning_time = self.motion_planner.plan( + group_name=PLANNING_GROUP_JACO, + start_state=start_state_jaco, + goal_constraints=jaco_goal_constraints, + cartesian=True, + ) + + if status_jaco != TrialStatus.SUCCESS: + LOGGER.warning(" Jaco arm planning failed.") + return TrialStatus.PLANNER_FAILURE, None, None, None, planning_time + + # 4. 
Generate the corresponding synchronous Articutool trajectory
+ LOGGER.info(" Generating synchronous Articutool trajectory...")
+ traj_atool = self._generate_orientation_holding_atool_trajectory(
+ traj_jaco, in_food_pose.orientation
+ )
+
+ if traj_atool is None:
+ LOGGER.error(" Failed to generate synchronous Articutool trajectory.")
+ return TrialStatus.IK_FAILURE, traj_jaco, None, None, planning_time
+
+ return (
+ TrialStatus.SUCCESS,
+ traj_jaco,
+ traj_atool,
+ jaco_wrist_goal_pose,
+ planning_time,
+ )
+
+ def _plan_to_reorient_wrist(
+ self, start_state_jaco: List[float]
+ ) -> Tuple[
+ TrialStatus, Optional[JointTrajectory], Optional[JointTrajectory], float, float
+ ]:
+ """
+ Plans a motion to reorient the Jaco EE to a neutral "level" pose (Y-axis up) while maintaining its current position.
+ This acts as a preparatory step before the long-distance transport.
+ """
+ LOGGER.info(" Planning to Reorient Wrist to a level posture...")
+
+ # 1. Get the current EE pose via FK
+ start_joint_state = JointState()
+ start_joint_state.name = JOINT_NAMES_JACO
+ start_joint_state.position = start_state_jaco
+ fk_poses = self.motion_planner.compute_fk(
+ group_name=PLANNING_GROUP_JACO,
+ joint_state=start_joint_state,
+ fk_link_names=[END_EFFECTOR_LINK_JACO],
+ )
+ if not fk_poses:
+ LOGGER.warning(" FK failed, cannot determine current wrist pose.")
+ return TrialStatus.IK_FAILURE, None, None, 0.0, 0.0
+
+ current_ee_pose = fk_poses[0].pose
+ current_position = current_ee_pose.position
+
+ # 2. Construct a goal constraint that keeps the Jaco EE roughly in the same position, but level against gravity
+ goal_constraints = [
+ create_position_constraint(current_position, 0.1),
+ create_orientation_path_constraint(
+ PATH_CONSTRAINT_QUAT_XYZW, (0.1, 2 * math.pi, 0.1)
+ ),
+ ]
+
+ # 3. Plan the 6-DOF trajectory for the Jaco arm
+ status, traj_jaco, planning_time = self.motion_planner.plan(
+ group_name=PLANNING_GROUP_JACO,
+ start_state=start_state_jaco,
+ goal_constraints=goal_constraints,
+ cartesian=True,
+ )
+ if status != TrialStatus.SUCCESS:
+ return TrialStatus.PLANNER_FAILURE, None, None, 0.0, planning_time
+
+ # 4. VERIFY the Jaco trajectory for leveling feasibility
+ feasibility_percent = self._verify_trajectory(traj_jaco)
+ if feasibility_percent < 99.0:
+ LOGGER.warning(
+ f" Reorient Wrist path failed verification ({feasibility_percent:.1f}% feasible)."
+ )
+ return (
+ TrialStatus.VERIFICATION_FAILURE,
+ traj_jaco,
+ None,
+ feasibility_percent,
+ planning_time,
+ )
+
+ # 5. Generate the corresponding synchronous Articutool trajectory
+ LOGGER.info(" Generating synchronous Articutool trajectory...")
+ traj_atool = self._generate_leveling_atool_trajectory(traj_jaco)
+
+ if traj_atool is None:
+ LOGGER.error(" Failed to generate synchronous Articutool trajectory.")
+ return (
+ TrialStatus.IK_FAILURE,
+ traj_jaco,
+ None,
+ feasibility_percent,
+ planning_time,
+ )
+ return (
+ TrialStatus.SUCCESS,
+ traj_jaco,
+ traj_atool,
+ feasibility_percent,
+ planning_time,
+ )
+
+ def _plan_to_level_articutool(
+ self,
+ jaco_wrist_pose: Pose,
+ start_state_atool: List[float],
+ ) -> Tuple[TrialStatus, Optional[JointTrajectory], float]:
+ """
+ Calculates the required joint angles for the Articutool to achieve a level
+ pose and plans a trajectory to that configuration.
+ """
+ LOGGER.info(" Calculating Articutool leveling configuration...")
+
+ # 1. 
Compute the target joint angles for leveling based on the wrist's pose + target_leveling_joints = self._compute_leveling_joints(jaco_wrist_pose) + + if target_leveling_joints is None: + LOGGER.warning(" Could not find an IK solution for Articutool leveling.") + return TrialStatus.IK_FAILURE, None, 0.0 + + # 2. Create a joint goal constraint for the Articutool + goal_constraints = [create_joint_constraint(target_leveling_joints)] + + # 3. Plan a joint-space motion for the Articutool group + status, trajectory, planning_time = self.motion_planner.plan( + group_name=PLANNING_GROUP_ATOOL, + start_state=start_state_atool, + goal_constraints=goal_constraints, + ) + + if status != TrialStatus.SUCCESS: + LOGGER.warning(" Articutool leveling plan failed.") + return TrialStatus.PLANNER_FAILURE, None, planning_time + + LOGGER.info(" Articutool leveling plan successful.") + return TrialStatus.SUCCESS, trajectory, planning_time + + def _plan_to_resting( + self, + resting_wrist_pose: Pose, + start_state_jaco: List[float], + ) -> Tuple[ + TrialStatus, + Optional[JointTrajectory], + Optional[JointTrajectory], + float, + float, + ]: + """ + Plans a 6-DOF guided motion for the Jaco arm to a resting wrist pose + and measures its leveling feasibility. + """ + LOGGER.info(" Planning to Resting pose (S2-Heuristic)...") + + # 1. Define goal and path constraints for the Jaco arm's wrist + goal_constraints = [create_position_constraint(resting_wrist_pose.position)] + path_constraints = [ + create_orientation_path_constraint( + quat_xyzw=PATH_CONSTRAINT_QUAT_XYZW, + tolerance_rad=PATH_CONSTRAINT_TOLERANCE_XYZ_RAD, + ) + ] + + # 2. Plan the guided 6-DOF trajectory for the Jaco arm + status, traj_jaco, planning_time = self.motion_planner.plan( + group_name=PLANNING_GROUP_JACO, + start_state=start_state_jaco, + goal_constraints=goal_constraints, + path_constraints=path_constraints, + ) + if status != TrialStatus.SUCCESS: + return TrialStatus.PLANNER_FAILURE, None, None, 0.0, planning_time + + # 3. VERIFY the Jaco trajectory for leveling feasibility + feasibility_percent = self._verify_trajectory(traj_jaco) + if feasibility_percent < 99.0: + LOGGER.warning( + f" Path to Resting failed verification ({feasibility_percent:.1f}% feasible)." + ) + return ( + TrialStatus.VERIFICATION_FAILURE, + traj_jaco, + None, + feasibility_percent, + planning_time, + ) + + # 4. Generate the corresponding synchronous Articutool trajectory + LOGGER.info(" Generating synchronous Articutool trajectory...") + traj_atool = self._generate_leveling_atool_trajectory(traj_jaco) + + if traj_atool is None: + LOGGER.error(" Failed to generate synchronous Articutool trajectory.") + return ( + TrialStatus.IK_FAILURE, + traj_jaco, + None, + feasibility_percent, + planning_time, + ) + return ( + TrialStatus.SUCCESS, + traj_jaco, + traj_atool, + feasibility_percent, + planning_time, + ) + + # --- Main Benchmark Loop --- + def run(self): + """Main benchmark execution loop with granular metric collection.""" + # TODO: Decide whether we should keep the bounding cylinder active + # self.add_articutool_bounding_cylinder() + + for i in range(self.num_trials): + LOGGER.info(f"--- Running Trial {i + 1}/{self.num_trials} ---") + + # 1. Create the parameter set and the generator for this trial + generation_params = SceneGenerationParams() + scene_generator = SceneGenerator(generation_params) + + # 2. 
Generate the scene and characteristics with a single, clean call + scene, scene_characteristics = scene_generator.generate() + + params_dict = asdict(generation_params) + params_dict["path_constraint_tolerance_xyz_rad"] = list( + PATH_CONSTRAINT_TOLERANCE_XYZ_RAD + ) + params_dict["articutool_pitch_limits_rad"] = list( + ARTICUTOOL_PITCH_LIMITS_RAD + ) + params_dict["articutool_roll_limits_rad"] = list(ARTICUTOOL_ROLL_LIMITS_RAD) + + trial_data = { + "trial_id": i, + "scene_poses": { + "food_pose": self._serialize_pose(scene["food_pose"]), + "mouth_pose": self._serialize_pose(scene["mouth_pose"]), + "above_plate_pose": self._serialize_pose(scene["above_plate_pose"]), + "above_food_pose": self._serialize_pose(scene["above_food_pose"]), + "in_food_pose": self._serialize_pose(scene["in_food_pose"]), + "staging_pose": self._serialize_pose(scene["staging_pose"]), + "resting_pose": self._serialize_pose(scene["resting_pose"]), + }, + "scene_characteristics": scene_characteristics, + "parameters": params_dict, + "stages": [], + "end_to_end_success": False, # Default to False + } + + # 3. Initialize the robot's state for the trial + current_jaco_state = scene["home_config"] + current_atool_state = [0.0, 0.0] + trial_failed = False + + # --- Stage 1: Home -> AbovePlate --- + if not trial_failed: + LOGGER.info("Stage 1: Home -> AbovePlate") + max_attempts = 3 + total_planning_time = 0.0 + status, traj_jaco = TrialStatus.PLANNER_FAILURE, None + for attempt in range(max_attempts): + LOGGER.info(f" Attempt {attempt + 1}/{max_attempts}...") + status, traj_jaco, planning_time = self._plan_to_above_plate( + scene["above_plate_pose"], current_jaco_state + ) + total_planning_time += planning_time + if status == TrialStatus.SUCCESS: + LOGGER.info(f" Success on attempt {attempt + 1}.") + break + LOGGER.warning( + f" Attempt {attempt + 1} failed with status: {status.value}" + ) + path_length = self._calculate_cartesian_path_length( + traj_jaco, PLANNING_GROUP_JACO + ) + trial_data["stages"].append( + { + "stage_name": "HomeToAbovePlate", + "target_frame": END_EFFECTOR_LINK_JACO, + "status": status.value, + "execution_mode": ExecutionMode.JACO_ONLY.value, + "planning_time_sec": total_planning_time, + "trajectory_path_length_m": path_length, + "custom_metrics": {}, + "traj_jaco": self._serialize_trajectory(traj_jaco), + "traj_atool": None, + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 1 failed. Skipping trial.") + trial_failed = True + else: + current_jaco_state = list(traj_jaco.points[-1].positions) + + # --- Stage 2: AbovePlate -> AboveFood --- + if not trial_failed: + LOGGER.info("Stage 2: AbovePlate -> AboveFood") + ( + status, + traj_jaco, + traj_atool, + planning_time, + ) = self._plan_to_above_food( + scene["above_food_pose"], current_jaco_state, current_atool_state + ) + path_length_jaco = self._calculate_cartesian_path_length( + traj_jaco, PLANNING_GROUP_JACO + ) + path_length_atool = self._calculate_cartesian_path_length( + traj_atool, PLANNING_GROUP_ATOOL + ) + trial_data["stages"].append( + { + "stage_name": "AbovePlateToAboveFood", + "target_frame": END_EFFECTOR_LINK_FULL, + "status": status.value, + "execution_mode": ExecutionMode.SEQUENTIAL.value, + "planning_time_sec": planning_time, + "trajectory_path_length_m": path_length_jaco + + path_length_atool, + "custom_metrics": {}, + "traj_jaco": self._serialize_trajectory(traj_jaco), + "traj_atool": self._serialize_trajectory(traj_atool), + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 2 failed. 
Skipping trial.") + trial_failed = True + else: + current_jaco_state = list(traj_jaco.points[-1].positions) + current_atool_state = list(traj_atool.points[-1].positions) + + # --- Stage 3: AboveFood -> InFood --- + if not trial_failed: + LOGGER.info("Stage 3: AboveFood -> InFood (Cartesian)") + ( + status, + traj_jaco, + traj_atool, + jaco_wrist_pose, + planning_time, + ) = self._plan_to_in_food( + scene["in_food_pose"], current_jaco_state, current_atool_state + ) + path_length = self._calculate_cartesian_path_length( + traj_jaco, PLANNING_GROUP_JACO + ) + trial_data["stages"].append( + { + "stage_name": "AboveFoodToInFood", + "target_frame": END_EFFECTOR_LINK_FULL, + "status": status.value, + "execution_mode": ExecutionMode.SYNCHRONOUS.value, + "planning_time_sec": planning_time, + "trajectory_path_length_m": path_length, + "custom_metrics": {}, + "traj_jaco": self._serialize_trajectory(traj_jaco), + "traj_atool": self._serialize_trajectory(traj_atool), + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 3 failed. Skipping trial.") + trial_failed = True + else: + current_jaco_state = list(traj_jaco.points[-1].positions) + current_atool_state = list(traj_atool.points[-1].positions) + + # --- Stage 4: InFood -> LevelArticutool --- + if not trial_failed: + LOGGER.info("Stage 4: Level Articutool") + status, traj_atool, planning_time = self._plan_to_level_articutool( + jaco_wrist_pose, current_atool_state + ) + path_length = self._calculate_cartesian_path_length( + traj_atool, PLANNING_GROUP_ATOOL + ) + trial_data["stages"].append( + { + "stage_name": "LevelArticutool", + "target_frame": END_EFFECTOR_LINK_ATOOL, + "status": status.value, + "execution_mode": ExecutionMode.ATOOL_ONLY.value, + "planning_time_sec": planning_time, + "trajectory_path_length_m": path_length, + "custom_metrics": {}, + "traj_jaco": None, + "traj_atool": self._serialize_trajectory(traj_atool), + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 4 failed. Skipping trial.") + trial_failed = True + else: + current_atool_state = list(traj_atool.points[-1].positions) + + # --- Stage 5: LevelArticutool -> Reorient Wrist --- + if not trial_failed: + LOGGER.info("Stage 5: Reorient Wrist") + (status, traj_jaco, traj_atool, leveling_feasibility, planning_time) = ( + self._plan_to_reorient_wrist(current_jaco_state) + ) + path_length = self._calculate_cartesian_path_length( + traj_jaco, PLANNING_GROUP_JACO + ) + trial_data["stages"].append( + { + "stage_name": "ReorientWrist", + "target_frame": END_EFFECTOR_LINK_JACO, + "status": status.value, + "execution_mode": ExecutionMode.SYNCHRONOUS.value, + "planning_time_sec": planning_time, + "trajectory_path_length_m": path_length, + "custom_metrics": { + "leveling_feasibility_percent": leveling_feasibility + }, + "traj_jaco": self._serialize_trajectory(traj_jaco), + "traj_atool": self._serialize_trajectory(traj_atool), + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 5 failed. 
Skipping trial.") + trial_failed = True + else: + current_jaco_state = list(traj_jaco.points[-1].positions) + current_atool_state = list(traj_atool.points[-1].positions) + + # --- Stage 6: LevelArticutool -> Resting --- + if not trial_failed: + LOGGER.info("Stage 6: Resting") + ( + status, + traj_jaco, + traj_atool, + leveling_feasibility, + planning_time, + ) = self._plan_to_resting(scene["resting_pose"], current_jaco_state) + path_length = self._calculate_cartesian_path_length( + traj_jaco, PLANNING_GROUP_JACO + ) + trial_data["stages"].append( + { + "stage_name": "Resting", + "target_frame": END_EFFECTOR_LINK_JACO, + "status": status.value, + "execution_mode": ExecutionMode.SYNCHRONOUS.value, + "planning_time_sec": planning_time, + "trajectory_path_length_m": path_length, + "custom_metrics": { + "leveling_feasibility_percent": leveling_feasibility + }, + "traj_jaco": self._serialize_trajectory(traj_jaco), + "traj_atool": self._serialize_trajectory(traj_atool), + } + ) + if status != TrialStatus.SUCCESS: + LOGGER.error(f" Stage 6 failed. Skipping trial.") + trial_failed = True + else: + current_jaco_state = list(traj_jaco.points[-1].positions) + current_atool_state = list(traj_atool.points[-1].positions) + + # --- Finalize Trial --- + if not trial_failed: + trial_data["end_to_end_success"] = True + + self._save_trial_data(trial_data) + LOGGER.info(f"Trial {i} data saved to {self.output_filename}") + + LOGGER.info("Benchmark finished.") + + def _save_trial_data(self, trial_data: Dict[str, Any]): + """ + Saves a single trial's data by appending it as a new line + to the output file. + """ + if not self.output_filename: + return + try: + # Open the file in append mode ('a') + with open(self.output_filename, "a") as f: + # Use json.dumps to serialize the single trial dictionary + json_string = json.dumps(trial_data) + # Write the string followed by a newline character + f.write(json_string + "\n") + except Exception as e: + LOGGER.error( + f"Failed to save trial data for trial {trial_data.get('trial_id', 'N/A')}: {e}" + ) + + +def main(): + parser = argparse.ArgumentParser( + description="Run the end-to-end feeding benchmark." + ) + parser.add_argument( + "--xacro_file", + type=str, + default="/home/regulus/ada_ws/src/ada_ros2/ada_moveit/config/ada.urdf.xacro", + help="Path to the robot URDF/XACRO file.", + ) + parser.add_argument( + "--num_trials", + type=int, + default=100, + help="Number of full feeding trials to run.", + ) + parser.add_argument( + "--timeout", type=float, default=5.0, help="Planning timeout in seconds." 
+ ) + parser.add_argument( + "--output_dir", + type=str, + default="e2e_benchmark_output", + help="Directory to save results.", + ) + args = parser.parse_args() + + rclpy.init() + node = Node("end_to_end_benchmark_node") + executor = rclpy.executors.MultiThreadedExecutor() + executor.add_node(node) + executor_thread = Thread(target=executor.spin, daemon=True) + executor_thread.start() + + # Initialize MoveIt2 for the Jaco arm, Articutool, and Full + moveit2_jaco = MoveIt2( + node=node, + joint_names=JOINT_NAMES_JACO, + base_link_name=BASE_LINK_JACO, + end_effector_name=END_EFFECTOR_LINK_JACO, + group_name=PLANNING_GROUP_JACO, + callback_group=ReentrantCallbackGroup(), + ) + moveit2_atool = MoveIt2( + node=node, + joint_names=JOINT_NAMES_ATOOL, + base_link_name=BASE_LINK_ATOOL, + end_effector_name=END_EFFECTOR_LINK_ATOOL, + group_name=PLANNING_GROUP_ATOOL, + callback_group=ReentrantCallbackGroup(), + ) + moveit2_full = MoveIt2( + node=node, + joint_names=JOINT_NAMES_FULL, + base_link_name=BASE_LINK_FULL, + end_effector_name=END_EFFECTOR_LINK_FULL, + group_name=PLANNING_GROUP_FULL, + callback_group=ReentrantCallbackGroup(), + ) + + benchmark = EndToEndBenchmark( + node, + moveit2_jaco, + moveit2_atool, + moveit2_full, + args.xacro_file, + args.num_trials, + args.timeout, + args.output_dir, + ) + + try: + benchmark.run() + except KeyboardInterrupt: + LOGGER.info("Benchmark interrupted by user.") + except Exception as e: + LOGGER.error(f"An unhandled error occurred: {e}") + finally: + LOGGER.info("Shutting down.") + rclpy.shutdown() + executor_thread.join() + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/holistic_benchmark.py b/ada_feeding/scripts/holistic_benchmark.py new file mode 100644 index 00000000..f8a26d0a --- /dev/null +++ b/ada_feeding/scripts/holistic_benchmark.py @@ -0,0 +1,670 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script provides a holistic benchmark to compare multiple planning +methodologies for the Jaco + Articutool system. Its goal is to generate a +comprehensive dataset to empirically evaluate the trade-offs between planner +success, motion flexibility, and guaranteed kinematic/dynamic feasibility. + +This version uses "constraint-aware" start state sampling, ensuring that each +trial begins from a configuration that is kinematically feasible for the +Articutool to maintain a level orientation. It also uses granular failure +logging to distinguish between different failure modes. 
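+
+Example usage (illustrative paths and values, matching the argparse options below):
+
+    python3 holistic_benchmark.py /path/to/ada.urdf.xacro --mode all \
+        --num_tasks 50 --timeout 10.0 --output_dir holistic_benchmark_output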
+""" + +# Standard imports +from collections import namedtuple, Counter +from datetime import datetime +import os +import time +from threading import Thread +from typing import Optional, List, Dict, Tuple, Any +import json +import math +import subprocess +import sys +import argparse +from enum import Enum + +# Third-party imports +import numpy as np +from pymoveit2 import MoveIt2 +from pymoveit2.robots import kinova +import rclpy +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.node import Node +from trajectory_msgs.msg import JointTrajectory +from geometry_msgs.msg import Quaternion +from scipy.spatial.transform import Rotation as R +import pinocchio as pin + +# --- Constants --- +LOGGER = rclpy.logging.get_logger("holistic_benchmark") +PLANNING_GROUP = "jaco_arm" +JOINT_NAMES = kinova.joint_names() +BASE_LINK = kinova.base_link_name() +END_EFFECTOR_LINK = kinova.end_effector_name() +EPSILON = 1e-6 +ARTICUTOOL_PITCH_LIMITS_RAD = (-math.pi / 2, math.pi / 2) +ARTICUTOOL_ROLL_LIMITS_RAD = (-math.pi, math.pi) +WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) +PATH_CONSTRAINT_QUAT_XYZW = (0.707, 0.0, 0.0, 0.707) +PATH_CONSTRAINT_TOLERANCE_XYZ_RAD = (1.5, 3.14, 0.8) +GOAL_YAW_CONSTRAINT_TOLERANCE_XYZ_RAD = (math.pi, math.pi, 0.1) +WORKSPACE_OUTER_RADIUS = 0.7 +WORKSPACE_INNER_RADIUS = 0.3 + +# --- Data Structures --- +TrajectoryMetrics = namedtuple( + "TrajectoryMetrics", + [ + "duration_s", + "joint_space_path_length_rad", + "final_joint_positions", + "waypoints_data", + ], +) + + +class TrialStatus(Enum): + """Enumerates the possible outcomes of a benchmark trial.""" + + SUCCESS = "Success" + PLANNER_FAILURE = "Planner Failure" + START_STATE_SEARCH_FAILED = "Start State Search Failed" + PATH_VERIFICATION_FAILURE = "Path Verification Failure" + + +class HolisticBenchmark: + """Manages the holistic benchmark planning process.""" + + def __init__( + self, + node: Node, + moveit2: MoveIt2, + xacro_file_path: str, + mode: str, + num_tasks: int = 100, + planning_timeout: float = 10.0, + planner_id: str = "RRTConnectkConfigDefault", + output_dir: Optional[str] = None, + ): + self.node = node + self.moveit2 = moveit2 + self.xacro_file_path = xacro_file_path + self.mode = mode + self.num_tasks = num_tasks + self.moveit2.planning_time = planning_timeout + self.planner_id = planner_id + self.output_dir = output_dir + self.results: List[Dict[str, Any]] = [] + + self.pinocchio_model: Optional[pin.Model] = None + self.pinocchio_data: Optional[pin.Data] = None + self.jaco_ee_frame_id_pin: Optional[int] = None + self.jaco_vel_indices: List[int] = [] + self._initialize_pinocchio() + self.joint_limits = self._get_joint_limits() + + LOGGER.info(f"--- Initializing Benchmark: Mode '{self.mode}' ---") + if self.output_dir: + os.makedirs(self.output_dir, exist_ok=True) + LOGGER.info(f" - Results will be saved to: {self.output_dir}") + + def _initialize_pinocchio(self): + """Loads the robot model from a XACRO file into Pinocchio.""" + try: + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", self.xacro_file_path], + check=True, + capture_output=True, + text=True, + ) + urdf_xml_string = process.stdout + self.pinocchio_model = pin.buildModelFromXML(urdf_xml_string) + self.pinocchio_data = self.pinocchio_model.createData() + self.jaco_ee_frame_id_pin = self.pinocchio_model.getFrameId( + END_EFFECTOR_LINK + ) + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + self.jaco_vel_indices.append( + 
self.pinocchio_model.joints[joint_id].idx_v + ) + LOGGER.info("Pinocchio model loaded successfully.") + except Exception as e: + LOGGER.error(f"Failed to initialize Pinocchio model: {e}") + self.pinocchio_model = None + + def _get_joint_limits(self) -> List[Tuple[float, float]]: + """Retrieves joint limits from the Pinocchio model.""" + if self.pinocchio_model is None: + return [(-math.pi, math.pi)] * len(JOINT_NAMES) + limits = [] + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + idx = self.pinocchio_model.joints[joint_id].idx_q + limits.append( + ( + self.pinocchio_model.lowerPositionLimit[idx], + self.pinocchio_model.upperPositionLimit[idx], + ) + ) + return limits + + def generate_random_joint_config(self) -> List[float]: + """Generates a random joint configuration within limits.""" + return [np.random.uniform(low, high) for low, high in self.joint_limits] + + def _solve_articutool_ik( + self, target_vector: np.ndarray + ) -> List[Tuple[float, float]]: + """Analytical IK solver for the Articutool.""" + vx, vy, vz = target_vector + solutions = [] + asin_arg = -vx + if not (-1.0 - EPSILON <= asin_arg <= 1.0 + EPSILON): + return [] + theta_r1 = math.asin(np.clip(asin_arg, -1.0, 1.0)) + theta_r2 = (math.pi - theta_r1 + math.pi) % (2 * math.pi) - math.pi + for theta_r in list(set([theta_r1, theta_r2])): + cos_tr = math.cos(theta_r) + if math.isclose(cos_tr, 0.0, abs_tol=EPSILON): + if math.isclose(vy, 0.0, abs_tol=EPSILON) and math.isclose( + vz, 0.0, abs_tol=EPSILON + ): + solutions.append( + (0.0, (theta_r + math.pi) % (2 * math.pi) - math.pi) + ) + continue + theta_p = math.atan2(vz, vy) + solutions.append( + ( + (theta_p + math.pi) % (2 * math.pi) - math.pi, + (theta_r + math.pi) % (2 * math.pi) - math.pi, + ) + ) + return solutions + + def _is_config_kinematically_feasible(self, jaco_joint_config: List[float]) -> bool: + """ + Checks if the Articutool can maintain leveling at a single Jaco configuration. + This is our "oracle" for testing membership in the true feasibility manifold. + """ + if ( + self.pinocchio_model is None + or self.pinocchio_data is None + or self.jaco_ee_frame_id_pin is None + ): + return False + + q = pin.neutral(self.pinocchio_model) + for j, name in enumerate(JOINT_NAMES): + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + joint_obj = self.pinocchio_model.joints[joint_id] + if joint_obj.nq == 2 and not joint_obj.shortname().startswith( + "JointModelRX" + ): + q[joint_obj.idx_q : joint_obj.idx_q + 2] = [ + math.cos(jaco_joint_config[j]), + math.sin(jaco_joint_config[j]), + ] + else: + q[joint_obj.idx_q] = jaco_joint_config[j] + + pin.forwardKinematics(self.pinocchio_model, self.pinocchio_data, q) + pin.updateFramePlacements(self.pinocchio_model, self.pinocchio_data) + + ee_transform = self.pinocchio_data.oMf[self.jaco_ee_frame_id_pin] + target_up_in_ee_frame = ( + R.from_matrix(ee_transform.rotation).inv().apply(WORLD_UP_VECTOR) + ) + solutions = self._solve_articutool_ik(target_up_in_ee_frame) + + if not solutions: + return False + + return any( + ARTICUTOOL_PITCH_LIMITS_RAD[0] <= pitch <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] <= roll <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + for pitch, roll in solutions + ) + + def _generate_feasible_start_config( + self, max_attempts=200 + ) -> Optional[List[float]]: + """ + Generates a random start configuration that lies on the true feasibility manifold. 
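+        Uses simple rejection sampling: draws uniform random joint configurations
+        and returns the first one that passes the leveling-feasibility oracle,
+        or None if no feasible configuration is found within max_attempts draws.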
+ """ + for _ in range(max_attempts): + config = self.generate_random_joint_config() + if self._is_config_kinematically_feasible(config): + return config + return None + + def generate_random_position_in_workspace(self) -> np.ndarray: + """Generates a random (x, y, z) position from a half-spherical shell.""" + r = ( + np.random.uniform(WORKSPACE_INNER_RADIUS**3, WORKSPACE_OUTER_RADIUS**3) + ) ** (1 / 3) + theta = np.random.uniform(0, 2 * math.pi) + cos_phi = np.random.uniform(0, 1) + phi = math.acos(cos_phi) + return np.array( + [ + r * math.sin(phi) * math.cos(theta), + r * math.sin(phi) * math.sin(theta), + r * cos_phi, + ] + ) + + def generate_random_yaw_quaternion(self) -> Tuple[float, float, float, float]: + """Generates a quaternion representing a random yaw.""" + return tuple(R.from_euler("z", np.random.uniform(-math.pi, math.pi)).as_quat()) + + def _get_articutool_jacobian(self, pitch: float, roll: float) -> np.ndarray: + """Computes the analytical Jacobian for the Articutool.""" + cp, sp = math.cos(pitch), math.sin(pitch) + cr, sr = math.cos(roll), math.sin(roll) + dydp = np.array([0, -sp * cr, cp * cr]) + dydr = np.array([-cr, -cp * sr, -sp * sr]) + return np.vstack([dydp, dydr]).T + + def _calculate_trajectory_metrics( + self, trajectory: JointTrajectory + ) -> Tuple[bool, Optional[TrajectoryMetrics]]: + """ + Calculates detailed metrics for a given trajectory and verifies its feasibility. + Returns a tuple: (is_feasible, metrics_object). + """ + if self.pinocchio_model is None or not trajectory.points: + return False, None + + q = pin.neutral(self.pinocchio_model) + prev_pos = np.array(trajectory.points[0].positions) + waypoints_data, path_len = [], 0.0 + is_path_feasible = True + + for i, point in enumerate(trajectory.points): + if not self._is_config_kinematically_feasible(point.positions): + is_path_feasible = False + # We can break early if we only care about the boolean result, + # but we continue here to gather full data for the failed path. 
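+                # (This is the same feasibility oracle used for start-state
+                # sampling, so a Path Verification Failure means at least one
+                # planned waypoint left the leveling-feasibility manifold.)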
+ + for j, name in enumerate(JOINT_NAMES): + joint_id = self.pinocchio_model.getJointId(name) + joint_obj = self.pinocchio_model.joints[joint_id] + if joint_obj.nq == 2: + q[joint_obj.idx_q : joint_obj.idx_q + 2] = [ + math.cos(point.positions[j]), + math.sin(point.positions[j]), + ] + else: + q[joint_obj.idx_q] = point.positions[j] + + pin.computeAllTerms( + self.pinocchio_model, + self.pinocchio_data, + q, + np.zeros(self.pinocchio_model.nv), + ) + ee_transform = self.pinocchio_data.oMf[self.jaco_ee_frame_id_pin] + target_up = ( + R.from_matrix(ee_transform.rotation).inv().apply(WORLD_UP_VECTOR) + ) + solutions = self._solve_articutool_ik(target_up) + + at_sol, at_vel = None, None + if solutions: + valid_sols = [ + s + for s in solutions + if ARTICUTOOL_PITCH_LIMITS_RAD[0] + <= s[0] + <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] + <= s[1] + <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + ] + if valid_sols: + pitch, roll = min(valid_sols, key=lambda s: s[0] ** 2 + s[1] ** 2) + at_sol = {"pitch": pitch, "roll": roll} + if i > 0: + dt = ( + point.time_from_start.sec + + 1e-9 * point.time_from_start.nanosec + ) - ( + trajectory.points[i - 1].time_from_start.sec + + 1e-9 * trajectory.points[i - 1].time_from_start.nanosec + ) + if dt > EPSILON: + q_dot = ( + np.array(point.positions) + - np.array(trajectory.points[i - 1].positions) + ) / dt + J_full = pin.getFrameJacobian( + self.pinocchio_model, + self.pinocchio_data, + self.jaco_ee_frame_id_pin, + pin.ReferenceFrame.LOCAL, + ) + J_arm = J_full[:, self.jaco_vel_indices] + omega = (J_arm @ q_dot)[3:] + J_atool_inv = np.linalg.pinv( + self._get_articutool_jacobian(pitch, roll) + ) + q_dot_atool = -J_atool_inv @ omega + at_vel = { + "pitch_vel": q_dot_atool[0], + "roll_vel": q_dot_atool[1], + } + + waypoints_data.append( + { + "jaco_positions_rad": list(point.positions), + "ee_pose_world": { + "position": ee_transform.translation.tolist(), + "quat_xyzw": R.from_matrix(ee_transform.rotation) + .as_quat() + .tolist(), + }, + "articutool_solution_rad": at_sol, + "articutool_velocities_rad_per_sec": at_vel, + } + ) + path_len += np.linalg.norm(np.array(point.positions) - prev_pos) + prev_pos = np.array(point.positions) + + duration = ( + trajectory.points[-1].time_from_start.sec + + 1e-9 * trajectory.points[-1].time_from_start.nanosec + ) + metrics = TrajectoryMetrics( + duration, path_len, list(trajectory.points[-1].positions), waypoints_data + ) + return is_path_feasible, metrics + + def run(self): + """Main benchmark execution loop with constraint-aware start state sampling.""" + if not self.pinocchio_model: + LOGGER.error("Benchmark cannot run because Pinocchio model failed to load.") + return + + successful_tasks = 0 + total_attempts = 0 + max_total_attempts = self.num_tasks * 50 + + LOGGER.info( + f"Attempting to collect {self.num_tasks} successful planning tasks..." + ) + + while successful_tasks < self.num_tasks and total_attempts < max_total_attempts: + total_attempts += 1 + LOGGER.info( + f"--- Task {successful_tasks + 1}/{self.num_tasks} (Attempt {total_attempts}) ---" + ) + + # For modes requiring leveling, find a start state on the true manifold. + # For pure joint-space planning, any random start is fine. + if self.mode != "joint_unconstrained": + LOGGER.info( + " Searching for a kinematically feasible start configuration..." + ) + start_pos = self._generate_feasible_start_config() + if start_pos is None: + LOGGER.warn( + " Attempt FAILED: Could not find a feasible start state within max attempts." 
+ ) + self.results.append( + { + "task_id": successful_tasks + 1, + "attempt_id": total_attempts, + "planning_mode": self.mode, + "status": TrialStatus.START_STATE_SEARCH_FAILED.value, + } + ) + continue + LOGGER.info(" Found feasible start state.") + else: + start_pos = self.generate_random_joint_config() + + self.moveit2.clear_goal_constraints() + self.moveit2.clear_path_constraints() + + target_pos, target_quat, goal_joint_pos = None, None, None + if self.mode == "joint_unconstrained": + goal_joint_pos = self.generate_random_joint_config() + self.moveit2.set_joint_goal(joint_positions=goal_joint_pos) + else: + target_pos = self.generate_random_position_in_workspace() + self.moveit2.set_position_goal( + position=target_pos.tolist(), + frame_id=BASE_LINK, + target_link=END_EFFECTOR_LINK, + tolerance=0.01, + ) + if self.mode == "task_pos_yaw_constrained": + target_quat = self.generate_random_yaw_quaternion() + self.moveit2.set_orientation_goal( + quat_xyzw=Quaternion( + x=target_quat[0], + y=target_quat[1], + z=target_quat[2], + w=target_quat[3], + ), + target_link=END_EFFECTOR_LINK, + tolerance=GOAL_YAW_CONSTRAINT_TOLERANCE_XYZ_RAD, + parameterization=1, + ) + if "constrained" in self.mode: + self.moveit2.set_path_orientation_constraint( + quat_xyzw=Quaternion( + x=PATH_CONSTRAINT_QUAT_XYZW[0], + y=PATH_CONSTRAINT_QUAT_XYZW[1], + z=PATH_CONSTRAINT_QUAT_XYZW[2], + w=PATH_CONSTRAINT_QUAT_XYZW[3], + ), + frame_id=BASE_LINK, + target_link=END_EFFECTOR_LINK, + tolerance=PATH_CONSTRAINT_TOLERANCE_XYZ_RAD, + weight=1.0, + parameterization=1, + ) + + planning_start = time.perf_counter() + future = self.moveit2.plan_async(start_joint_state=start_pos) + rclpy.spin_until_future_complete( + self.node, future, timeout_sec=self.moveit2.planning_time + 1.0 + ) + traj = self.moveit2.get_trajectory(future) + planning_time = time.perf_counter() - planning_start + + trial_data = { + "task_id": successful_tasks + 1, + "attempt_id": total_attempts, + "planning_mode": self.mode, + "start_joint_positions": list(start_pos), + "planning_time_s": planning_time, + } + if goal_joint_pos: + trial_data["target_joint_positions"] = list(goal_joint_pos) + if target_pos is not None: + trial_data["target_position"] = target_pos.tolist() + if target_quat: + trial_data["target_yaw_quat"] = list(target_quat) + + if not traj or not traj.points: + LOGGER.warn( + f" Attempt FAILED: Planner did not find a solution in {planning_time:.4f}s." + ) + trial_data["status"] = TrialStatus.PLANNER_FAILURE.value + self.results.append(trial_data) + continue + + is_feasible, metrics = self._calculate_trajectory_metrics(traj) + trial_data["trajectory_metrics"] = metrics._asdict() if metrics else None + + if not is_feasible: + LOGGER.warn( + f" Attempt FAILED: Path verification failed for a {planning_time:.4f}s plan." + ) + trial_data["status"] = TrialStatus.PATH_VERIFICATION_FAILURE.value + self.results.append(trial_data) + continue + + successful_tasks += 1 + LOGGER.info( + f" Plan SUCCEEDED and VERIFIED in {planning_time:.4f}s. ({successful_tasks}/{self.num_tasks} collected)" + ) + trial_data["status"] = TrialStatus.SUCCESS.value + self.results.append(trial_data) + + if successful_tasks < self.num_tasks: + LOGGER.error( + f"Benchmark finished due to max attempts ({max_total_attempts}). Only collected {successful_tasks}/{self.num_tasks} tasks." 
+ ) + + def save_results(self): + """Saves the comprehensive benchmark results to a single JSON file.""" + if not self.output_dir: + return + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + filename = os.path.join( + self.output_dir, f"holistic_benchmark_results_{self.mode}_{timestamp}.json" + ) + try: + with open(filename, "w") as f: + json.dump(self.results, f, indent=2) + LOGGER.info(f"Comprehensive results saved to {filename}") + except Exception as e: + LOGGER.error(f"Failed to save results: {e}") + + status_counts = Counter(r["status"] for r in self.results) + total_attempts = len(self.results) + total_successful = status_counts[TrialStatus.SUCCESS.value] + + LOGGER.info(f"\n--- HOLISTIC BENCHMARK SUMMARY ({self.mode.upper()}) ---") + LOGGER.info(f"Total Attempts: {total_attempts}") + LOGGER.info( + f" - Success: {total_successful} ({total_successful / total_attempts * 100:.2f}%)" + ) + for status in TrialStatus: + if status != TrialStatus.SUCCESS: + count = status_counts[status.value] + if count > 0: + LOGGER.info( + f" - {status.name}: {count} ({count / total_attempts * 100:.2f}%)" + ) + LOGGER.info("---------------------------------------------------\n") + + +def main(): + parser = argparse.ArgumentParser( + description="Holistic benchmark for Articutool planning methodologies." + ) + parser.add_argument( + "xacro_file", type=str, help="Path to the robot URDF/XACRO file." + ) + # MODIFICATION: Add 'all' to choices and update help text + parser.add_argument( + "--mode", + type=str, + required=True, + choices=[ + "joint_unconstrained", + "task_pos_unconstrained", + "task_pos_constrained", + "task_pos_yaw_constrained", + "all", + ], + help="The planning methodology to benchmark, or 'all' to run every mode.", + ) + parser.add_argument( + "--num_tasks", + type=int, + default=100, + help="Number of successful tasks to generate per mode.", + ) + parser.add_argument( + "--timeout", type=float, default=10.0, help="Planning timeout in seconds." 
+ ) + parser.add_argument( + "--output_dir", + type=str, + default="holistic_benchmark_output", + help="Directory to save results.", + ) + args = parser.parse_args() + + if not os.path.exists(args.xacro_file): + print(f"Error: XACRO file not found at '{args.xacro_file}'") + sys.exit(1) + + # MODIFICATION: Logic to handle running a single mode or all modes + scenarios_to_run = [] + if args.mode == "all": + scenarios_to_run = [ + "joint_unconstrained", + "task_pos_unconstrained", + "task_pos_constrained", + "task_pos_yaw_constrained", + ] + else: + scenarios_to_run.append(args.mode) + + rclpy.init() + node = Node("holistic_benchmark_node") + executor = rclpy.executors.MultiThreadedExecutor() + executor.add_node(node) + executor_thread = Thread(target=executor.spin, daemon=True) + executor_thread.start() + + moveit2 = MoveIt2( + node=node, + joint_names=JOINT_NAMES, + base_link_name=BASE_LINK, + end_effector_name=END_EFFECTOR_LINK, + group_name=PLANNING_GROUP, + callback_group=ReentrantCallbackGroup(), + ) + + # MODIFICATION: Loop through the selected scenarios + for mode in scenarios_to_run: + LOGGER.info(f"\n\n===== STARTING BENCHMARK FOR MODE: {mode.upper()} =====\n") + + # Instantiate a new benchmark object for each mode to ensure clean state + benchmark = HolisticBenchmark( + node, + moveit2, + args.xacro_file, + mode, + args.num_tasks, + args.timeout, + "RRTConnectkConfigDefault", + args.output_dir, + ) + + try: + benchmark.run() + benchmark.save_results() + except Exception as e: + LOGGER.error( + f"An unhandled error occurred during the benchmark for mode '{mode}': {e}", + exc_info=True, + ) + + # A brief pause can help separate logs if running multiple modes + time.sleep(2) + + LOGGER.info("All benchmark scenarios complete.") + rclpy.shutdown() + executor_thread.join() + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/holistic_benchmark_analysis.py b/ada_feeding/scripts/holistic_benchmark_analysis.py new file mode 100644 index 00000000..b36a69a3 --- /dev/null +++ b/ada_feeding/scripts/holistic_benchmark_analysis.py @@ -0,0 +1,484 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script analyzes the output of the holistic_benchmark.py script. +It loads one or more JSON result files, concatenates them, and generates +a series of comparative plots to evaluate the different planning methodologies. + +Improvements in this version: +- Centralized constants for plot styling and ordering. +- A helper function to streamline extraction of granular waypoint data. +- New plots for Articutool joint angles, motion smoothness (jerk), and + a 3D visualization of where tasks succeed or fail in the workspace. +- A text-based summary of results printed to the console. +""" + +import argparse +import sys +import os +import pandas as pd +import plotly.express as px +import plotly.graph_objects as go +from plotly.subplots import make_subplots +import json +from typing import List, Dict, Any +import numpy as np + +# --- Plotting Constants --- +# Centralize category order and color maps to ensure consistency across all plots. 
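+# The mode names and status strings below must match the --mode choices and
+# TrialStatus values emitted by holistic_benchmark.py.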
+PLANNING_MODE_ORDER = [ + "joint_unconstrained", + "task_pos_unconstrained", + "task_pos_constrained", + "task_pos_yaw_constrained", +] +STATUS_ORDER = [ + "Success", + "Path Verification Failure", + "Planner Failure", + "Start State Search Failed", +] +STATUS_COLOR_MAP = { + "Success": "#2ca02c", # Green + "Path Verification Failure": "#ff7f0e", # Orange + "Planner Failure": "#d62728", # Red + "Start State Search Failed": "#9467bd", # Purple +} +# Preferred joint ranges for manifold adherence, based on prior analysis. +# Jaco Joint 2 (index 1) prefers negative values. +# Jaco Joint 3 (index 2) prefers positive values. +PREFERRED_JOINT_2_RANGE = (-float("inf"), 0.0) +PREFERRED_JOINT_3_RANGE = (0.0, float("inf")) + + +def load_data(file_paths: List[str]) -> pd.DataFrame: + """Loads and concatenates data from multiple JSON files.""" + all_data = [] + for file_path in file_paths: + print(f"Loading data from {file_path}...") + try: + with open(file_path, "r") as f: + data = json.load(f) + all_data.extend(data) + except Exception as e: + print( + f"Warning: Could not load or parse {file_path}. Error: {e}", + file=sys.stderr, + ) + if not all_data: + print("Error: No data loaded. Exiting.", file=sys.stderr) + sys.exit(1) + print(f"Successfully loaded {len(all_data)} trials from {len(file_paths)} file(s).") + return pd.json_normalize(all_data, sep="_") + + +def _extract_waypoint_data(df: pd.DataFrame) -> pd.DataFrame: + """Helper function to parse and explode waypoint data from the main dataframe.""" + waypoint_rows = [] + for _, row in df.iterrows(): + waypoints = row.get("trajectory_metrics_waypoints_data") + if isinstance(waypoints, list): + for wp in waypoints: + if not isinstance(wp, dict): + continue + + # Extract data from the nested dictionaries with checks + at_sol = wp.get("articutool_solution_rad", {}) or {} + at_vel = wp.get("articutool_velocities_rad_per_sec", {}) or {} + ee_pose = wp.get("ee_pose_world", {}) or {} + + waypoint_rows.append( + { + "planning_mode": row["planning_mode"], + "task_id": row["task_id"], + "jaco_positions": wp.get("jaco_positions_rad"), + "ee_position": ee_pose.get("position"), + "time": wp.get("time_from_start_sec", 0.0), + "pitch_angle": at_sol.get("pitch"), + "roll_angle": at_sol.get("roll"), + "pitch_vel": at_vel.get("pitch_vel"), + "roll_vel": at_vel.get("roll_vel"), + } + ) + return pd.DataFrame(waypoint_rows).dropna(subset=["jaco_positions", "ee_position"]) + + +def print_summary_table(df: pd.DataFrame): + """Prints a text-based summary of trial outcomes to the console.""" + print("\n--- Benchmark Summary ---") + summary = ( + df.groupby("planning_mode")["status"] + .value_counts(normalize=True) + .unstack(fill_value=0) + * 100 + ) + summary["Total_Attempts"] = df.groupby("planning_mode").size() + + # Ensure all status columns exist for consistent printing + for status in STATUS_ORDER: + if status not in summary.columns: + summary[status] = 0.0 + + # Reorder columns for clarity + summary = summary[["Total_Attempts"] + STATUS_ORDER] + + print(summary.to_string(float_format="%.2f%%")) + print("-------------------------\n") + + +def plot_outcomes(df: pd.DataFrame, output_dir: str): + """Generates a stacked bar chart of trial outcomes.""" + print("Generating trial outcomes plot...") + status_counts = df.groupby(["planning_mode", "status"]).size().unstack(fill_value=0) + status_percentages = status_counts.div(status_counts.sum(axis=1), axis=0) * 100 + status_percentages = status_percentages.reset_index() + + fig = px.bar( + status_percentages, + 
x="planning_mode", + y=STATUS_ORDER, + title="Breakdown of Trial Outcomes by Planning Methodology", + labels={"value": "Percentage of Trials (%)"}, + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + color_discrete_map=STATUS_COLOR_MAP, + ) + fig.update_layout( + barmode="stack", + yaxis_title="Percentage of Total Trials (%)", + xaxis_title="Planning Methodology", + legend_title_text="Trial Outcome", + ) + output_path = os.path.join(output_dir, "comparative_outcomes.html") + fig.write_html(output_path) + print(f"Saved outcomes plot to {output_path}") + + +def plot_planning_times(df: pd.DataFrame, output_dir: str): + """Generates a box plot of planning times for successful trials.""" + print("Generating planning time plot...") + success_df = df[df["status"] == "Success"] + if success_df.empty: + print("Warning: No successful trials found. Skipping planning time plot.") + return + + fig = px.box( + success_df, + x="planning_mode", + y="planning_time_s", + color="planning_mode", + title="Planning Time for Successful Trials", + labels={"planning_time_s": "Planning Time (s)"}, + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + ) + fig.update_layout(xaxis_title="Planning Methodology") + output_path = os.path.join(output_dir, "comparative_planning_times.html") + fig.write_html(output_path) + print(f"Saved planning time plot to {output_path}") + + +def plot_path_lengths(df: pd.DataFrame, output_dir: str): + """Generates a box plot of joint-space path lengths for successful trials.""" + print("Generating path length plot...") + success_df = df[df["status"] == "Success"] + if success_df.empty: + print("Warning: No successful trials found. Skipping path length plot.") + return + + fig = px.box( + success_df, + x="planning_mode", + y="trajectory_metrics_joint_space_path_length_rad", + color="planning_mode", + title="Joint-Space Path Length for Successful Trials", + labels={"trajectory_metrics_joint_space_path_length_rad": "Path Length (rad)"}, + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + ) + fig.update_layout(xaxis_title="Planning Methodology") + output_path = os.path.join(output_dir, "comparative_path_lengths.html") + fig.write_html(output_path) + print(f"Saved path length plot to {output_path}") + + +def plot_articutool_metrics(df: pd.DataFrame, output_dir: str): + """Generates violin plots for Articutool angles and velocities.""" + print("Generating Articutool performance plots (angles and velocities)...") + success_df = df[df["status"] == "Success"].copy() + if success_df.empty: + print("Warning: No successful trials found. Skipping Articutool plots.") + return + + wp_df = _extract_waypoint_data(success_df) + if wp_df.empty: + print("Warning: No valid waypoint data found. 
Skipping Articutool plots.") + return + + # Create a 2x2 subplot + fig = make_subplots( + rows=2, + cols=2, + subplot_titles=( + "Required Pitch Angle", + "Required Roll Angle", + "Required Pitch Velocity", + "Required Roll Velocity", + ), + vertical_spacing=0.15, + ) + + metrics_to_plot = { + (1, 1): ("pitch_angle", "Angle (rad)"), + (1, 2): ("roll_angle", "Angle (rad)"), + (2, 1): ("pitch_vel", "Velocity (rad/s)"), + (2, 2): ("roll_vel", "Velocity (rad/s)"), + } + + for (row, col), (metric, y_title) in metrics_to_plot.items(): + sub_fig = px.violin( + wp_df, + x="planning_mode", + y=metric, + color="planning_mode", + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + ) + for trace in sub_fig.data: + fig.add_trace(trace, row=row, col=col) + fig.update_yaxes(title_text=y_title, row=row, col=col) + fig.update_xaxes( + title_text="Planning Methodology" if row == 2 else "", + showticklabels=True if row == 2 else False, + row=row, + col=col, + ) + + fig.update_layout( + height=800, + title_text="Articutool Performance Metrics During Successful Trajectories", + showlegend=False, + ) + output_path = os.path.join(output_dir, "comparative_articutool_performance.html") + fig.write_html(output_path) + print(f"Saved Articutool performance plot to {output_path}") + + +def plot_manifold_adherence(df: pd.DataFrame, output_dir: str): + """Calculates and plots the 'Manifold Adherence' for each successful trajectory.""" + print("Generating manifold adherence plot...") + success_df = df[df["status"] == "Success"].copy() + if success_df.empty: + print("Warning: No successful trials found. Skipping manifold adherence plot.") + return + + wp_df = _extract_waypoint_data(success_df) + if wp_df.empty: + print( + "Warning: No valid waypoint data found. Skipping manifold adherence plot." + ) + return + + # Calculate adherence for each waypoint + wp_df["is_adherent"] = wp_df.apply( + lambda row: PREFERRED_JOINT_2_RANGE[0] + <= row["jaco_positions"][1] + <= PREFERRED_JOINT_2_RANGE[1] + and PREFERRED_JOINT_3_RANGE[0] + <= row["jaco_positions"][2] + <= PREFERRED_JOINT_3_RANGE[1], + axis=1, + ) + + # Group by trajectory and calculate percentage + adherence_df = ( + wp_df.groupby(["planning_mode", "task_id"])["is_adherent"].mean().reset_index() + ) + adherence_df["adherence_score"] = adherence_df["is_adherent"] * 100 + + fig = px.box( + adherence_df, + x="planning_mode", + y="adherence_score", + color="planning_mode", + title="Manifold Adherence of Successful Trajectories
Percentage of waypoints within preferred ranges for Joints 2 & 3", + labels={"adherence_score": "Adherence Score (%)"}, + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + ) + fig.update_yaxes(range=[-5, 105]) + fig.update_layout(xaxis_title="Planning Methodology") + output_path = os.path.join(output_dir, "comparative_manifold_adherence.html") + fig.write_html(output_path) + print(f"Saved manifold adherence plot to {output_path}") + + +def plot_motion_smoothness(df: pd.DataFrame, output_dir: str): + """Calculates and plots end-effector jerk as a measure of motion smoothness.""" + print("Generating motion smoothness (jerk) plot...") + success_df = df[df["status"] == "Success"].copy() + if success_df.empty: + print("Warning: No successful trials found. Skipping smoothness plot.") + return + + wp_df = _extract_waypoint_data(success_df) + if wp_df.empty: + print("Warning: No valid EE position data found. Skipping smoothness plot.") + return + + wp_df = wp_df.sort_values(by=["planning_mode", "task_id", "time"]) + + # Calculate derivatives (velocity, acceleration, jerk) in a more robust way + # to avoid the multi-column assignment error with groupby. + + # Group by individual trajectory + grouped = wp_df.groupby(["planning_mode", "task_id"]) + + # Calculate delta_t once for efficiency + delta_t = grouped["time"].diff() + + # Position columns + pos_cols = ["vx", "vy", "vz"] + wp_df[pos_cols] = pd.DataFrame(wp_df["ee_position"].tolist(), index=wp_df.index) + + # Velocity + vel_cols = ["vel_x", "vel_y", "vel_z"] + velocity = grouped[pos_cols].diff().div(delta_t, axis=0) + velocity.columns = vel_cols # Rename columns to the target names + wp_df[vel_cols] = velocity + + # Acceleration + acc_cols = ["acc_x", "acc_y", "acc_z"] + acceleration = grouped[vel_cols].diff().div(delta_t, axis=0) + acceleration.columns = acc_cols + wp_df[acc_cols] = acceleration + + # Jerk + jerk_cols = ["jerk_x", "jerk_y", "jerk_z"] + jerk = grouped[acc_cols].diff().div(delta_t, axis=0) + jerk.columns = jerk_cols + wp_df[jerk_cols] = jerk + + wp_df["jerk_magnitude"] = np.linalg.norm(wp_df[jerk_cols].fillna(0), axis=1) + + fig = px.box( + wp_df, + x="planning_mode", + y="jerk_magnitude", + color="planning_mode", + title="Motion Smoothness: End-Effector Jerk", + labels={"jerk_magnitude": "Jerk Magnitude (m/s³)"}, + category_orders={"planning_mode": PLANNING_MODE_ORDER}, + ) + # Use a logarithmic scale for jerk as it can have a very large range, + # and clip the view to a reasonable upper quantile to hide extreme outliers. + fig.update_yaxes(type="log") + fig.update_layout(xaxis_title="Planning Methodology") + output_path = os.path.join(output_dir, "comparative_motion_smoothness.html") + fig.write_html(output_path) + print(f"Saved motion smoothness plot to {output_path}") + + +def plot_workspace_outcomes(df: pd.DataFrame, output_dir: str): + """Generates a 3D scatter plot of where tasks succeed and fail.""" + print("Generating workspace outcomes plot...") + + task_space_df = df[df["target_position"].notna()].copy() + if task_space_df.empty: + print( + "Warning: No trials with target positions found. Skipping workspace plot." + ) + return + + task_space_df[["target_x", "target_y", "target_z"]] = pd.DataFrame( + task_space_df["target_position"].tolist(), index=task_space_df.index + ) + + # --- FIX STARTS HERE --- + # Manually create subplots because facet_col is not supported for 3D scatter plots + # in all versions of Plotly. 
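+    # One 3D scene is created per task-space mode; traces are grouped by status
+    # and share a legendgroup so each outcome appears in the legend only once.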
+ + # We only care about task-space modes for this plot + task_modes = [m for m in PLANNING_MODE_ORDER if "task_pos" in m] + + fig = make_subplots( + rows=1, + cols=len(task_modes), + specs=[[{"type": "scene"}] * len(task_modes)], + subplot_titles=task_modes, + ) + + # Keep track of which legend items have been added to avoid duplicates + legend_added = set() + + for i, mode in enumerate(task_modes): + mode_df = task_space_df[task_space_df["planning_mode"] == mode] + + for status in STATUS_ORDER: + status_df = mode_df[mode_df["status"] == status] + if status_df.empty: + continue + + show_legend_for_trace = status not in legend_added + legend_added.add(status) + + fig.add_trace( + go.Scatter3d( + x=status_df["target_x"], + y=status_df["target_y"], + z=status_df["target_z"], + mode="markers", + marker=dict( + color=STATUS_COLOR_MAP.get(status, "grey"), + size=3, + opacity=0.7, + ), + name=status, + legendgroup=status, + showlegend=show_legend_for_trace, + ), + row=1, + col=i + 1, + ) + + fig.update_layout( + height=600, + title_text="Success and Failure Locations in the Workspace", + legend_title_text="Trial Outcome", + ) + # --- FIX ENDS HERE --- + + output_path = os.path.join(output_dir, "comparative_workspace_outcomes.html") + fig.write_html(output_path) + print(f"Saved workspace outcomes plot to {output_path}") + + +def main(): + parser = argparse.ArgumentParser( + description="Analyze results from the holistic benchmark." + ) + parser.add_argument("json_files", nargs="+", help="One or more JSON result files.") + parser.add_argument( + "--output_dir", + type=str, + default="benchmark_analysis_plots", + help="Directory to save the output HTML plots.", + ) + args = parser.parse_args() + + os.makedirs(args.output_dir, exist_ok=True) + + df = load_data(args.json_files) + + # --- Generate Analysis --- + print_summary_table(df) + plot_outcomes(df, args.output_dir) + plot_planning_times(df, args.output_dir) + plot_path_lengths(df, args.output_dir) + plot_articutool_metrics(df, args.output_dir) + plot_manifold_adherence(df, args.output_dir) + plot_motion_smoothness(df, args.output_dir) + plot_workspace_outcomes(df, args.output_dir) + + print("\nAnalysis complete.") + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/joint_analyzer.py b/ada_feeding/scripts/joint_analyzer.py new file mode 100644 index 00000000..c5776db4 --- /dev/null +++ b/ada_feeding/scripts/joint_analyzer.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script performs a quantitative analysis of the manifold data generated by +manifold_explorer.py. + +For each joint, it creates an overlapping histogram to visualize the distribution +of joint values for both feasible and infeasible configurations. This helps +identify which joints, and which specific ranges of motion for those joints, +are critical for the Articutool's leveling task. +""" + +import argparse +import sys +import os +import pandas as pd +import plotly.graph_objects as go +from plotly.subplots import make_subplots +import math + + +def analyze_joint_distributions(csv_file_path: str, output_dir: str): + """ + Loads manifold data and generates overlapping histograms for each joint. 
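+    Expects the CSV written by manifold_explorer.py: one column per sampled
+    joint (e.g. j1..j6) plus an `is_feasible` column containing 0 or 1.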
+ """ + print(f"Loading data from {csv_file_path}...") + try: + df = pd.read_csv(csv_file_path) + except FileNotFoundError: + print(f"Error: File not found at '{csv_file_path}'", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error reading CSV file: {e}", file=sys.stderr) + sys.exit(1) + + # --- Data Preparation --- + # Get the names of the joint columns (assumes they start with 'j') + joint_columns = [col for col in df.columns if col.startswith("j")] + if not joint_columns: + print( + "Error: No joint columns (e.g., 'j1', 'j2') found in the CSV file.", + file=sys.stderr, + ) + sys.exit(1) + + # Separate the DataFrame into feasible and infeasible sets + feasible_df = df[df["is_feasible"] == 1] + infeasible_df = df[df["is_feasible"] == 0] + + print( + f"Found {len(feasible_df)} feasible and {len(infeasible_df)} infeasible samples." + ) + + # --- Plotting --- + # Create a subplot grid for all joints + num_joints = len(joint_columns) + rows = math.ceil(num_joints / 2) + fig = make_subplots( + rows=rows, + cols=2, + subplot_titles=[f"Joint {i + 1} Distribution" for i in range(num_joints)], + ) + + print("Generating joint distribution plots...") + for i, joint in enumerate(joint_columns): + row = (i // 2) + 1 + col = (i % 2) + 1 + + # Add histogram for feasible configurations + fig.add_trace( + go.Histogram( + x=feasible_df[joint], + name="Feasible", + marker_color="#2ca02c", # Green + opacity=0.7, + legendgroup="feasible", + showlegend=(i == 0), # Show legend only for the first plot + ), + row=row, + col=col, + ) + + # Add histogram for infeasible configurations + fig.add_trace( + go.Histogram( + x=infeasible_df[joint], + name="Infeasible", + marker_color="#d62728", # Red + opacity=0.6, + legendgroup="infeasible", + showlegend=(i == 0), # Show legend only for the first plot + ), + row=row, + col=col, + ) + + fig.update_xaxes(title_text="Joint Value (radians)", row=row, col=col) + fig.update_yaxes(title_text="Count", row=row, col=col) + + # Update the overall layout + fig.update_layout( + height=400 * rows, + width=900, + title_text="Joint Value Distributions for Feasible vs. Infeasible Configurations", + barmode="overlay", # Overlay histograms + legend_title_text="Configuration Status", + ) + + # Save the plot to an HTML file + output_filename = os.path.join( + output_dir, + f"joint_distributions_{os.path.basename(csv_file_path).replace('.csv', '.html')}", + ) + fig.write_html(output_filename) + print(f"\nSuccessfully saved analysis to:\n{output_filename}") + + +def main(): + parser = argparse.ArgumentParser( + description="Analyze and visualize joint distributions from manifold data." + ) + parser.add_argument( + "csv_file", type=str, help="Path to the manifold data CSV file." + ) + parser.add_argument( + "--output_dir", + type=str, + default="manifold_data", + help="Directory to save the output HTML file.", + ) + args = parser.parse_args() + + analyze_joint_distributions(args.csv_file, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/manifold_explorer.py b/ada_feeding/scripts/manifold_explorer.py new file mode 100644 index 00000000..da64cb55 --- /dev/null +++ b/ada_feeding/scripts/manifold_explorer.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script explores the "Leveling Feasibility Manifold" for the Jaco arm +equipped with the Articutool. 
It densely samples the robot's configuration +space and, for each sample, determines if the Articutool can achieve a +level orientation. + +The output is a CSV file containing the sampled joint configurations and their +feasibility, which can then be used for visualization and analysis. +""" + +# Standard imports +from datetime import datetime +import os +import math +import subprocess +import sys +import argparse +import csv +from typing import Optional, List, Tuple + +# Third-party imports +import numpy as np +from scipy.spatial.transform import Rotation as R +import pinocchio as pin + +# --- Constants --- +# These should be adjusted to match your robot's configuration +JOINT_NAMES = [f"j2n6s200_joint_{i + 1}" for i in range(6)] +END_EFFECTOR_LINK = "j2n6s200_end_effector" +WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) +ARTICUTOOL_PITCH_LIMITS_RAD = (-math.pi / 2, math.pi / 2) +ARTICUTOOL_ROLL_LIMITS_RAD = (-math.pi, math.pi) +EPSILON = 1e-6 + + +class ManifoldExplorer: + """ + Samples and analyzes the configuration space of the Jaco arm to find the + Articutool's leveling feasibility manifold. + """ + + def __init__(self, xacro_file_path: str): + self.xacro_file_path = xacro_file_path + self.pinocchio_model: Optional[pin.Model] = None + self.pinocchio_data: Optional[pin.Data] = None + self.jaco_ee_frame_id_pin: Optional[int] = None + self.joint_limits: List[Tuple[float, float]] = [] + + self._initialize_pinocchio() + if self.pinocchio_model: + self.joint_limits = self._get_joint_limits() + + def _initialize_pinocchio(self): + """Loads the robot model from a XACRO file into Pinocchio.""" + print("Initializing Pinocchio model from XACRO...") + try: + # Use ros2 run to convert xacro to urdf in memory + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", self.xacro_file_path], + check=True, + capture_output=True, + text=True, + ) + urdf_xml_string = process.stdout + self.pinocchio_model = pin.buildModelFromXML(urdf_xml_string) + self.pinocchio_data = self.pinocchio_model.createData() + self.jaco_ee_frame_id_pin = self.pinocchio_model.getFrameId( + END_EFFECTOR_LINK + ) + print("Pinocchio model loaded successfully.") + except FileNotFoundError: + print( + f"Error: 'ros2' command not found. 
Is ROS 2 sourced?", file=sys.stderr + ) + sys.exit(1) + except subprocess.CalledProcessError as e: + print(f"Error running xacro: {e}", file=sys.stderr) + print(f"Stderr: {e.stderr}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Failed to initialize Pinocchio model: {e}", file=sys.stderr) + self.pinocchio_model = None + + def _get_joint_limits(self) -> List[Tuple[float, float]]: + """Retrieves joint limits from the Pinocchio model.""" + if self.pinocchio_model is None: + return [(-math.pi, math.pi)] * len(JOINT_NAMES) + + limits = [] + for name in JOINT_NAMES: + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + # In Pinocchio, joint indices for position/velocity can differ + idx_q = self.pinocchio_model.joints[joint_id].idx_q + limits.append( + ( + self.pinocchio_model.lowerPositionLimit[idx_q], + self.pinocchio_model.upperPositionLimit[idx_q], + ) + ) + else: + print( + f"Warning: Joint '{name}' not found in Pinocchio model.", + file=sys.stderr, + ) + limits.append((-math.pi, math.pi)) # Default limit + return limits + + def generate_random_joint_config(self) -> List[float]: + """Generates a random joint configuration within limits.""" + return [np.random.uniform(low, high) for low, high in self.joint_limits] + + def _solve_articutool_ik( + self, target_vector: np.ndarray + ) -> List[Tuple[float, float]]: + """Analytical IK solver for the Articutool.""" + vx, vy, vz = target_vector + solutions = [] + + # Check if arcsin argument is valid + asin_arg = -vx + if not (-1.0 - EPSILON <= asin_arg <= 1.0 + EPSILON): + return [] + + # Clamp the argument to be safe + theta_r1 = math.asin(np.clip(asin_arg, -1.0, 1.0)) + # Find the second solution for roll + theta_r2 = (math.pi - theta_r1 + math.pi) % (2 * math.pi) - math.pi + + for theta_r in list(set([theta_r1, theta_r2])): + cos_tr = math.cos(theta_r) + # Handle the case where cos(theta_r) is near zero + if math.isclose(cos_tr, 0.0, abs_tol=EPSILON): + # If cos(theta_r) is 0, then vy and vz must also be 0 for a solution + if math.isclose(vy, 0.0, abs_tol=EPSILON) and math.isclose( + vz, 0.0, abs_tol=EPSILON + ): + # Pitch can be anything, but 0 is a reasonable choice + solutions.append( + (0.0, (theta_r + math.pi) % (2 * math.pi) - math.pi) + ) + continue + + # General solution for pitch + theta_p = math.atan2(vz, vy) + solutions.append( + ( + (theta_p + math.pi) % (2 * math.pi) - math.pi, + (theta_r + math.pi) % (2 * math.pi) - math.pi, + ) + ) + return solutions + + def check_feasibility(self, jaco_joint_config: List[float]) -> bool: + """ + Checks if the Articutool can maintain leveling at a single Jaco configuration. + Returns True if a valid Articutool joint configuration exists, False otherwise. + """ + if ( + self.pinocchio_model is None + or self.pinocchio_data is None + or self.jaco_ee_frame_id_pin is None + ): + return False + + q = pin.neutral(self.pinocchio_model) + for j, name in enumerate(JOINT_NAMES): + if self.pinocchio_model.existJointName(name): + joint_id = self.pinocchio_model.getJointId(name) + joint_obj = self.pinocchio_model.joints[joint_id] + # Handle different joint types (revolute vs. prismatic, etc.) 
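+                # Pinocchio represents unbounded (continuous) revolute joints
+                # with nq == 2, storing the angle as (cos(q), sin(q)); bounded
+                # revolute joints use a single scalar configuration entry.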
+ if joint_obj.nq == 2 and not joint_obj.shortname().startswith( + "JointModelRX" + ): # e.g., for SO(2) joints + q[joint_obj.idx_q : joint_obj.idx_q + 2] = [ + math.cos(jaco_joint_config[j]), + math.sin(jaco_joint_config[j]), + ] + else: # Standard revolute joint + q[joint_obj.idx_q] = jaco_joint_config[j] + + # Run forward kinematics to get frame placements + pin.forwardKinematics(self.pinocchio_model, self.pinocchio_data, q) + pin.updateFramePlacements(self.pinocchio_model, self.pinocchio_data) + + # Get the end-effector's transformation matrix + ee_transform = self.pinocchio_data.oMf[self.jaco_ee_frame_id_pin] + + # Determine the "up" vector in the end-effector's frame + target_up_in_ee_frame = ( + R.from_matrix(ee_transform.rotation).inv().apply(WORLD_UP_VECTOR) + ) + + # Solve the Articutool's IK for this target vector + solutions = self._solve_articutool_ik(target_up_in_ee_frame) + + if not solutions: + return False + + # Check if any solution is within the Articutool's joint limits + return any( + ARTICUTOOL_PITCH_LIMITS_RAD[0] <= pitch <= ARTICUTOOL_PITCH_LIMITS_RAD[1] + and ARTICUTOOL_ROLL_LIMITS_RAD[0] <= roll <= ARTICUTOOL_ROLL_LIMITS_RAD[1] + for pitch, roll in solutions + ) + + def run_exploration(self, num_samples: int, output_file: str): + """ + Main exploration loop. Samples the C-space and saves results to a file. + """ + if not self.pinocchio_model: + print( + "Cannot run exploration: Pinocchio model not loaded.", file=sys.stderr + ) + return + + print(f"Starting exploration of {num_samples} samples...") + results = [] + header = [f"j{i + 1}" for i in range(len(JOINT_NAMES))] + ["is_feasible"] + + for i in range(num_samples): + config = self.generate_random_joint_config() + is_feasible = self.check_feasibility(config) + results.append(config + [1 if is_feasible else 0]) + + if (i + 1) % 1000 == 0: + print(f" ...processed {i + 1}/{num_samples} samples.") + + print(f"Exploration complete. Saving results to {output_file}...") + try: + with open(output_file, "w", newline="") as f: + writer = csv.writer(f) + writer.writerow(header) + writer.writerows(results) + print("Successfully saved results.") + except IOError as e: + print(f"Error saving file: {e}", file=sys.stderr) + + +def main(): + parser = argparse.ArgumentParser( + description="Explore the Articutool's leveling feasibility manifold." + ) + parser.add_argument( + "xacro_file", type=str, help="Path to the robot URDF/XACRO file." + ) + parser.add_argument( + "--num_samples", + type=int, + default=50000, + help="Number of random configurations to sample.", + ) + parser.add_argument( + "--output_dir", + type=str, + default="manifold_data", + help="Directory to save the output CSV file.", + ) + args = parser.parse_args() + + if not os.path.exists(args.xacro_file): + print(f"Error: XACRO file not found at '{args.xacro_file}'", file=sys.stderr) + sys.exit(1) + + os.makedirs(args.output_dir, exist_ok=True) + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + output_file = os.path.join(args.output_dir, f"manifold_samples_{timestamp}.csv") + + explorer = ManifoldExplorer(args.xacro_file) + explorer.run_exploration(args.num_samples, output_file) + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/manifold_visualizer.py b/ada_feeding/scripts/manifold_visualizer.py new file mode 100644 index 00000000..3f0c21ef --- /dev/null +++ b/ada_feeding/scripts/manifold_visualizer.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. 
See LICENSE.md file in root directory. + +""" +This script visualizes the high-dimensional feasibility manifold data generated +by manifold_explorer.py. It uses t-SNE for dimensionality reduction to project +the 6D joint space data into a 2D plot. + +The resulting interactive plot helps in understanding the structure, size, and +fragmentation of the feasible configuration space. +""" + +import argparse +import sys +import os +import pandas as pd +from sklearn.manifold import TSNE +import plotly.express as px + + +def visualize_manifold(csv_file_path: str, output_dir: str): + """ + Loads manifold data, performs t-SNE, and generates an interactive plot. + """ + print(f"Loading data from {csv_file_path}...") + try: + df = pd.read_csv(csv_file_path) + except FileNotFoundError: + print(f"Error: File not found at '{csv_file_path}'", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error reading CSV file: {e}", file=sys.stderr) + sys.exit(1) + + # Separate features (joint values) and labels (feasibility) + features = df.drop(columns=["is_feasible"]) + labels = df["is_feasible"] + + # --- Basic Analysis: Feasibility Ratio --- + feasibility_ratio = labels.mean() + print(f"\nFeasibility Ratio: {feasibility_ratio:.2%}") + print(f"Out of {len(df)} samples, {int(labels.sum())} were feasible.") + + # --- Dimensionality Reduction with t-SNE --- + # Note: t-SNE can be slow on large datasets. For very large files (>50k samples), + # you might consider using a subset of the data for faster iteration. + print("\nPerforming t-SNE for dimensionality reduction (this may take a while)...") + tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300, random_state=42) + features_2d = tsne.fit_transform(features) + print("t-SNE complete.") + + # --- Create a new, combined DataFrame for plotting --- + # This DataFrame will contain the t-SNE components for the plot axes, + # the original joint values for the hover tooltip, and the feasibility label for color. + plot_df = pd.DataFrame(features_2d, columns=["tsne_1", "tsne_2"]) + + # Reset indices of original data to ensure correct concatenation + features.reset_index(drop=True, inplace=True) + labels.reset_index(drop=True, inplace=True) + + plot_df = pd.concat([plot_df, features, labels], axis=1) + plot_df["is_feasible"] = plot_df["is_feasible"].astype( + bool + ) # Convert 0/1 to False/True for a better legend + + # --- Generate the Interactive Plot --- + print("Generating interactive plot...") + fig = px.scatter( + plot_df, # Use the new, combined DataFrame + x="tsne_1", + y="tsne_2", + color="is_feasible", + color_discrete_map={ + True: "rgba(44, 160, 44, 0.7)", # Green, slightly transparent + False: "rgba(214, 39, 40, 0.7)", # Red, slightly transparent + }, + title="2D Visualization of the Articutool Leveling Feasibility Manifold", + labels={"color": "Is Feasible"}, + # The hover_data argument now works correctly because the 'j1', 'j2', etc. + # columns exist in the `plot_df` DataFrame. 
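+        # ":.3f" below is a Plotly format string: joint values in the hover
+        # tooltip are rounded to three decimal places.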
+ hover_data={ + "tsne_1": False, # Hide the t-SNE coordinates from the hover tooltip + "tsne_2": False, + "is_feasible": True, + "j1": ":.3f", + "j2": ":.3f", + "j3": ":.3f", + "j4": ":.3f", + "j5": ":.3f", + "j6": ":.3f", + }, + ) + + fig.update_layout( + xaxis_title="t-SNE Dimension 1", + yaxis_title="t-SNE Dimension 2", + legend_title_text="Feasibility", + plot_bgcolor="rgba(240, 240, 240, 0.95)", + ) + + # Save the plot to an HTML file + output_filename = os.path.join( + output_dir, + f"manifold_visualization_{os.path.basename(csv_file_path).replace('.csv', '.html')}", + ) + fig.write_html(output_filename) + print(f"\nSuccessfully saved visualization to:\n{output_filename}") + + +def main(): + parser = argparse.ArgumentParser( + description="Visualize high-dimensional manifold data using t-SNE." + ) + parser.add_argument( + "csv_file", type=str, help="Path to the manifold data CSV file." + ) + parser.add_argument( + "--output_dir", + type=str, + default="manifold_data", + help="Directory to save the output HTML file.", + ) + args = parser.parse_args() + + visualize_manifold(args.csv_file, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/orientation_constrained_planner_benchmark.py b/ada_feeding/scripts/orientation_constrained_planner_benchmark.py new file mode 100755 index 00000000..8bc3ebf2 --- /dev/null +++ b/ada_feeding/scripts/orientation_constrained_planner_benchmark.py @@ -0,0 +1,1417 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script is used to benchmark planner performance for reaching +pre-defined joint configurations. It has been updated to include: +- Kinematic feasibility checks for a 2-DOF Articutool attached to the end-effector. +- Saving of Articutool's per-waypoint IK solutions (pitch, roll) within the + trajectory file for visualization and detailed analysis. +- Detailed metrics related to Articutool performance and joint utilization. +- Physical movement to start states for pairwise planning tasks. 
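+
+Leveling-feasibility model (this mirrors the _solve_articutool_ik implementation
+below): with the world "up" vector expressed in the Jaco hand frame as
+(vx, vy, vz), the Articutool roll candidates satisfy sin(theta_r) = -vx (both
+asin branches are kept) and the pitch then follows from
+theta_p = atan2(vz / cos(theta_r), vy / cos(theta_r)). A trajectory waypoint is
+considered feasible when at least one (theta_p, theta_r) pair lies within
+ARTICUTOOL_PITCH_LIMITS_RAD and ARTICUTOOL_ROLL_LIMITS_RAD.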
+""" + +# Standard imports +from collections import defaultdict, namedtuple +from datetime import datetime +import os +import sys +import time +from threading import Thread, Lock +from typing import Optional, List, Dict, Tuple, Any +import json +import math # For atan2, asin, cos, sin, pi, isclose + +# Third-party imports +import numpy as np +from pymoveit2 import MoveIt2 +from pymoveit2.robots import kinova +import rclpy +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.node import Node +from rclpy.duration import ( + Duration as RCLPYDuration, +) # Alias to avoid conflict with local Duration +from trajectory_msgs.msg import JointTrajectoryPoint, JointTrajectory +from geometry_msgs.msg import Quaternion, PoseStamped, Pose +import moveit_msgs.msg +from sensor_msgs.msg import JointState +from scipy.spatial.transform import Rotation as R +from rosidl_runtime_py import message_to_ordereddict + +# --- Logging and Path Length Utility --- +_LOGGER_INSTANCE = None + + +def _get_logger(): + global _LOGGER_INSTANCE + if _LOGGER_INSTANCE is None: + if rclpy.ok() and rclpy.utilities.get_default_context().ok(): + _LOGGER_INSTANCE = rclpy.logging.get_logger("planner_benchmark_script") + else: + + class PrintLogger: + def info(self, msg): + print(f"INFO: {msg}") + + def warn(self, msg): + print(f"WARN: {msg}") + + def error(self, msg): + print(f"ERROR: {msg}") + + def debug(self, msg): + print(f"DEBUG: {msg}") + + _LOGGER_INSTANCE = PrintLogger() + return _LOGGER_INSTANCE + + +try: + from ada_feeding.behaviors.moveit2.moveit2_plan import ( + MoveIt2Plan as AdaMoveIt2PlanUtil, + ) + + GET_PATH_LEN_METHOD = AdaMoveIt2PlanUtil.get_path_len +except ImportError: + _get_logger().warn( + "Could not import AdaMoveIt2PlanUtil.get_path_len. Using fallback." + ) + + def fallback_get_path_len( + trajectory: JointTrajectory, exclude_j6_name: Optional[str] = "j2n6s200_joint_6" + ) -> Tuple[Optional[float], Optional[Dict[str, float]]]: + if not trajectory or not trajectory.points or len(trajectory.points) < 1: + return None, None + if len(trajectory.points) < 2: + return 0.0, {name: 0.0 for name in trajectory.joint_names} + total_len_l2_no_j6 = 0.0 + joint_lens = {name: 0.0 for name in trajectory.joint_names} + j6_idx = -1 + if exclude_j6_name and exclude_j6_name in trajectory.joint_names: + try: + j6_idx = trajectory.joint_names.index(exclude_j6_name) + except ValueError: + j6_idx = -1 + prev_positions = np.array(trajectory.points[0].positions) + for point_idx in range(1, len(trajectory.points)): + point = trajectory.points[point_idx] + curr_positions = np.array(point.positions) + if len(curr_positions) != len(prev_positions): + _get_logger().warn( + f"Path length calc: Mismatch joint count at point {point_idx}. Skipping." 
+ ) + prev_positions = curr_positions + continue + diff = np.abs(curr_positions - prev_positions) + diff = np.minimum(diff, 2 * np.pi - diff) + current_segment_geom_dist_sq_no_j6 = 0.0 + for i in range(len(diff)): + joint_lens[trajectory.joint_names[i]] += diff[i] + if i != j6_idx: + current_segment_geom_dist_sq_no_j6 += diff[i] ** 2 + total_len_l2_no_j6 += np.sqrt(current_segment_geom_dist_sq_no_j6) + prev_positions = curr_positions + return total_len_l2_no_j6, joint_lens + + GET_PATH_LEN_METHOD = fallback_get_path_len + +BenchmarkNamedConfig = namedtuple("BenchmarkNamedConfig", ["name", "joint_values"]) +PlanningTask = namedtuple("PlanningTask", ["start_config", "goal_config"]) + +ArticutoolWaypointSolution = namedtuple( + "ArticutoolWaypointSolution", + [ + "waypoint_feasible", # bool: Is Articutool feasible at this specific waypoint? + "pitch_solution_rad", # Optional[float]: Solved pitch angle (theta_p) + "roll_solution_rad", # Optional[float]: Solved roll angle (theta_r) + ], +) + +ArticutoolMetrics = namedtuple( + "ArticutoolMetrics", + [ + "path_feasible", + "min_pitch_rad", + "max_pitch_rad", + "avg_pitch_abs_rad", + "pitch_range_used_percent", + "min_roll_rad", + "max_roll_rad", + "avg_roll_abs_rad", + "roll_range_used_percent", + "num_infeasible_points", + ], +) + +PlanResult = namedtuple( + "PlanResult", + [ + "trajectory", # Original JointTrajectory from planner + "elapsed_time", + "path_length", + "joint_path_lengths", + "max_jaco_hand_roll_deviation", + "jaco_plan_success", + "trajectory_filename", # Filename of the *enhanced* JSON trajectory + "articutool_metrics", # Aggregate metrics for the Articutool over the path + "articutool_solutions_per_waypoint", # List[ArticutoolWaypointSolution] + ], +) + + +class PlannerBenchmark: + NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ = (0.5, 0.5, 0.5, 0.5) + NO_ROLL_CONSTRAINT_TOLERANCE_XYZ_ABS = ( + np.pi * 1.99, + np.pi * 1.99, + np.deg2rad(90.0), + ) + NO_ROLL_CONSTRAINT_WEIGHT = 1.0 + NO_ROLL_PARAMETERIZATION = 0 + R_JACO_HAND_TO_ATOOL_BASE_SCIPY = R.from_euler("z", np.pi / 2) + ARTICUTOOL_PITCH_LIMITS_RAD = (-np.pi / 2, np.pi / 2) + ARTICUTOOL_ROLL_LIMITS_RAD = (-np.pi, np.pi) + WORLD_UP_VECTOR = np.array([0.0, 0.0, 1.0]) + EPSILON = 1e-6 + + def __init__( + self, + node: Node, + moveit2_interface: MoveIt2, + hardcoded_target_configs: Dict[str, List[float]], + planners_to_test: List[str], + initial_joint_config: List[float], + planning_group: str, + end_effector_link: str, + base_link: str, + joint_names_for_group: List[str], + planning_timeout_sec: float = 10.0, + trajectory_save_dir: Optional[str] = None, + joint_state_topic: str = "/joint_states", + continuous_joint_indices: Optional[List[int]] = None, + use_naive_jaco_hand_constraint: bool = True, + ): + self.node = node + self.logger = _get_logger() # Use the global logger + self.moveit2_interface = moveit2_interface + self.planners_to_test = planners_to_test + self.initial_joint_config = initial_joint_config + self.planning_group = planning_group + self.end_effector_link = end_effector_link + self.base_link = base_link + self.joint_names_for_group = joint_names_for_group + self.planning_timeout_sec = planning_timeout_sec + self.joint_state_topic_name = joint_state_topic + self.use_naive_jaco_hand_constraint = use_naive_jaco_hand_constraint + + if continuous_joint_indices is None: + self.continuous_joint_indices = [] + default_continuous = [ + "j2n6s200_joint_1", + "j2n6s200_joint_4", + "j2n6s200_joint_5", + "j2n6s200_joint_6", + ] + for i, name in 
enumerate(self.joint_names_for_group): + if name in default_continuous: + self.continuous_joint_indices.append(i) + self.logger.info( + f"Auto-detected continuous joint indices: {self.continuous_joint_indices}" + ) + else: + self.continuous_joint_indices = continuous_joint_indices + self.logger.info( + f"Using provided continuous joint indices: {self.continuous_joint_indices}" + ) + + self.all_named_configurations = self._process_hardcoded_configurations( + hardcoded_target_configs + ) + self.planning_tasks = self._generate_planning_tasks( + self.all_named_configurations + ) + self.num_tasks = len(self.planning_tasks) + self.results: Dict[Tuple[str, str], Dict[str, PlanResult]] = defaultdict( + lambda: defaultdict(dict) + ) + self.rate = self.node.create_rate(10) + self.naive_jaco_hand_constraint_kwargs = ( + self._get_naive_jaco_hand_constraint_kwargs() + ) + self.trajectory_save_dir = trajectory_save_dir + if self.trajectory_save_dir: + os.makedirs(self.trajectory_save_dir, exist_ok=True) + self.logger.info( + f"Successful trajectories will be saved in: {self.trajectory_save_dir}" + ) + + self._latest_joint_state_msg: Optional[JointState] = None + self._joint_state_lock = Lock() + self.joint_state_sub = self.node.create_subscription( + JointState, self.joint_state_topic_name, self._joint_state_callback, 10 + ) + self.logger.info( + f"Subscribed to '{self.joint_state_topic_name}' for current joint state." + ) + time.sleep(0.5) + self.logger.info( + f"Benchmark initialized for group '{self.planning_group}' and EE '{self.end_effector_link}'." + ) + self.logger.info( + f"Generated {self.num_tasks} planning tasks from {len(self.all_named_configurations)} unique configurations." + ) + self.logger.info(f"Testing with planners: {self.planners_to_test}") + if self.use_naive_jaco_hand_constraint: + self.logger.info( + f"Using NAIVE Jaco hand constraint: Target Quat (xyzw)={self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ}, Tol(xyz_abs)={self.NO_ROLL_CONSTRAINT_TOLERANCE_XYZ_ABS}" + ) + else: + self.logger.info( + "NOT using naive Jaco hand orientation constraint during planning." + ) + self.logger.info( + f"Articutool Pitch Limits (theta_p): {self.ARTICUTOOL_PITCH_LIMITS_RAD} rad" + ) + self.logger.info( + f"Articutool Roll Limits (theta_r): {self.ARTICUTOOL_ROLL_LIMITS_RAD} rad" + ) + + def _joint_state_callback(self, msg: JointState): + # ... (implementation from previous version) ... + if not self.joint_names_for_group: + return + msg_joint_names_set = set(msg.name) + all_required_joints_present = all( + req_joint_name in msg_joint_names_set + for req_joint_name in self.joint_names_for_group + ) + if all_required_joints_present: + with self._joint_state_lock: + self._latest_joint_state_msg = msg + + def get_current_joint_positions( + self, timeout_sec: float = 1.0 + ) -> Optional[Dict[str, float]]: + # ... (implementation from previous version) ... 
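+        # Poll the joint-state cache (filled by _joint_state_callback) until a
+        # message containing every joint in self.joint_names_for_group has been
+        # received or timeout_sec elapses; returns a {joint_name: position}
+        # dict on success and None otherwise.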
+ start_time = self.node.get_clock().now() + latest_msg_to_process: Optional[JointState] = None + while rclpy.ok() and (self.node.get_clock().now() - start_time) < RCLPYDuration( + seconds=timeout_sec + ): + with self._joint_state_lock: + if self._latest_joint_state_msg is not None: + latest_msg_to_process = self._latest_joint_state_msg + break + self.node.get_clock().sleep_for(RCLPYDuration(seconds=0.02)) + + if latest_msg_to_process: + current_positions_dict: Dict[str, float] = {} + name_to_pos_map = { + name: latest_msg_to_process.position[i] + for i, name in enumerate(latest_msg_to_process.name) + if i < len(latest_msg_to_process.position) + } + all_planning_group_joints_found = True + for req_name in self.joint_names_for_group: + if req_name in name_to_pos_map: + current_positions_dict[req_name] = name_to_pos_map[req_name] + else: + all_planning_group_joints_found = False + break + if all_planning_group_joints_found: + return current_positions_dict + self.logger.warn( + f"Timed out or no relevant JointState msg on '{self.joint_state_topic_name}' for get_current_joint_positions." + ) + return None + + def _process_hardcoded_configurations( + self, hardcoded_configs: Dict[str, List[float]] + ) -> List[BenchmarkNamedConfig]: + # ... (implementation from previous version) ... + configs = [] + self.logger.info(f"Processing hardcoded named configurations...") + num_expected_joints = len(self.joint_names_for_group) + for name, values in hardcoded_configs.items(): + if isinstance(values, list) and len(values) == num_expected_joints: + configs.append(BenchmarkNamedConfig(name, values)) + else: + self.logger.warn( + f" Skipping hardcoded config '{name}'. Expected {num_expected_joints}, got {len(values) if isinstance(values, list) else type(values)}." + ) + if not configs or len(configs) < 2: + self.logger.error( + f"Not enough valid named configurations provided (found {len(configs)}, need at least 2). Exiting." + ) + sys.exit(1) + return configs + + def _generate_planning_tasks( + self, named_configs: List[BenchmarkNamedConfig] + ) -> List[PlanningTask]: + # ... (implementation from previous version) ... + tasks = [] + for start_config in named_configs: + for goal_config in named_configs: + if start_config.name != goal_config.name: + tasks.append(PlanningTask(start_config, goal_config)) + self.logger.info( + f"Generated {len(tasks)} planning tasks (all pairs, start != goal)." + ) + return tasks + + def _get_naive_jaco_hand_constraint_kwargs(self) -> Dict[str, Any]: + # ... (implementation from previous version) ... + return { + "quat_xyzw": Quaternion( + x=self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ[0], + y=self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ[1], + z=self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ[2], + w=self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ[3], + ), + "frame_id": self.base_link, + "target_link": self.end_effector_link, + "tolerance": self.NO_ROLL_CONSTRAINT_TOLERANCE_XYZ_ABS, + "weight": self.NO_ROLL_CONSTRAINT_WEIGHT, + "parameterization": self.NO_ROLL_PARAMETERIZATION, + } + + def _normalize_angle(self, angle: float) -> float: + return (angle + np.pi) % (2 * np.pi) - np.pi + + def _solve_articutool_ik( + self, target_vector_in_jaco_hand_frame: np.ndarray + ) -> List[Tuple[float, float]]: + # ... (implementation from previous version, using math module) ... 
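+        # Closed-form solve, as implemented below: roll candidates come from
+        # sin(theta_r) = -vx (both asin branches are enumerated), and for each
+        # candidate the pitch is theta_p = atan2(vz / cos(theta_r), vy / cos(theta_r)).
+        # For example, a target vector of (0.0, 0.0, 1.0) gives theta_r = 0 and
+        # theta_p = atan2(1, 0) = pi/2 on the primary branch. When cos(theta_r)
+        # is ~0 the pitch is unobservable; that singular case is only accepted
+        # if the target vector is (+/-1, 0, 0), with theta_p defaulting to 0.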
+ vx, vy, vz = target_vector_in_jaco_hand_frame + solutions: List[Tuple[float, float]] = [] + if not (-1.0 - self.EPSILON <= -vx <= 1.0 + self.EPSILON): + return [] + asin_arg = np.clip(-vx, -1.0, 1.0) + theta_r_cand1 = math.asin(asin_arg) + theta_r_cand2 = self._normalize_angle(math.pi - theta_r_cand1) + candidate_thetas_r = [theta_r_cand1] + if not math.isclose(theta_r_cand1, theta_r_cand2, abs_tol=self.EPSILON): + candidate_thetas_r.append(theta_r_cand2) + + for theta_r in candidate_thetas_r: + cos_theta_r = math.cos(theta_r) + theta_p_sol: float = 0.0 + if math.isclose(cos_theta_r, 0.0, abs_tol=self.EPSILON): + if ( + math.isclose(abs(vx), 1.0, abs_tol=self.EPSILON) + and math.isclose(vy, 0.0, abs_tol=self.EPSILON) + and math.isclose(vz, 0.0, abs_tol=self.EPSILON) + ): + solutions.append((theta_p_sol, theta_r)) + else: + theta_p_sol = math.atan2(vz / cos_theta_r, vy / cos_theta_r) + solutions.append((theta_p_sol, theta_r)) + return solutions + + def _check_articutool_feasibility_at_waypoint( + self, R_world_jaco_hand: R + ) -> ArticutoolWaypointSolution: + target_vector_in_jaco_hand = R_world_jaco_hand.inv().apply(self.WORLD_UP_VECTOR) + ik_solutions = self._solve_articutool_ik(target_vector_in_jaco_hand) + valid_solutions_in_limits = [] + for theta_p, theta_r in ik_solutions: + theta_p_norm = self._normalize_angle(theta_p) + theta_r_norm = self._normalize_angle(theta_r) + if ( + self.ARTICUTOOL_PITCH_LIMITS_RAD[0] - self.EPSILON + <= theta_p_norm + <= self.ARTICUTOOL_PITCH_LIMITS_RAD[1] + self.EPSILON + and self.ARTICUTOOL_ROLL_LIMITS_RAD[0] - self.EPSILON + <= theta_r_norm + <= self.ARTICUTOOL_ROLL_LIMITS_RAD[1] + self.EPSILON + ): + valid_solutions_in_limits.append((theta_p_norm, theta_r_norm)) + if not valid_solutions_in_limits: + return ArticutoolWaypointSolution(False, None, None) + best_sol = min(valid_solutions_in_limits, key=lambda s: s[0] ** 2 + s[1] ** 2) + return ArticutoolWaypointSolution(True, best_sol[0], best_sol[1]) + + def _analyze_trajectory_for_articutool( + self, trajectory: Optional[JointTrajectory] + ) -> Tuple[ArticutoolMetrics, List[ArticutoolWaypointSolution]]: + # Default return values + default_metrics = ArticutoolMetrics(False, *([np.nan] * 8), 0) + default_solutions_per_wp: List[ArticutoolWaypointSolution] = [] + + if not trajectory or not trajectory.points: + return default_metrics, default_solutions_per_wp + + required_pitches_rad: List[float] = [] + required_rolls_rad: List[float] = [] + path_is_articutool_feasible = True + num_infeasible_wps = 0 + per_waypoint_solutions: List[ArticutoolWaypointSolution] = [] + + for point_idx, point in enumerate(trajectory.points): + current_wp_solution = ArticutoolWaypointSolution(False, None, None) + if len(point.positions) != len(self.joint_names_for_group): + self.logger.warn( + f"Articutool Metrics: Mismatch joint count at point {point_idx}." + ) + path_is_articutool_feasible = False + num_infeasible_wps += 1 + else: + jaco_joint_positions = list(point.positions) + try: + fk_results: Optional[List[PoseStamped]] = ( + self.moveit2_interface.compute_fk( + joint_state=jaco_joint_positions, + fk_link_names=[self.end_effector_link], + ) + ) + if not fk_results or not fk_results[0]: + self.logger.warn( + f"Articutool Metrics: FK failed for Jaco hand at point {point_idx}." 
+ ) + path_is_articutool_feasible = False + num_infeasible_wps += 1 + else: + jaco_hand_pose_msg: Pose = fk_results[0].pose + R_world_jaco_hand = R.from_quat( + [ + jaco_hand_pose_msg.orientation.x, + jaco_hand_pose_msg.orientation.y, + jaco_hand_pose_msg.orientation.z, + jaco_hand_pose_msg.orientation.w, + ] + ) + current_wp_solution = ( + self._check_articutool_feasibility_at_waypoint( + R_world_jaco_hand + ) + ) + if not current_wp_solution.waypoint_feasible: + path_is_articutool_feasible = False + num_infeasible_wps += 1 + else: + if current_wp_solution.pitch_solution_rad is not None: + required_pitches_rad.append( + current_wp_solution.pitch_solution_rad + ) + if current_wp_solution.roll_solution_rad is not None: + required_rolls_rad.append( + current_wp_solution.roll_solution_rad + ) + except Exception as e: + self.logger.error( + f"Articutool Metrics: Error at point {point_idx}: {e}" + ) + path_is_articutool_feasible = False + num_infeasible_wps += 1 + per_waypoint_solutions.append(current_wp_solution) + + p_min_lim, p_max_lim = self.ARTICUTOOL_PITCH_LIMITS_RAD + r_min_lim, r_max_lim = self.ARTICUTOOL_ROLL_LIMITS_RAD + pitch_joint_range = p_max_lim - p_min_lim + roll_joint_range = r_max_lim - r_min_lim + min_p_val, max_p_val, avg_abs_p_val, pitch_range_used_val = [np.nan] * 4 + if required_pitches_rad: + min_p_val, max_p_val = ( + np.min(required_pitches_rad), + np.max(required_pitches_rad), + ) + avg_abs_p_val = np.mean(np.abs(required_pitches_rad)) + pitch_range_used_val = ( + ((max_p_val - min_p_val) / pitch_joint_range * 100) + if pitch_joint_range > self.EPSILON + else 0.0 + ) + min_r_val, max_r_val, avg_abs_r_val, roll_range_used_val = [np.nan] * 4 + if required_rolls_rad: + min_r_val, max_r_val = ( + np.min(required_rolls_rad), + np.max(required_rolls_rad), + ) + avg_abs_r_val = np.mean(np.abs(required_rolls_rad)) + roll_range_used_val = ( + ((max_r_val - min_r_val) / roll_joint_range * 100) + if roll_joint_range > self.EPSILON + else 0.0 + ) + + aggregate_metrics = ArticutoolMetrics( + path_is_articutool_feasible, + min_p_val, + max_p_val, + avg_abs_p_val, + pitch_range_used_val, + min_r_val, + max_r_val, + avg_abs_r_val, + roll_range_used_val, + num_infeasible_wps, + ) + return aggregate_metrics, per_waypoint_solutions + + def _calculate_max_jaco_hand_roll_deviation( + self, trajectory: Optional[JointTrajectory] + ) -> Optional[float]: + # ... (implementation from previous version, ensure it uses self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ) ... 
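+        # For each waypoint, run FK on the end-effector link, compose the
+        # rotation between the naive no-roll target orientation
+        # (NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ) and the achieved
+        # orientation, and track the largest |z Euler angle| of that difference
+        # rotation as the roll deviation; an FK error makes the metric float("inf").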
+ if trajectory is None or not trajectory.points: + return None + max_abs_roll_deviation = 0.0 + target_quat_xyzw_array = np.array( + self.NO_ROLL_CONSTRAINT_TARGET_QUATERNION_XYWZ + ) + target_ee_orientation_scipy = R.from_quat(target_quat_xyzw_array) + for point_idx, point in enumerate(trajectory.points): + if len(point.positions) != len(self.joint_names_for_group): + continue + joint_positions_list = list(point.positions) + try: + result_poses: Optional[List[PoseStamped]] = ( + self.moveit2_interface.compute_fk( + joint_state=joint_positions_list, + fk_link_names=[self.end_effector_link], + ) + ) + if not result_poses or not result_poses[0]: + continue + q_msg = result_poses[0].pose.orientation + actual_ee_orientation_scipy = R.from_quat( + [q_msg.x, q_msg.y, q_msg.z, q_msg.w] + ) + diff_rotation = ( + target_ee_orientation_scipy.inv() * actual_ee_orientation_scipy + ) + euler_angles_of_diff = diff_rotation.as_euler("xyz", degrees=False) + current_roll_deviation = self._normalize_angle(euler_angles_of_diff[2]) + max_abs_roll_deviation = max( + max_abs_roll_deviation, abs(current_roll_deviation) + ) + except Exception as e: + self.logger.error( + f"Jaco Hand Roll Dev FK: Error at point {point_idx}: {e}" + ) + return float("inf") + return max_abs_roll_deviation + + def _get_path_length_stats( + self, trajectory: Optional[JointTrajectory] + ) -> Tuple[Optional[float], Optional[Dict[str, float]]]: + # ... (implementation from previous version) ... + if trajectory is None or not trajectory.points: + return None, None + return GET_PATH_LEN_METHOD(trajectory) + + def _save_trajectory_to_file( + self, + original_trajectory: JointTrajectory, # Original Jaco trajectory + articutool_solutions_per_wp: List[ + ArticutoolWaypointSolution + ], # Per-waypoint solutions + start_config_name: str, + goal_config_name: str, + planner_id_str: str, + ) -> Optional[str]: + if not self.trajectory_save_dir: + return None + try: + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S-%f") + safe_start_name = start_config_name.replace(" ", "_").replace("/", "_") + safe_goal_name = goal_config_name.replace(" ", "_").replace("/", "_") + safe_planner_id_str = planner_id_str.replace(" ", "_").replace("/", "_") + filename = f"{safe_start_name}_to_{safe_goal_name}_{safe_planner_id_str}_{timestamp}_enhanced.json" + filepath = os.path.join(self.trajectory_save_dir, filename) + + enhanced_trajectory_data: Dict[str, Any] = { + "jaco_joint_names": list(original_trajectory.joint_names), + "waypoints": [], + } + + num_jaco_points = len(original_trajectory.points) + num_articutool_solutions = len(articutool_solutions_per_wp) + + if num_jaco_points != num_articutool_solutions: + self.logger.warn( + f"Mismatch between Jaco trajectory points ({num_jaco_points}) and " + f"Articutool solutions ({num_articutool_solutions}) for {filename}. " + f"Saving only up to the minimum length." 
+ ) + + min_len = min(num_jaco_points, num_articutool_solutions) + + for i in range(min_len): + jaco_point = original_trajectory.points[i] + at_solution = articutool_solutions_per_wp[i] + + waypoint_data = { + "time_from_start_sec": jaco_point.time_from_start.sec + + jaco_point.time_from_start.nanosec * 1e-9, + "jaco_positions_rad": list(jaco_point.positions), + "jaco_velocities_rad_per_sec": ( + list(jaco_point.velocities) if jaco_point.velocities else [] + ), + "jaco_accelerations_rad_per_sec2": ( + list(jaco_point.accelerations) + if jaco_point.accelerations + else [] + ), + "articutool_waypoint_feasible": at_solution.waypoint_feasible, + "articutool_pitch_solution_rad": ( + at_solution.pitch_solution_rad + if at_solution.pitch_solution_rad is not None + else None + ), # Ensure None is JSON null + "articutool_roll_solution_rad": ( + at_solution.roll_solution_rad + if at_solution.roll_solution_rad is not None + else None + ), + } + enhanced_trajectory_data["waypoints"].append(waypoint_data) + + with open(filepath, "w") as f: + json.dump(enhanced_trajectory_data, f, indent=2) + self.logger.info( + f" Successfully saved ENHANCED trajectory to: {filepath}" + ) + return filename + except Exception as e: + self.logger.error( + f" Failed to save ENHANCED trajectory for {start_config_name} to {goal_config_name} ({planner_id_str}): {e}", + exc_info=True, + ) + return None + + def plan_to_target_configuration( + self, + goal_joints: List[float], + planner_id_str: str, + path_constraints_kwargs_to_apply: Optional[Dict[str, Any]] = None, + ) -> Tuple[Optional[JointTrajectory], float]: + # ... (implementation from previous version, ensure self.rate.sleep() is time.sleep() or handled by executor) ... + self.logger.info( + f" Attempting to plan with: {planner_id_str} to GOAL: {np.round(goal_joints, 3).tolist()}" + ) + if planner_id_str.lower() == "chomp": + self.moveit2_interface.pipeline_id = "chomp" + self.moveit2_interface.planner_id = "chomp" + elif planner_id_str.lower() == "stomp": + self.moveit2_interface.pipeline_id = "stomp" + self.moveit2_interface.planner_id = "" + else: + self.moveit2_interface.pipeline_id = "ompl" + self.moveit2_interface.planner_id = planner_id_str + self.moveit2_interface.allowed_planning_time = self.planning_timeout_sec + self.moveit2_interface.clear_goal_constraints() + self.moveit2_interface.clear_path_constraints() + try: + self.moveit2_interface.set_joint_goal( + joint_positions=goal_joints, + joint_names=self.joint_names_for_group, + tolerance=0.01, + weight=1.0, + ) + except Exception as e: + self.logger.error(f" Failed to set joint goal: {e}") + return None, 0.0 + if path_constraints_kwargs_to_apply: + try: + self.moveit2_interface.set_path_orientation_constraint( + **path_constraints_kwargs_to_apply + ) + except Exception as e: + self.logger.error(f" Failed to set path orientation constraint: {e}") + start_time_ros = self.node.get_clock().now() + future = self.moveit2_interface.plan_async(start_joint_state=None) + joint_trajectory_for_analysis: Optional[JointTrajectory] = None + timeout_duration_rclpy = RCLPYDuration(seconds=self.planning_timeout_sec + 5.0) + wait_start_time_rclpy = self.node.get_clock().now() + while rclpy.ok() and not future.done(): + elapsed_wait = self.node.get_clock().now() - wait_start_time_rclpy + if elapsed_wait >= timeout_duration_rclpy: + self.logger.warn( + f" Planner {planner_id_str} timed out after {elapsed_wait.nanoseconds / 1e9:.2f}s waiting for future." 
+ ) + if ( + hasattr(future, "cancel") + and callable(future.cancel) + and not future.cancelled() + ): + future.cancel() + break + time.sleep(0.05) # Yield for other ROS processing + if future.done() and not future.cancelled(): + try: + plan_result_srv_response = future.result() + if ( + plan_result_srv_response.motion_plan_response.error_code.val + == moveit_msgs.msg.MoveItErrorCodes.SUCCESS + ): + traj_msg = plan_result_srv_response.motion_plan_response.trajectory + if traj_msg and traj_msg.joint_trajectory.points: + joint_trajectory_for_analysis = traj_msg.joint_trajectory + self.logger.info(f" Planner {planner_id_str} succeeded.") + else: + self.logger.warn( + f" Planner {planner_id_str} succeeded but returned empty trajectory." + ) + else: + self.logger.warn( + f" Planner {planner_id_str} failed with MoveItErrorCode: {plan_result_srv_response.motion_plan_response.error_code.val}" + ) + except Exception as e: + self.logger.error( + f" Exception getting plan result for {planner_id_str}: {e}" + ) + elif future.cancelled(): + self.logger.warn(f" Planning for {planner_id_str} was cancelled.") + elapsed_time_sec = ( + self.node.get_clock().now() - start_time_ros + ).nanoseconds / 1e9 + self.moveit2_interface.clear_path_constraints() + self.moveit2_interface.clear_goal_constraints() + return joint_trajectory_for_analysis, elapsed_time_sec + + def _are_angles_close( + self, angle1: float, angle2: float, tolerance: float, is_continuous: bool + ) -> bool: + # ... (implementation from previous version) ... + if is_continuous: + diff = self._normalize_angle(angle1 - angle2) + return abs(diff) <= tolerance + else: + return abs(angle1 - angle2) <= tolerance + + def _move_to_config_blocking( + self, + target_config_name: str, + target_joints: List[float], + verification_timeout_sec: float = 10.0, + verification_poll_interval_sec: float = 1.0, + verification_tolerance: float = 0.05, + max_move_attempts: int = 3, + ): + # ... (implementation from previous version) ... + self.logger.info( + f"Attempting to move to config: '{target_config_name}' target: {np.round(target_joints, 4).tolist()}" + ) + original_planner, original_pipeline, original_timeout = ( + self.moveit2_interface.planner_id, + self.moveit2_interface.pipeline_id, + self.moveit2_interface.allowed_planning_time, + ) + self.moveit2_interface.pipeline_id = "ompl" + self.moveit2_interface.planner_id = "RRTConnectkConfigDefault" + self.moveit2_interface.allowed_planning_time = max( + 10.0, self.planning_timeout_sec + ) + achieved_target = False + last_known_joints_str = "N/A" + last_exception_detail = "No attempts." + for attempt in range(max_move_attempts): + self.logger.info( + f"Move attempt {attempt + 1}/{max_move_attempts} to '{target_config_name}'." + ) + try: + self.moveit2_interface.clear_goal_constraints() + self.moveit2_interface.clear_path_constraints() + self.moveit2_interface.move_to_configuration( + joint_positions=target_joints, + joint_names=self.joint_names_for_group, + tolerance=0.01, + ) + self.logger.info( + f" Move_to_configuration call for '{target_config_name}' completed. Verifying..." 
+ ) + # Verification loop + verif_start_time = self.node.get_clock().now() + achieved_this_attempt = False + num_verif_checks = max( + 3, + int(verification_timeout_sec / verification_poll_interval_sec) + 1, + ) + for v_idx in range(num_verif_checks): + current_joints_dict = self.get_current_joint_positions( + timeout_sec=0.5 + ) + if current_joints_dict: + current_joints_list = [ + current_joints_dict[name] + for name in self.joint_names_for_group + if name in current_joints_dict + ] + if len(current_joints_list) != len(self.joint_names_for_group): + last_known_joints_str = "Partial state." + self.node.get_clock().sleep_for( + RCLPYDuration(seconds=verification_poll_interval_sec) + ) + continue + last_known_joints_str = str( + np.round(current_joints_list, 4).tolist() + ) + all_close = True + for j_idx, (tgt, act) in enumerate( + zip(target_joints, current_joints_list) + ): + if not self._are_angles_close( + tgt, + act, + verification_tolerance, + j_idx in self.continuous_joint_indices, + ): + all_close = False + break + if all_close: + self.logger.info( + f" Successfully verified robot reached '{target_config_name}'." + ) + achieved_target = True + achieved_this_attempt = True + break + else: + last_known_joints_str = "No joint state." + if ( + self.node.get_clock().now() - verif_start_time + ) >= RCLPYDuration(seconds=verification_timeout_sec): + if not achieved_this_attempt: + self.logger.warn( + f" Verification timeout for '{target_config_name}'." + ) + break + if not achieved_this_attempt: + self.node.get_clock().sleep_for( + RCLPYDuration(seconds=verification_poll_interval_sec) + ) + if achieved_target: + break + if not achieved_this_attempt: + last_exception_detail = ( + f"Verification failed. Last state: {last_known_joints_str}" + ) + except Exception as e: + self.logger.warn( + f" Move attempt {attempt + 1} to '{target_config_name}' failed during move: {e}" + ) + last_exception_detail = f"Error in move: {e}" + if achieved_target: + break + if attempt < max_move_attempts - 1: + self.node.get_clock().sleep_for(RCLPYDuration(seconds=1.5)) + ( + self.moveit2_interface.planner_id, + self.moveit2_interface.pipeline_id, + self.moveit2_interface.allowed_planning_time, + ) = ( + original_planner, + original_pipeline, + original_timeout, + ) + if not achieved_target: + final_error_msg = f"CRITICAL: Failed to move AND verify robot at '{target_config_name}' after {max_move_attempts} attempts. Last state: {last_known_joints_str}. Detail: {last_exception_detail}" + self.logger.error(final_error_msg) + raise RuntimeError(final_error_msg) + self.logger.info( + f"Successfully moved to and verified configuration '{target_config_name}'." 
+ ) + + def move_to_initial_config(self): + self._move_to_config_blocking("InitialScriptSetup", self.initial_joint_config) + + def run_benchmark_planning_task(self, task: PlanningTask): + start_name, goal_name, goal_joints = ( + task.start_config.name, + task.goal_config.name, + task.goal_config.joint_values, + ) + self.logger.info(f"--- Testing Task: FROM '{start_name}' TO '{goal_name}' ---") + self.logger.info(f" Goal Joints: {np.round(goal_joints, 3).tolist()}") + task_key = (start_name, goal_name) + + for planner_id_str in self.planners_to_test: + path_constraints_to_apply = ( + self.naive_jaco_hand_constraint_kwargs + if self.use_naive_jaco_hand_constraint + else None + ) + + jaco_trajectory, elapsed_time = self.plan_to_target_configuration( + goal_joints, planner_id_str, path_constraints_to_apply + ) + + jaco_plan_success = jaco_trajectory is not None and bool( + jaco_trajectory.points + ) + path_len_total, joint_path_lens_map = self._get_path_length_stats( + jaco_trajectory + ) + max_jaco_roll_dev = self._calculate_max_jaco_hand_roll_deviation( + jaco_trajectory + ) + + aggregate_at_metrics: ArticutoolMetrics + per_wp_at_solutions: List[ArticutoolWaypointSolution] + + if jaco_plan_success: + aggregate_at_metrics, per_wp_at_solutions = ( + self._analyze_trajectory_for_articutool(jaco_trajectory) + ) + else: + num_potential_wps = len( + goal_joints + ) # A rough estimate if trajectory is None + aggregate_at_metrics = ArticutoolMetrics( + False, *([np.nan] * 8), num_potential_wps + ) + per_wp_at_solutions = [ + ArticutoolWaypointSolution(False, None, None) + ] * num_potential_wps + + trajectory_filename = None + if jaco_plan_success and self.trajectory_save_dir: + trajectory_filename = self._save_trajectory_to_file( + jaco_trajectory, + per_wp_at_solutions, + start_name, + goal_name, + planner_id_str, + ) + + self.results[task_key][planner_id_str] = PlanResult( + jaco_trajectory, + elapsed_time, + path_len_total, + joint_path_lens_map, + max_jaco_roll_dev, + jaco_plan_success, + trajectory_filename, + aggregate_at_metrics, + per_wp_at_solutions, # Storing per-wp solutions in PlanResult + ) + + # Pre-format strings for logging to avoid f-string errors with None/NaN + path_len_str = ( + f"{path_len_total:.3f}" + if path_len_total is not None + and not np.isnan(path_len_total) + and not np.isinf(path_len_total) + else "N/A" + ) + jaco_roll_dev_str = ( + f"{max_jaco_roll_dev:.3f}" + if max_jaco_roll_dev is not None + and not np.isnan(max_jaco_roll_dev) + and not np.isinf(max_jaco_roll_dev) + else "N/A" + ) + + at_feasible_str = str(aggregate_at_metrics.path_feasible) + at_infeasible_pts_str = str(aggregate_at_metrics.num_infeasible_points) + at_p_range_str = ( + f"{aggregate_at_metrics.pitch_range_used_percent:.1f}" + if not np.isnan(aggregate_at_metrics.pitch_range_used_percent) + else "N/A" + ) + at_r_range_str = ( + f"{aggregate_at_metrics.roll_range_used_percent:.1f}" + if not np.isnan(aggregate_at_metrics.roll_range_used_percent) + else "N/A" + ) + + articutool_log = ( + f"| Articutool Feasible: {at_feasible_str} (Infeasible Pts: {at_infeasible_pts_str}, " + f"P-Range%: {at_p_range_str}, R-Range%: {at_r_range_str})" + ) + log_suffix = ( + f"| TrajFile: {trajectory_filename}" if trajectory_filename else "" + ) + + if jaco_plan_success: + self.logger.info( + f" Planner: {planner_id_str} | Jaco Success: True | Time: {elapsed_time:.3f}s | PathLen: {path_len_str} | JacoHandRollDev: {jaco_roll_dev_str} rad {articutool_log} {log_suffix}" + ) + else: + self.logger.warn( + f" Planner: 
{planner_id_str} | Jaco Success: False | Time: {elapsed_time:.3f}s {log_suffix}" + ) + + def run_all_tests(self, log_summary_every_n_tasks: Optional[int] = None): + # ... (implementation from previous version, ensure default ArticutoolMetrics and empty list for per_wp_solutions on failure) ... + self.logger.info( + "=== Starting Benchmark: Moving to Initial Script Configuration ===" + ) + try: + self.move_to_initial_config() + except RuntimeError as e: + self.logger.error( + f"CRITICAL FAILURE: Could not move to initial config. Benchmark aborted. Error: {e}" + ) + return + + for i, planning_task in enumerate(self.planning_tasks): + self.logger.info( + f"\n=== Running Planning Task {i + 1}/{self.num_tasks}: FROM '{planning_task.start_config.name}' TO '{planning_task.goal_config.name}' ===" + ) + try: + self._move_to_config_blocking( + planning_task.start_config.name, + planning_task.start_config.joint_values, + ) + except RuntimeError as e: + self.logger.error( + f"Skipping task FROM '{planning_task.start_config.name}' TO '{planning_task.goal_config.name}' due to failure to reach start state: {e}" + ) + task_key = ( + planning_task.start_config.name, + planning_task.goal_config.name, + ) + default_at_metrics = ArticutoolMetrics(False, *([np.nan] * 8), 0) + empty_at_solutions: List[ArticutoolWaypointSolution] = [] + for planner_id_str_fail in self.planners_to_test: + self.results[task_key][planner_id_str_fail] = PlanResult( + None, + 0.0, + None, + None, + None, + False, + f"ERROR_MOVE_TO_START_FAILED:{str(e)[:50]}", + default_at_metrics, + empty_at_solutions, + ) + continue + self.run_benchmark_planning_task(planning_task) + if log_summary_every_n_tasks and (i + 1) % log_summary_every_n_tasks == 0: + self.log_summary_results() + self.logger.info("=== Benchmark Run Completed ===") + + def get_csv_header(self) -> List[str]: + # ... (implementation from previous version, ensure names match PlanResult) ... + header = [ + "start_config_name", + *[f"start_{name}" for name in self.joint_names_for_group], + "goal_config_name", + *[f"goal_{name}" for name in self.joint_names_for_group], + "planner_id", + "elapsed_time_s", + "jaco_plan_success", + "path_length_total", + "max_jaco_hand_roll_deviation_rad", + "trajectory_filename", + "articutool_path_feasible", + "articutool_num_infeasible_points", + "articutool_min_pitch_rad", + "articutool_max_pitch_rad", + "articutool_avg_pitch_abs_rad", + "articutool_pitch_range_used_percent", + "articutool_min_roll_rad", + "articutool_max_roll_rad", + "articutool_avg_roll_abs_rad", + "articutool_roll_range_used_percent", + *[f"path_length_{name}" for name in self.joint_names_for_group], + ] + return header + + def write_results_to_csv(self, filename: str): + # ... (implementation from previous version, ensure all fields from PlanResult.articutool_metrics are written) ... 
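+        # One row per (start, goal, planner): start/goal names and joint values,
+        # planner id, timing, Jaco success flag, total path length, max hand
+        # roll deviation, trajectory filename, the ten aggregate Articutool
+        # fields, then per-joint path lengths. Missing numeric values are
+        # written as empty strings, which planner_benchmark_analysis.py later
+        # coerces to NaN.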
+ self.logger.info(f"Writing results to {filename}") + with open(filename, "w", newline="") as f: + import csv + + csv_writer = csv.writer(f) + csv_writer.writerow(self.get_csv_header()) + for task_key in sorted(self.results.keys()): + start_name, goal_name = task_key + start_cfg_obj = next( + (c for c in self.all_named_configurations if c.name == start_name), + None, + ) + goal_cfg_obj = next( + (c for c in self.all_named_configurations if c.name == goal_name), + None, + ) + if not start_cfg_obj or not goal_cfg_obj: + continue + if task_key in self.results: + for planner_id_str in self.planners_to_test: + if planner_id_str in self.results[task_key]: + result = self.results[task_key][planner_id_str] + row = [ + start_cfg_obj.name, + *map(str, start_cfg_obj.joint_values), + goal_cfg_obj.name, + *map(str, goal_cfg_obj.joint_values), + planner_id_str, + f"{result.elapsed_time:.4f}", + str(1 if result.jaco_plan_success else 0), + ( + f"{result.path_length:.4f}" + if result.path_length is not None + else "" + ), + ( + f"{result.max_jaco_hand_roll_deviation:.4f}" + if result.max_jaco_hand_roll_deviation is not None + else "" + ), + ( + result.trajectory_filename + if result.trajectory_filename + else "" + ), + ] + atm = result.articutool_metrics + if atm: + row.extend( + [ + str(1 if atm.path_feasible else 0), + str(atm.num_infeasible_points), + ( + f"{atm.min_pitch_rad:.4f}" + if not np.isnan(atm.min_pitch_rad) + else "" + ), + ( + f"{atm.max_pitch_rad:.4f}" + if not np.isnan(atm.max_pitch_rad) + else "" + ), + ( + f"{atm.avg_pitch_abs_rad:.4f}" + if not np.isnan(atm.avg_pitch_abs_rad) + else "" + ), + ( + f"{atm.pitch_range_used_percent:.2f}" + if not np.isnan( + atm.pitch_range_used_percent + ) + else "" + ), + ( + f"{atm.min_roll_rad:.4f}" + if not np.isnan(atm.min_roll_rad) + else "" + ), + ( + f"{atm.max_roll_rad:.4f}" + if not np.isnan(atm.max_roll_rad) + else "" + ), + ( + f"{atm.avg_roll_abs_rad:.4f}" + if not np.isnan(atm.avg_roll_abs_rad) + else "" + ), + ( + f"{atm.roll_range_used_percent:.2f}" + if not np.isnan(atm.roll_range_used_percent) + else "" + ), + ] + ) + else: + row.extend(["ERROR_NO_AT_METRICS"] * 10) + if result.joint_path_lengths: + row.extend( + [ + ( + f"{result.joint_path_lengths.get(name, ''):.4f}" + if result.joint_path_lengths.get(name) + is not None + else "" + ) + for name in self.joint_names_for_group + ] + ) + else: + row.extend([""] * len(self.joint_names_for_group)) + csv_writer.writerow(row) + self.logger.info(f"Results successfully written to {filename}") + + def log_summary_results(self): + # ... (implementation from previous version, ensure it uses updated PlanResult fields) ... + self.logger.info("--- BENCHMARK SUMMARY (ALL COMPLETED TASKS) ---") + for planner_id_str in self.planners_to_test: + results_for_planner = [ + res[planner_id_str] + for res in self.results.values() + if planner_id_str in res + ] + if not results_for_planner: + self.logger.info( + f"--- Planner: {planner_id_str} ---\n No tasks recorded." + ) + continue + + successful_jaco_plans = sum( + 1 for r in results_for_planner if r.jaco_plan_success + ) + total_tasks = len(results_for_planner) + jaco_success_rate = ( + (successful_jaco_plans / total_tasks * 100) if total_tasks > 0 else 0 + ) + + self.logger.info(f"--- Planner: {planner_id_str} ---") + self.logger.info( + f" Jaco Plan Success Rate: {jaco_success_rate:.2f}% ({successful_jaco_plans}/{total_tasks})" + ) + # ... (other Jaco metrics as before) ... 
+ + articutool_feasible_paths_count = sum( + 1 + for r in results_for_planner + if r.jaco_plan_success + and r.articutool_metrics + and r.articutool_metrics.path_feasible + ) + if successful_jaco_plans > 0: + articutool_path_success_rate = ( + articutool_feasible_paths_count / successful_jaco_plans + ) * 100 + self.logger.info( + f" Articutool Path Feasibility Rate (of Jaco succ. plans): {articutool_path_success_rate:.2f}% ({articutool_feasible_paths_count}/{successful_jaco_plans})" + ) + else: + self.logger.info( + " Articutool Path Feasibility Rate: N/A (no Jaco successful plans)" + ) + self.logger.info("-------------------------") + + @staticmethod + def _summary_stats_str(data: List[float]) -> str: + # ... (implementation from previous version) ... + if not data: + return "N/A" + valid_data = [x for x in data if isinstance(x, (int, float)) and np.isfinite(x)] + if not valid_data: + return "N/A (no valid numeric data)" + return ( + f"Mean {np.mean(valid_data):.3f}, Median {np.median(valid_data):.3f}, " + f"Min {np.min(valid_data):.3f}, Max {np.max(valid_data):.3f}, " + f"Std {np.std(valid_data):.3f}, Count {len(valid_data)}" + ) + + +def main(output_dir_arg: Optional[str], use_naive_constraint_arg: bool): + # ... (implementation from previous version, ensure PlannerBenchmark is instantiated correctly) ... + rclpy.init() + node = Node("planner_benchmark_pairwise_physical_moves") + global _LOGGER_INSTANCE + _LOGGER_INSTANCE = node.get_logger() + executor = rclpy.executors.MultiThreadedExecutor(num_threads=2) + executor.add_node(node) + executor_thread = Thread(target=executor.spin, daemon=True) + executor_thread.start() + _LOGGER_INSTANCE.info( + "Benchmark node spinning. Waiting for services (approx 5s)..." + ) + time.sleep(5.0) + + hardcoded_configs = { + "MoveAbovePlate": [-2.4538, 3.0797, 1.8320, 4.0961, -2.0034, -3.2123], + "RestingAcquireFood": [-1.9467, 2.5126, 0.3565, -4.7650, 5.9999, 4.9955], + "StagingConfig": [-2.3252, 4.4562, 4.1676, 1.5326, -2.1835, -2.1952], + "StowLocation": [-1.5210, 2.6009, 0.3281, -4.0001, 0.2283, 3.8788], + } + planners = ["RRTConnectkConfigDefault", "RRTstarkConfigDefault", "CHOMP"] + initial_script_setup_config = ( + list(hardcoded_configs.values())[0] + if hardcoded_configs + else [0.0] * len(kinova.joint_names()) + ) + planning_group_name, ee_link_name, base_link_name, group_joint_names = ( + "jaco_arm", + kinova.end_effector_name(), + kinova.base_link_name(), + kinova.joint_names(), + ) + planning_timeout = 15.0 + base_output_dir = ( + output_dir_arg + if output_dir_arg + else os.path.join(os.getcwd(), "benchmark_results") + ) + if not os.path.exists(base_output_dir): + os.makedirs(base_output_dir) + trajectory_save_location = os.path.join( + base_output_dir, "saved_trajectories_enhanced" + ) # New subdir + if not os.path.exists(trajectory_save_location): + os.makedirs(trajectory_save_location) + + callback_group = ReentrantCallbackGroup() + moveit2_interface = MoveIt2( + node=node, + joint_names=group_joint_names, + base_link_name=base_link_name, + end_effector_name=ee_link_name, + group_name=planning_group_name, + callback_group=callback_group, + ) + moveit2_interface.allowed_planning_time = planning_timeout + moveit2_interface.max_velocity_scaling_factor = 0.5 + moveit2_interface.max_acceleration_scaling_factor = 0.5 + + benchmark_runner = PlannerBenchmark( + node=node, + moveit2_interface=moveit2_interface, + hardcoded_target_configs=hardcoded_configs, + planners_to_test=planners, + initial_joint_config=initial_script_setup_config, + 
planning_group=planning_group_name, + end_effector_link=ee_link_name, + base_link=base_link_name, + joint_names_for_group=group_joint_names, + planning_timeout_sec=planning_timeout, + trajectory_save_dir=trajectory_save_location, + joint_state_topic="/joint_states", + use_naive_jaco_hand_constraint=use_naive_constraint_arg, + ) + try: + num_total_tasks = benchmark_runner.num_tasks + log_interval = max(1, num_total_tasks // 5 if num_total_tasks > 0 else 1) + benchmark_runner.run_all_tests(log_summary_every_n_tasks=log_interval) + if not os.path.exists(base_output_dir): + os.makedirs(base_output_dir) + constraint_label = ( + "naive_constraint" if use_naive_constraint_arg else "no_hand_constraint" + ) + csv_filename = os.path.join( + base_output_dir, + datetime.now().strftime("%Y%m%d-%H%M%S") + + f"_benchmark_{constraint_label}.csv", + ) + benchmark_runner.write_results_to_csv(csv_filename) + benchmark_runner.log_summary_results() + except Exception as e: + _LOGGER_INSTANCE.error(f"Benchmark run failed: {e}", exc_info=True) + finally: + _LOGGER_INSTANCE.info("Shutting down benchmark node.") + rclpy.shutdown() + executor_thread.join() + _LOGGER_INSTANCE.info("Executor joined. Script finished.") + + +if __name__ == "__main__": + default_out_dir = os.path.join(os.getcwd(), "planner_benchmark_output_v3") + out_dir_arg = default_out_dir + use_naive_arg_str = "true" # Default + + if len(sys.argv) == 2: # Only one arg + if sys.argv[1].lower() in ["true", "use_naive", "naive"]: + use_naive_arg_str = "true" + elif sys.argv[1].lower() in ["false", "no_constraint", "none"]: + use_naive_arg_str = "false" + else: + out_dir_arg = os.path.expanduser(sys.argv[1]) # Assume it's output dir + elif len(sys.argv) > 2: # Two or more args + out_dir_arg = os.path.expanduser(sys.argv[1]) + if sys.argv[2].lower() in ["true", "use_naive", "naive"]: + use_naive_arg_str = "true" + elif sys.argv[2].lower() in ["false", "no_constraint", "none"]: + use_naive_arg_str = "false" + + use_naive_constraint_bool = use_naive_arg_str == "true" + _get_logger().info(f"Output directory set to: {out_dir_arg}") + _get_logger().info(f"Using naive Jaco hand constraint: {use_naive_constraint_bool}") + main(out_dir_arg, use_naive_constraint_bool) diff --git a/ada_feeding/scripts/planner_benchmark.py b/ada_feeding/scripts/planner_benchmark.py index 481846c3..47c54d67 100755 --- a/ada_feeding/scripts/planner_benchmark.py +++ b/ada_feeding/scripts/planner_benchmark.py @@ -332,7 +332,7 @@ def main(out_dir: Optional[str]): node=node, joint_names=kinova.joint_names(), base_link_name=kinova.base_link_name(), - end_effector_name="forkTip", + end_effector_name="j2n6s200_end_effector", group_name="jaco_arm", callback_group=callback_group, ) diff --git a/ada_feeding/scripts/planner_benchmark_analysis.py b/ada_feeding/scripts/planner_benchmark_analysis.py new file mode 100755 index 00000000..76ada504 --- /dev/null +++ b/ada_feeding/scripts/planner_benchmark_analysis.py @@ -0,0 +1,388 @@ +#!/usr/bin/env python3 + +""" +Analyzes the CSV output from the PlannerBenchmark script. +Generates summary statistics and comparative plots for different planners. 
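+
+Example invocation (illustrative; point it at whatever CSV the benchmark wrote):
+    python3 planner_benchmark_analysis.py \
+        planner_benchmark_output_v3/20250101-120000_benchmark_naive_constraint.csv \
+        --output_dir benchmark_analysis_plots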
+""" + +import pandas as pd +import numpy as np +import matplotlib.pyplot as plt +import seaborn as sns +import argparse +import os +from typing import List, Dict, Any + +# Define expected numeric columns that might need conversion and could contain NaN-like strings +NUMERIC_COLUMNS = [ + "elapsed_time_s", + "jaco_plan_success", + "path_length_total", + "max_jaco_hand_roll_deviation_rad", + "articutool_path_feasible", + "articutool_num_infeasible_points", + "articutool_min_pitch_rad", + "articutool_max_pitch_rad", + "articutool_avg_pitch_abs_rad", + "articutool_pitch_range_used_percent", + "articutool_min_roll_rad", + "articutool_max_roll_rad", + "articutool_avg_roll_abs_rad", + "articutool_roll_range_used_percent", +] + + +def load_and_preprocess_data(csv_filepath: str) -> pd.DataFrame: + """Loads data from CSV and preprocesses it.""" + print(f"Loading data from: {csv_filepath}") + try: + df = pd.read_csv(csv_filepath) + except FileNotFoundError: + print(f"Error: CSV file not found at {csv_filepath}") + raise + except Exception as e: + print(f"Error loading CSV: {e}") + raise + + print(f"Loaded {len(df)} rows.") + + # Identify path_length_joint columns dynamically + path_length_joint_cols = [ + col for col in df.columns if col.startswith("path_length_j") + ] + all_numeric_cols = NUMERIC_COLUMNS + path_length_joint_cols + + for col in all_numeric_cols: + if col in df.columns: + # Replace common non-numeric placeholders with NaN before attempting conversion + df[col] = df[col].replace( + ["N/A", "nan", "inf", "-inf", "ERROR", "ERROR_NO_AT_METRICS", ""], + np.nan, + ) + # Attempt to convert to numeric, coercing errors to NaN + df[col] = pd.to_numeric(df[col], errors="coerce") + else: + print( + f"Warning: Expected numeric column '{col}' not found in CSV. Skipping." + ) + + # Ensure boolean-like columns are 0 or 1 (or NaN) + for bool_col in ["jaco_plan_success", "articutool_path_feasible"]: + if bool_col in df.columns: + # After to_numeric, valid values should be 0.0, 1.0, or NaN. + # We can keep them as float or convert to Int64 (which supports NaN) + # For calculations like mean (success rate), float is fine. 
+ pass + + print("Data preprocessing complete.") + return df + + +def calculate_summary_stats(df: pd.DataFrame) -> Dict[str, Dict[str, Any]]: + """Calculates summary statistics for each planner.""" + summary: Dict[str, Dict[str, Any]] = {} + planners = df["planner_id"].unique() + + for planner in planners: + planner_df = df[ + df["planner_id"] == planner + ].copy() # Use .copy() to avoid SettingWithCopyWarning + total_tasks = len(planner_df) + if total_tasks == 0: + summary[planner] = {"total_tasks": 0} + continue + + # Jaco Plan Success + jaco_successful_plans_df = planner_df[planner_df["jaco_plan_success"] == 1] + num_jaco_successful = len(jaco_successful_plans_df) + jaco_success_rate = ( + (num_jaco_successful / total_tasks) * 100 if total_tasks > 0 else 0 + ) + + # Articutool Path Feasibility (of Jaco successful plans) + articutool_feasible_df = pd.DataFrame() # Ensure it's defined + num_articutool_feasible = 0 + articutool_feasibility_rate = 0.0 # Default + if num_jaco_successful > 0: + articutool_feasible_df = jaco_successful_plans_df[ + jaco_successful_plans_df["articutool_path_feasible"] == 1 + ] + num_articutool_feasible = len(articutool_feasible_df) + articutool_feasibility_rate = ( + (num_articutool_feasible / num_jaco_successful) * 100 + if num_jaco_successful > 0 + else 0 + ) + + # Overall Success (Jaco success AND Articutool feasible) + overall_success_rate = ( + (num_articutool_feasible / total_tasks) * 100 if total_tasks > 0 else 0 + ) + + # Planning Time + avg_time_all = planner_df["elapsed_time_s"].mean() + avg_time_jaco_succ = ( + jaco_successful_plans_df["elapsed_time_s"].mean() + if num_jaco_successful > 0 + else np.nan + ) + avg_time_overall_succ = ( + articutool_feasible_df["elapsed_time_s"].mean() + if num_articutool_feasible > 0 + else np.nan + ) + + # Path Length + avg_path_len_jaco_succ = ( + jaco_successful_plans_df["path_length_total"].mean() + if num_jaco_successful > 0 + else np.nan + ) + + # Max Jaco Hand Roll Deviation + avg_jaco_roll_dev_jaco_succ = ( + jaco_successful_plans_df["max_jaco_hand_roll_deviation_rad"].mean() + if num_jaco_successful > 0 + else np.nan + ) + + # Articutool Metrics (for overall successful paths) + avg_at_pitch_range_overall_succ = ( + articutool_feasible_df["articutool_pitch_range_used_percent"].mean() + if num_articutool_feasible > 0 + else np.nan + ) + avg_at_roll_range_overall_succ = ( + articutool_feasible_df["articutool_roll_range_used_percent"].mean() + if num_articutool_feasible > 0 + else np.nan + ) + avg_at_infeasible_pts_overall_succ = ( + articutool_feasible_df["articutool_num_infeasible_points"].mean() + if num_articutool_feasible > 0 + else np.nan + ) + + summary[planner] = { + "total_tasks": total_tasks, + "num_jaco_successful": num_jaco_successful, + "jaco_success_rate_percent": jaco_success_rate, + "num_articutool_feasible_of_jaco_succ": num_articutool_feasible, # Corrected key + "articutool_feasibility_rate_percent": articutool_feasibility_rate, + "num_overall_successful": num_articutool_feasible, # Same as above, but for clarity in overall rate + "overall_success_rate_percent": overall_success_rate, + "avg_time_all_s": avg_time_all, + "avg_time_jaco_succ_s": avg_time_jaco_succ, + "avg_time_overall_succ_s": avg_time_overall_succ, + "avg_path_len_jaco_succ": avg_path_len_jaco_succ, + "avg_jaco_roll_dev_jaco_succ_rad": avg_jaco_roll_dev_jaco_succ, + "avg_at_pitch_range_overall_succ_percent": avg_at_pitch_range_overall_succ, + "avg_at_roll_range_overall_succ_percent": avg_at_roll_range_overall_succ, + 
"avg_at_infeasible_pts_overall_succ": avg_at_infeasible_pts_overall_succ, + } + return summary + + +def print_summary_report(summary_stats: Dict[str, Dict[str, Any]]): + """Prints a formatted summary report to the console.""" + print("\n--- Benchmark Analysis Report ---") + for planner, stats in summary_stats.items(): + print(f"\nPlanner: {planner} (Total Tasks: {stats.get('total_tasks', 0)})") + if stats.get("total_tasks", 0) == 0: + print(" No data for this planner.") + continue + + print( + f" Jaco Plan Success Rate: {stats.get('jaco_success_rate_percent', 0):.2f}% ({stats.get('num_jaco_successful', 0)}/{stats.get('total_tasks', 0)})" + ) + if stats.get("num_jaco_successful", 0) > 0: + print( + f" Articutool Feasibility (of Jaco succ.): {stats.get('articutool_feasibility_rate_percent', 0):.2f}% ({stats.get('num_articutool_feasible_of_jaco_succ', 0)}/{stats.get('num_jaco_successful', 0)})" + ) + else: + print( + " Articutool Feasibility (of Jaco succ.): N/A (No Jaco successful plans)" + ) + print( + f" Overall Task Success Rate: {stats.get('overall_success_rate_percent', 0):.2f}% ({stats.get('num_overall_successful', 0)}/{stats.get('total_tasks', 0)})" + ) + + print( + f" Avg. Planning Time (all attempts): {stats.get('avg_time_all_s', np.nan):.3f} s" + ) + print( + f" Avg. Planning Time (Jaco successful): {stats.get('avg_time_jaco_succ_s', np.nan):.3f} s" + ) + print( + f" Avg. Planning Time (Overall successful): {stats.get('avg_time_overall_succ_s', np.nan):.3f} s" + ) + + print( + f" Avg. Path Length (Jaco successful): {stats.get('avg_path_len_jaco_succ', np.nan):.3f}" + ) + print( + f" Avg. Max Jaco Hand Roll Dev (Jaco successful): {stats.get('avg_jaco_roll_dev_jaco_succ_rad', np.nan):.3f} rad" + ) + + if stats.get("num_overall_successful", 0) > 0: + print( + f" Avg. Articutool Pitch Range Used (Overall successful): {stats.get('avg_at_pitch_range_overall_succ_percent', np.nan):.2f}%" + ) + print( + f" Avg. Articutool Roll Range Used (Overall successful): {stats.get('avg_at_roll_range_overall_succ_percent', np.nan):.2f}%" + ) + print( + f" Avg. 
Articutool Infeasible Points (Overall successful): {stats.get('avg_at_infeasible_pts_overall_succ', np.nan):.2f}" + ) + else: + print(" Articutool Performance Metrics (Overall successful): N/A") + print("-----------------------------") + + +def generate_plots(df: pd.DataFrame, output_dir: str): + """Generates and saves comparative plots.""" + if not os.path.exists(output_dir): + os.makedirs(output_dir) + print(f"Created output directory for plots: {output_dir}") + + sns.set_theme(style="whitegrid") + planners = df["planner_id"].unique() + + # --- Success Rate Plots --- + success_data = [] + for planner in planners: + planner_df = df[df["planner_id"] == planner] + total = len(planner_df) + if total == 0: + continue + jaco_succ = planner_df["jaco_plan_success"].sum() + + # For Articutool feasibility, only consider Jaco successful plans + jaco_succ_df = planner_df[planner_df["jaco_plan_success"] == 1] + at_feasible_of_jaco_succ = ( + jaco_succ_df["articutool_path_feasible"].sum() + if not jaco_succ_df.empty + else 0 + ) + + success_data.append( + { + "planner": planner, + "type": "Jaco Plan Success", + "rate": (jaco_succ / total) * 100 if total else 0, + } + ) + success_data.append( + { + "planner": planner, + "type": "Articutool Feasible (of Jaco Succ.)", + "rate": ( + (at_feasible_of_jaco_succ / jaco_succ) * 100 if jaco_succ else 0 + ), + } + ) + success_data.append( + { + "planner": planner, + "type": "Overall Success", + "rate": (at_feasible_of_jaco_succ / total) * 100 if total else 0, + } + ) + + success_df = pd.DataFrame(success_data) + + plt.figure(figsize=(12, 7)) + sns.barplot(x="planner", y="rate", hue="type", data=success_df, palette="viridis") + plt.title("Planner Success Rates") + plt.ylabel("Success Rate (%)") + plt.xlabel("Planner ID") + plt.xticks(rotation=45, ha="right") + plt.legend(title="Success Type") + plt.tight_layout() + plt.savefig(os.path.join(output_dir, "success_rates.png")) + plt.close() + print(f"Saved plot: success_rates.png to {output_dir}") + + # --- Distribution Plots (Box Plots) --- + # Filter for Jaco successful plans for these metrics + jaco_successful_df = df[df["jaco_plan_success"] == 1].copy() + # Filter for overall successful plans for Articutool specific metrics + overall_successful_df = jaco_successful_df[ + jaco_successful_df["articutool_path_feasible"] == 1 + ].copy() + + plot_metrics = [ + ("elapsed_time_s", "Planning Time (Jaco Successful) (s)", jaco_successful_df), + ("path_length_total", "Path Length (Jaco Successful)", jaco_successful_df), + ( + "max_jaco_hand_roll_deviation_rad", + "Max Jaco Hand Roll Dev (Jaco Successful) (rad)", + jaco_successful_df, + ), + ( + "articutool_pitch_range_used_percent", + "Articutool Pitch Range Used (Overall Successful) (%)", + overall_successful_df, + ), + ( + "articutool_roll_range_used_percent", + "Articutool Roll Range Used (Overall Successful) (%)", + overall_successful_df, + ), + ] + + for metric_col, title, data_subset in plot_metrics: + if ( + metric_col not in data_subset.columns + or data_subset[metric_col].isnull().all() + ): + print(f"Skipping plot for '{title}': column missing or all NaN.") + continue + if data_subset.empty: + print(f"Skipping plot for '{title}': no data after filtering.") + continue + + plt.figure(figsize=(10, 6)) + sns.boxplot(x="planner_id", y=metric_col, data=data_subset, palette="pastel") + plt.title(title) + plt.ylabel(metric_col.replace("_", " ").title()) + plt.xlabel("Planner ID") + plt.xticks(rotation=45, ha="right") + plt.tight_layout() + 
plt.savefig(os.path.join(output_dir, f"{metric_col}_distribution.png")) + plt.close() + print(f"Saved plot: {metric_col}_distribution.png to {output_dir}") + + print("Plot generation complete.") + + +def main(): + parser = argparse.ArgumentParser( + description="Analyze Planner Benchmark CSV results." + ) + parser.add_argument( + "csv_filepath", type=str, help="Path to the benchmark CSV file." + ) + parser.add_argument( + "--output_dir", + type=str, + default="benchmark_analysis_plots", + help="Directory to save generated plots (default: benchmark_analysis_plots).", + ) + args = parser.parse_args() + + try: + data_df = load_and_preprocess_data(args.csv_filepath) + summary_stats = calculate_summary_stats(data_df) + print_summary_report(summary_stats) + generate_plots(data_df, args.output_dir) + print("\nAnalysis complete.") + except Exception as e: + print(f"An error occurred during analysis: {e}") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + main() diff --git a/ada_feeding/scripts/visualize_e2e_benchmark.py b/ada_feeding/scripts/visualize_e2e_benchmark.py new file mode 100644 index 00000000..3be67ab5 --- /dev/null +++ b/ada_feeding/scripts/visualize_e2e_benchmark.py @@ -0,0 +1,640 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script interactively visualizes trajectories from end_to_end_benchmark.py +output files using Pinocchio and MeshCat. + +This refactored version uses a "trial-centric" workflow. A user first selects +a Trial ID, which immediately displays the static scene for that trial. The user +can then choose to play back any of the available (successful) stage +trajectories from within that trial's context. +""" + +# Standard imports +import argparse +import json +import os +import subprocess +import tempfile +import time +from typing import Any, Dict, List, Optional + +import meshcat +import meshcat.geometry as g +import numpy as np +import pandas as pd +import pinocchio as pin +import pinocchio.visualize + + +def xacro_to_urdf_string(xacro_filename: str, logger_func=print) -> Optional[str]: + """Converts a Xacro file to a URDF XML string using ros2 run xacro.""" + if not os.path.exists(xacro_filename): + logger_func(f"Error: Xacro file not found at {xacro_filename}") + return None + try: + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", xacro_filename], + check=True, + capture_output=True, + text=True, + ) + return process.stdout + except Exception as e: + logger_func(f"An unexpected error occurred during XACRO processing: {e}") + return None + + +def find_ros_package_paths(xacro_file_path: str, logger_func=print) -> List[str]: + """ + Finds and returns a list of package paths for Pinocchio to search for meshes. + This function robustly finds the Colcon workspace by searching upwards from + the provided XACRO file's location. 
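+
+    Returns a de-duplicated list of directories (entries from ROS_PACKAGE_PATH plus
+    the workspace's 'src' and 'install' folders, when found) that can be passed as
+    Pinocchio's `package_dirs` so that `package://` mesh URIs in the URDF resolve.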
+ """ + package_dirs = [] + ros_package_path = os.environ.get("ROS_PACKAGE_PATH") + if ros_package_path: + package_dirs.extend( + [p for p in ros_package_path.split(os.pathsep) if os.path.isdir(p)] + ) + + current_path = os.path.abspath(os.path.dirname(xacro_file_path)) + ws_root = None + while True: + if os.path.isdir(os.path.join(current_path, "src")): + ws_root = current_path + break + parent_path = os.path.dirname(current_path) + if parent_path == current_path: + break + current_path = parent_path + + if ws_root: + logger_func(f"Found workspace root by searching upwards: {ws_root}") + src_dir = os.path.join(ws_root, "src") + if os.path.isdir(src_dir) and src_dir not in package_dirs: + package_dirs.append(src_dir) + install_dir = os.path.join(ws_root, "install") + if os.path.isdir(install_dir) and install_dir not in package_dirs: + package_dirs.append(install_dir) + + unique_package_dirs = list(dict.fromkeys(package_dirs)) + logger_func(f"Using package search paths for Pinocchio: {unique_package_dirs}") + return unique_package_dirs + + +def load_pinocchio_model_from_urdf_string( + urdf_xml_string: str, package_dirs: List[str], logger_func=print +): + """Loads a Pinocchio model from a URDF XML string via a temporary file.""" + temp_urdf_path = "" + try: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".urdf", delete=False + ) as temp_file: + temp_urdf_path = temp_file.name + temp_file.write(urdf_xml_string) + + model = pin.buildModelFromUrdf(temp_urdf_path) + visual_model = pin.buildGeomFromUrdf( + model, temp_urdf_path, pin.GeometryType.VISUAL, package_dirs=package_dirs + ) + collision_model = pin.buildGeomFromUrdf( + model, temp_urdf_path, pin.GeometryType.COLLISION, package_dirs=package_dirs + ) + + data = model.createData() + logger_func(f"Pinocchio model loaded. Nq: {model.nq}, Nv: {model.nv}") + return model, collision_model, visual_model, data + except Exception as e: + logger_func("\n" + "=" * 80) + logger_func("FATAL: Error loading Pinocchio model.") + logger_func(f"Original Pinocchio error: {e}") + logger_func("=" * 80 + "\n") + return None, None, None, None + finally: + if temp_urdf_path and os.path.exists(temp_urdf_path): + os.remove(temp_urdf_path) + + +def load_benchmark_data(file_paths: List[str], logger_func=print) -> pd.DataFrame: + """ + Loads data from benchmark .jsonl files, ensuring all trials and + stages are loaded. + """ + all_stages_flat = [] + for file_path in file_paths: + try: + # --- Read the file line by line for .jsonl format --- + with open(file_path, "r") as f: + # Create a list of trial dictionaries by parsing each line + trials = [json.loads(line) for line in f if line.strip()] + + for trial in trials: + if not trial.get("stages"): + all_stages_flat.append( + { + "trial_id": trial.get("trial_id"), + "scene_poses": trial.get("scene_poses"), + "stage_name": "N/A", + "status": "NoStages", + } + ) + continue + + for stage in trial.get("stages", []): + flat_record = { + "trial_id": trial.get("trial_id"), + "scene_poses": trial.get("scene_poses"), + "stage_name": stage.get("stage_name"), + "status": stage.get("status"), + "execution_mode": stage.get("execution_mode"), + "traj_jaco": stage.get("traj_jaco"), + "traj_atool": stage.get("traj_atool"), + } + all_stages_flat.append(flat_record) + except Exception as e: + logger_func(f"Warning: Could not load or parse {file_path}. Error: {e}") + + if not all_stages_flat: + return pd.DataFrame() + + logger_func( + f"Successfully loaded {len(all_stages_flat)} stage records across all trials." 
+ ) + return pd.DataFrame(all_stages_flat) + + +def select_trial(df: pd.DataFrame) -> Optional[pd.DataFrame]: + """ + Prompts the user to select a Trial ID, showing the last successfully + completed stage for each trial. + """ + all_trial_ids = sorted(df["trial_id"].unique()) + if not all_trial_ids: + print("No trials found in the loaded data.") + return None + + # Pre-calculate the completion status for each trial + trial_completion_info = {} + stage_order = [ + "HomeToAbovePlate", + "AbovePlateToAboveFood", + "AboveFoodToInFood", + "LevelArticutool", + "Resting", + ] + for trial_id in all_trial_ids: + trial_df = df[df["trial_id"] == trial_id] + successful_stages = trial_df[trial_df["status"] == "Success"][ + "stage_name" + ].tolist() + + last_success = "None" + last_success_idx = -1 + for stage_name in successful_stages: + if stage_name in stage_order: + last_success_idx = max(last_success_idx, stage_order.index(stage_name)) + + if last_success_idx != -1: + last_success = stage_order[last_success_idx] + trial_completion_info[trial_id] = last_success + + while True: + print("\n--- Please Select a Trial to Visualize ---") + for i, trial_id in enumerate(all_trial_ids): + completion_status = trial_completion_info.get(trial_id, "None") + print( + f" [{i + 1}] Trial ID: {trial_id} (Last Success: {completion_status})" + ) + print(" [q] Quit") + try: + choice_str = ( + input(f"Enter choice (1-{len(all_trial_ids)} or q): ").strip().lower() + ) + if choice_str == "q": + return None + selected_trial_id = all_trial_ids[int(choice_str) - 1] + return df[df["trial_id"] == selected_trial_id].copy() + except (ValueError, IndexError): + print("Invalid choice.") + + +def draw_scene_frames(pin_viz, scene_poses: Dict[str, Any], frame_scale: float = 0.2): + """Draws coordinate frames and identifying markers for all poses in the scene.""" + if not scene_poses: + return + + # Clear any previous frames before drawing new ones + try: + pin_viz.viewer["scene/frames"].delete() + except KeyError: + pass + print("Drawing scene frames and markers...") + for name, pose_data in scene_poses.items(): + position = np.array(pose_data["position"]) + quat_xyzw = np.array(pose_data["orientation_xyzw"]) + + transform = pin.SE3(pin.Quaternion(quat_xyzw), position) + + # Create a parent path for the frame and its marker + frame_path = f"scene/frames/{name}" + + # Draw the coordinate frame (triad) + pin_viz.viewer[f"{frame_path}/triad"].set_object(g.triad(frame_scale)) + + # Draw a small sphere as a visual marker/label + material = g.MeshLambertMaterial(color=0x5555FF, transparent=True, opacity=0.6) + pin_viz.viewer[f"{frame_path}/marker"].set_object(g.Sphere(0.02), material) + + # Apply the same transform to the parent path + pin_viz.viewer[frame_path].set_transform(transform.homogeneous) + + +# --- Helper functions for robot state updates --- +def get_joint_map(model, joint_names): + """Helper to map joint names to their indices in Pinocchio's q vector.""" + if not joint_names: + return {} + return { + name: model.getJointId(name) + for name in joint_names + if model.existJointName(name) + } + + +def update_q_from_waypoint(q, model, waypoint, joint_names, joint_map): + """Helper to update the robot configuration from a single waypoint.""" + if not waypoint: + return + for name, pos in zip(joint_names, waypoint["positions"]): + if name in joint_map: + joint_id = joint_map[name] + if model.joints[joint_id].nq == 1: + q[model.joints[joint_id].idx_q] = pos + elif model.joints[joint_id].nq == 2: + q[model.joints[joint_id].idx_q] = 
np.cos(pos) + q[model.joints[joint_id].idx_q + 1] = np.sin(pos) + + +def trajectory_playback_loop(pin_viz, model, data, selected_stage, args): + """ + Runs the interactive UI for a SINGLE trajectory. + Handles different execution modes and frame-by-frame controls. + """ + execution_mode = selected_stage["execution_mode"] + traj_jaco = selected_stage.get("traj_jaco") + traj_atool = selected_stage.get("traj_atool") + static_jaco_config = selected_stage.get("static_jaco_config") + + jaco_joint_map = get_joint_map( + model, (traj_jaco or static_jaco_config or {}).get("joint_names", []) + ) + atool_joint_map = get_joint_map(model, (traj_atool or {}).get("joint_names", [])) + + # --- Prepare a unified list of waypoints for consistent navigation --- + waypoints = [] + if execution_mode in ["Jaco Only", "Articutool Only"]: + waypoints = (traj_jaco or traj_atool).get("points", []) + elif execution_mode == "Sequential" and traj_jaco and traj_atool: + waypoints.extend(traj_jaco["points"]) + waypoints.extend(traj_atool["points"]) + elif execution_mode == "Synchronous" and traj_jaco: + waypoints = traj_jaco["points"] + + if not waypoints: + print("No waypoints to display for this stage.") + input("Press Enter to continue...") + return "back" + + current_idx = 0 + + while True: + # This ensures a clean slate and prevents state from bleeding over. + q = pin.neutral(model) + + # 1. Start with the static Jaco pose from the previous stage, if available. + if static_jaco_config: + update_q_from_waypoint( + q, + model, + {"positions": static_jaco_config["positions"]}, + static_jaco_config["joint_names"], + jaco_joint_map, + ) + + # 2. Layer on the animated joint positions for the current waypoint. + if execution_mode == "Sequential": + # For sequential, we need to know the final pose of the first trajectory + jaco_end_waypoint = traj_jaco["points"][-1] + if current_idx >= len(traj_jaco["points"]): + update_q_from_waypoint( + q, + model, + jaco_end_waypoint, + traj_jaco["joint_names"], + jaco_joint_map, + ) + update_q_from_waypoint( + q, + model, + waypoints[current_idx], + traj_atool["joint_names"], + atool_joint_map, + ) + else: + update_q_from_waypoint( + q, + model, + waypoints[current_idx], + traj_jaco["joint_names"], + jaco_joint_map, + ) + if traj_atool and traj_atool["points"]: + update_q_from_waypoint( + q, + model, + traj_atool["points"][0], + traj_atool["joint_names"], + atool_joint_map, + ) + else: + if traj_jaco: + update_q_from_waypoint( + q, + model, + traj_jaco["points"][min(current_idx, len(traj_jaco["points"]) - 1)], + traj_jaco["joint_names"], + jaco_joint_map, + ) + if traj_atool: + update_q_from_waypoint( + q, + model, + traj_atool["points"][ + min(current_idx, len(traj_atool["points"]) - 1) + ], + traj_atool["joint_names"], + atool_joint_map, + ) + + # 3. 
Update and display the final calculated pose + pin.forwardKinematics(model, data, q) + pin_viz.display(q) + + print(f"\nDisplaying Frame: {current_idx + 1}/{len(waypoints)}") + print(f"--- Stage: {selected_stage['stage_name']} ---") + print( + "Commands: [n]ext, [p]rev, [f]irst, [l]ast, [a]nimate, [b]ack to stage selection, [q]uit" + ) + user_input = input("Enter command: ").strip().lower() + + if user_input == "q": + return "quit" + if user_input == "b": + return "back" + + if user_input == "n": + current_idx = min(current_idx + 1, len(waypoints) - 1) + elif user_input == "p": + current_idx = max(current_idx - 1, 0) + elif user_input == "f": + current_idx = 0 + elif user_input == "l": + current_idx = len(waypoints) - 1 + elif user_input == "a": + print("Animating... Press Ctrl+C to stop.") + try: + for i in range(current_idx, len(waypoints)): + # Also reset q inside the animation loop for consistency + q_anim = pin.neutral(model) + if static_jaco_config: + update_q_from_waypoint( + q_anim, + model, + {"positions": static_jaco_config["positions"]}, + static_jaco_config["joint_names"], + jaco_joint_map, + ) + + if execution_mode == "Sequential": + jaco_end_waypoint = traj_jaco["points"][-1] + if i >= len(traj_jaco["points"]): + update_q_from_waypoint( + q_anim, + model, + jaco_end_waypoint, + traj_jaco["joint_names"], + jaco_joint_map, + ) + update_q_from_waypoint( + q_anim, + model, + waypoints[i], + traj_atool["joint_names"], + atool_joint_map, + ) + else: + update_q_from_waypoint( + q_anim, + model, + waypoints[i], + traj_jaco["joint_names"], + jaco_joint_map, + ) + if traj_atool and traj_atool["points"]: + update_q_from_waypoint( + q_anim, + model, + traj_atool["points"][0], + traj_atool["joint_names"], + atool_joint_map, + ) + else: + if traj_jaco: + update_q_from_waypoint( + q_anim, + model, + traj_jaco["points"][ + min(i, len(traj_jaco["points"]) - 1) + ], + traj_jaco["joint_names"], + jaco_joint_map, + ) + if traj_atool: + update_q_from_waypoint( + q_anim, + model, + traj_atool["points"][ + min(i, len(traj_atool["points"]) - 1) + ], + traj_atool["joint_names"], + atool_joint_map, + ) + + pin.forwardKinematics(model, data, q_anim) + pin_viz.display(q_anim) + print(f" Displaying frame {i + 1}/{len(waypoints)}", end="\r") + time.sleep(1.0 / args.fps) + current_idx = len(waypoints) - 1 + print("\nAnimation finished.") + except KeyboardInterrupt: + print("\nAnimation stopped.") + + +def trial_visualization_loop(pin_viz, model, data, trial_df, args): + """ + The main menu loop for an individual trial. Handles trials with + no successful stages gracefully. + """ + trial_id = trial_df.iloc[0]["trial_id"] + scene_poses = trial_df.iloc[0]["scene_poses"] + + # 1. Immediately visualize the scene for this trial + draw_scene_frames(pin_viz, scene_poses) + + # 2. 
Prepare a map of all trajectories in this trial for state carry-over + stage_data_map = {row["stage_name"]: row for row in trial_df.to_dict("records")} + stage_order = [ + "HomeToAbovePlate", + "AbovePlateToAboveFood", + "AboveFoodToInFood", + "LevelArticutool", + "Resting", + ] + + while True: + print(f"\n--- Visualizing Trial ID: {trial_id} ---") + print("Scene frames are now displayed.") + + available_stages = [ + row for row in trial_df.to_dict("records") if row["status"] == "Success" + ] + + if not available_stages: + print("No successful trajectories available to visualize for this trial.") + print(" [m] Back to Trial Selection") + print(" [q] Quit") + choice_str = input(f"Enter choice (m/q): ").strip().lower() + if choice_str == "q": + return "quit" + if choice_str == "m": + return "menu" + print("Invalid choice.") + continue + + print("Select a trajectory to play:") + for i, stage in enumerate(available_stages): + print( + f" [{i + 1}] {stage['stage_name']} (Mode: {stage['execution_mode']})" + ) + print(" [m] Back to Trial Selection") + print(" [q] Quit") + + try: + choice_str = ( + input(f"Enter choice (1-{len(available_stages)} or m/q): ") + .strip() + .lower() + ) + if choice_str == "q": + return "quit" + if choice_str == "m": + return "menu" + + selected_stage_data = available_stages[int(choice_str) - 1] + + # Find the previous stage to set the robot's static starting pose + try: + current_stage_index = stage_order.index( + selected_stage_data["stage_name"] + ) + if current_stage_index > 0: + for i in range(current_stage_index - 1, -1, -1): + prev_stage_name = stage_order[i] + if ( + prev_stage_name in stage_data_map + and stage_data_map[prev_stage_name]["traj_jaco"] + ): + prev_traj_jaco = stage_data_map[prev_stage_name][ + "traj_jaco" + ] + if prev_traj_jaco and prev_traj_jaco.get("points"): + last_waypoint = prev_traj_jaco["points"][-1] + selected_stage_data["static_jaco_config"] = { + "positions": last_waypoint["positions"], + "joint_names": prev_traj_jaco["joint_names"], + } + break + except ValueError: + pass + + # Enter the playback loop for the chosen trajectory + action = trajectory_playback_loop( + pin_viz, model, data, selected_stage_data, args + ) + if action == "quit": + return "quit" + # If 'back', the loop will continue, re-displaying this menu + + except (ValueError, IndexError): + print("Invalid choice.") + + +def main(args): + """Main execution function with the new trial-centric workflow.""" + print("--- Interactive Benchmark Trajectory Visualizer ---") + + df = load_benchmark_data(args.benchmark_files) + if df.empty: + print("No data could be loaded. 
Exiting.") + return + + package_dirs = find_ros_package_paths(args.xacro_file) + urdf_string = xacro_to_urdf_string(args.xacro_file) + if not urdf_string: + return + + model, collision_model, visual_model, data = load_pinocchio_model_from_urdf_string( + urdf_xml_string=urdf_string, package_dirs=package_dirs + ) + if not model: + return + + try: + visualizer = meshcat.Visualizer().open() + pin_viz = pin.visualize.MeshcatVisualizer(model, collision_model, visual_model) + pin_viz.initViewer(viewer=visualizer) + pin_viz.loadViewerModel(rootNodeName="robot") + print(f"\nMeshCat viewer URL: {visualizer.url()}\n") + except Exception as e: + print(f"Error initializing MeshCat: {e}") + return + + while True: + trial_df = select_trial(df) + if trial_df is None or trial_df.empty: + break + + action = trial_visualization_loop(pin_viz, model, data, trial_df, args) + if action == "quit": + break + + print("Visualizer finished.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Visualize robot trajectories from benchmark files." + ) + parser.add_argument("xacro_file", type=str, help="Path to the robot XACRO file.") + parser.add_argument( + "benchmark_files", nargs="+", help="One or more benchmark JSON result files." + ) + parser.add_argument( + "--fps", type=int, default=30, help="Frames per second for animation." + ) + args = parser.parse_args() + main(args) diff --git a/ada_feeding/scripts/visualize_trajectory.py b/ada_feeding/scripts/visualize_trajectory.py new file mode 100755 index 00000000..1938561b --- /dev/null +++ b/ada_feeding/scripts/visualize_trajectory.py @@ -0,0 +1,466 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-2025, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +""" +This script interactively visualizes trajectories from holistic_benchmark.py +output files using Pinocchio and MeshCat. + +It allows a user to load multiple benchmark result files, choose a specific +planning mode, and then select a successful trajectory from that mode to +inspect frame-by-frame or animate. The script runs in a persistent session, +allowing multiple trajectories to be viewed without relaunching. 
+""" + +# Standard imports +import pinocchio as pin +import pinocchio.visualize +import meshcat +import numpy as np +import json +import subprocess +import tempfile +import os +import argparse +import time +import sys +import math +from typing import Optional, Dict, Any, List +import pandas as pd + +# Third-party imports for saving images/videos +# Note: You may need to install imageio and a backend: +# pip install imageio imageio-ffmpeg +from PIL import Image +import imageio + + +# Define Articutool joint names as they appear in the Pinocchio model +ARTICUTOOL_PITCH_JOINT_NAME = "atool_joint1" +ARTICUTOOL_ROLL_JOINT_NAME = "atool_joint2" + + +def xacro_to_urdf_string(xacro_filename: str, logger_func=print) -> Optional[str]: + """Converts a Xacro file to a URDF XML string using ros2 run xacro.""" + if not os.path.exists(xacro_filename): + logger_func(f"Error: Xacro file not found at {xacro_filename}") + return None + try: + process = subprocess.run( + ["ros2", "run", "xacro", "xacro", xacro_filename], + check=True, + capture_output=True, + text=True, + ) + return process.stdout + except Exception as e: + logger_func(f"An unexpected error occurred during XACRO processing: {e}") + return None + + +def find_ros_workspace_root(start_path: str) -> Optional[str]: + """Searches upwards from a starting path for a directory containing 'src'.""" + current_path = os.path.abspath(start_path) + while True: + if os.path.isdir(os.path.join(current_path, "src")): + return current_path + parent_path = os.path.dirname(current_path) + if parent_path == current_path: # Reached the filesystem root + return None + current_path = parent_path + + +def load_pinocchio_model_from_urdf_string( + urdf_xml_string: str, xacro_file_path: str, logger_func=print +): + """Loads a Pinocchio model from a URDF XML string via a temporary file.""" + temp_urdf_path = "" + package_dirs = [] + try: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".urdf", delete=False + ) as temp_file: + temp_urdf_path = temp_file.name + temp_file.write(urdf_xml_string) + + # Robustly find package directories for mesh files + ros_package_path = os.environ.get("ROS_PACKAGE_PATH") + if ros_package_path: + package_dirs.extend( + [p for p in ros_package_path.split(os.pathsep) if os.path.isdir(p)] + ) + + ws_root = find_ros_workspace_root(os.path.dirname(xacro_file_path)) + if ws_root: + logger_func(f"Found potential workspace root at: {ws_root}") + src_dir = os.path.join(ws_root, "src") + if os.path.isdir(src_dir) and src_dir not in package_dirs: + package_dirs.append(src_dir) + install_dir = os.path.join(ws_root, "install") + if os.path.isdir(install_dir) and install_dir not in package_dirs: + package_dirs.append(install_dir) + + package_dirs = list(dict.fromkeys(package_dirs)) + logger_func(f"Using package search paths for Pinocchio: {package_dirs}") + + # Load the kinematic model from the temporary URDF file path. + model = pin.buildModelFromUrdf(temp_urdf_path) + + # Load the geometry (visual/collision meshes) using the package search paths. + visual_model = pin.buildGeomFromUrdf( + model, temp_urdf_path, pin.GeometryType.VISUAL, package_dirs=package_dirs + ) + collision_model = pin.buildGeomFromUrdf( + model, temp_urdf_path, pin.GeometryType.COLLISION, package_dirs=package_dirs + ) + + data = model.createData() + logger_func(f"Pinocchio model loaded. 
Nq: {model.nq}, Nv: {model.nv}") + return model, collision_model, visual_model, data + except Exception as e: + logger_func("\n" + "=" * 80) + logger_func("FATAL: Error loading Pinocchio model.") + logger_func( + "This often happens if mesh files (.stl, .dae) referenced in the URDF cannot be found." + ) + logger_func( + "Please ensure your ROS 2 workspace is sourced correctly, so that `package://` paths can be resolved." + ) + logger_func(f"Pinocchio search paths used: {package_dirs}") + logger_func(f"Original Pinocchio error: {e}") + logger_func("=" * 80 + "\n") + return None, None, None, None + finally: + if temp_urdf_path and os.path.exists(temp_urdf_path): + os.remove(temp_urdf_path) + + +def load_benchmark_data(file_paths: List[str], logger_func=print) -> pd.DataFrame: + """Loads and concatenates data from multiple benchmark JSON files.""" + all_data = [] + for file_path in file_paths: + try: + with open(file_path, "r") as f: + data = json.load(f) + all_data.extend(data) + except Exception as e: + logger_func(f"Warning: Could not load or parse {file_path}. Error: {e}") + if not all_data: + return pd.DataFrame() + logger_func( + f"Successfully loaded {len(all_data)} total trials from {len(file_paths)} file(s)." + ) + return pd.json_normalize(all_data, sep="_") + + +def get_pinocchio_joint_info( + model: pin.Model, joint_name: str +) -> Optional[Dict[str, Any]]: + """Gets key info for a named joint in the Pinocchio model.""" + if model.existJointName(joint_name): + joint_id = model.getJointId(joint_name) + joint_obj = model.joints[joint_id] + if joint_obj.idx_q >= 0: + return { + "q_idx_start": joint_obj.idx_q, + "nq": joint_obj.nq, + "nv": joint_obj.nv, + } + return None + + +def set_q_from_waypoint( + q_vector: np.ndarray, + model: pin.Model, + waypoint: Dict[str, Any], + jaco_mappings, + atool_pitch_info, + atool_roll_info, + is_rigid: bool, +): + """Updates Pinocchio q_vector based on a waypoint from the benchmark data.""" + # Set Jaco arm joints + jaco_positions = waypoint.get("jaco_positions_rad", []) + for k, mapping_info in enumerate(jaco_mappings): + if mapping_info and k < len(jaco_positions): + theta_traj = jaco_positions[k] + q_idx_start, nq = mapping_info["q_idx_start"], mapping_info["nq"] + if nq == 1: + q_vector[q_idx_start] = theta_traj + elif nq == 2: # Revolute joint with quaternion representation + q_vector[q_idx_start], q_vector[q_idx_start + 1] = ( + np.cos(theta_traj), + np.sin(theta_traj), + ) + + # Set Articutool joints + articutool_solution = waypoint.get("articutool_solution_rad") + is_feasible = isinstance(articutool_solution, dict) + + if is_rigid or not is_feasible: + pitch_sol, roll_sol = 0.0, 0.0 + else: + pitch_sol = articutool_solution.get("pitch", 0.0) + roll_sol = articutool_solution.get("roll", 0.0) + + for sol, info in [(pitch_sol, atool_pitch_info), (roll_sol, atool_roll_info)]: + if sol is not None and info: + q_idx, nq = info["q_idx_start"], info["nq"] + if nq == 1: + q_vector[q_idx] = sol + elif nq == 2: + q_vector[q_idx], q_vector[q_idx + 1] = np.cos(sol), np.sin(sol) + + +def select_trajectory(df: pd.DataFrame) -> Optional[Dict[str, Any]]: + """Interactively prompts the user to select a mode and trajectory.""" + # --- Step 1: Select Planning Mode --- + modes = df["planning_mode"].unique() + print("\n--- Please Select a Planning Mode ---") + for i, mode in enumerate(modes): + print(f" [{i + 1}] {mode}") + print(" [q] Quit") + + try: + choice_str = input(f"Enter choice (1-{len(modes)} or q): ").strip().lower() + if choice_str == "q": + return 
None + mode_choice = int(choice_str) - 1 + if not 0 <= mode_choice < len(modes): + raise ValueError + selected_mode = modes[mode_choice] + except (ValueError, IndexError): + print("Invalid choice.") + return "retry" # Special value to retry selection + + # --- Step 2: Select Trajectory from Successful Trials --- + mode_df = df[(df["planning_mode"] == selected_mode) & (df["status"] == "Success")] + if mode_df.empty: + print(f"No successful trials found for mode: {selected_mode}") + return "retry" + + trajectories = mode_df.sort_values("task_id").to_dict("records") + print(f"\n--- Select a Successful Trajectory from '{selected_mode}' ---") + for i, trial in enumerate(trajectories): + print( + f" [{i + 1}] Task ID: {trial['task_id']} (Planning Time: {trial['planning_time_s']:.2f}s)" + ) + print(" [m] Back to Mode Selection") + + try: + choice_str = ( + input(f"Enter choice (1-{len(trajectories)} or m): ").strip().lower() + ) + if choice_str == "m": + return "retry" + traj_choice = int(choice_str) - 1 + if not 0 <= traj_choice < len(trajectories): + raise ValueError + return trajectories[traj_choice] + except (ValueError, IndexError): + print("Invalid choice.") + return "retry" + + +def visualization_loop( + pin_viz, model, waypoints, jaco_mappings, atool_pitch_info, atool_roll_info, args +): + """Runs the interactive UI for a single selected trajectory.""" + q = pin.neutral(model) + is_rigid_mode = args.rigid + current_idx = 0 + + while True: + waypoint_info = waypoints[current_idx] + set_q_from_waypoint( + q, + model, + waypoint_info, + jaco_mappings, + atool_pitch_info, + atool_roll_info, + is_rigid_mode, + ) + pin_viz.display(q) + + is_feasible_in_data = isinstance( + waypoint_info.get("articutool_solution_rad"), dict + ) + rigidity_status = ( + "ON (Rigid)" + if is_rigid_mode + else f"OFF (Data says feasible: {is_feasible_in_data})" + ) + + print(f"\nDisplaying Frame: {current_idx + 1}/{len(waypoints)}") + print(f"Articutool Rigidity: {rigidity_status}") + print( + "Commands: [n]ext, [p]rev, [f]irst, [l]ast, [t]oggle, [a]nimate, [s]creenshot, [m]enu, [q]uit" + ) + + user_input = input("Enter command: ").strip().lower() + + if user_input == "q": + return "quit" + if user_input == "m": + return "menu" + + if user_input == "n": + current_idx = min(current_idx + 1, len(waypoints) - 1) + elif user_input == "p": + current_idx = max(current_idx - 1, 0) + elif user_input == "f": + current_idx = 0 + elif user_input == "l": + current_idx = len(waypoints) - 1 + elif user_input == "t": + is_rigid_mode = not is_rigid_mode + print(f"Articutool rigidity toggled to: {'ON' if is_rigid_mode else 'OFF'}") + elif user_input == "s": + print("Taking screenshot...") + img = visualizer.get_image() + rigidity_str = "rigid" if is_rigid_mode else "active" + filepath = os.path.join( + args.output_dir, f"frame_{current_idx + 1}_{rigidity_str}.png" + ) + img.save(filepath) + print(f"Screenshot saved to {filepath}") + elif user_input == "a": + print("Animating... 
Press Ctrl+C to stop.") + try: + for i in range(current_idx, len(waypoints)): + set_q_from_waypoint( + q, + model, + waypoints[i], + jaco_mappings, + atool_pitch_info, + atool_roll_info, + is_rigid_mode, + ) + pin_viz.display(q) + print(f" Displaying frame {i + 1}/{len(waypoints)}", end="\r") + time.sleep(1.0 / args.fps) + current_idx = len(waypoints) - 1 + print("\nAnimation finished.") + except KeyboardInterrupt: + print("\nAnimation stopped.") + elif user_input: + print("Invalid command.") + + +def main(args): + print("--- Interactive Benchmark Trajectory Visualizer ---") + + # --- Load Data and Models (One-time setup) --- + df = load_benchmark_data(args.benchmark_files) + if df.empty: + print("No data could be loaded. Exiting.") + return + + urdf_string = xacro_to_urdf_string(args.xacro_file) + if not urdf_string: + return + + model, collision_model, visual_model, data = load_pinocchio_model_from_urdf_string( + urdf_string, args.xacro_file + ) + if not model: + return + + # --- Initialize Visualizer (One-time setup) --- + try: + global visualizer + visualizer = meshcat.Visualizer().open() + pin_viz = pin.visualize.MeshcatVisualizer(model, collision_model, visual_model) + pin_viz.initViewer(viewer=visualizer) + pin_viz.loadViewerModel(rootNodeName=model.name or "robot") + print(f"MeshCat viewer URL: {visualizer.url()}") + except Exception as e: + print(f"Error initializing MeshCat: {e}") + return + + # --- Main Application Loop --- + while True: + selected_trial = select_trajectory(df) + + if selected_trial is None: # User chose to quit + break + if selected_trial == "retry": # User made an invalid choice or wants to go back + continue + + waypoints = selected_trial.get("trajectory_metrics_waypoints_data", []) + if not waypoints: + print("Selected trial contains no waypoints. Returning to menu.") + continue + + if args.output_dir: + os.makedirs(args.output_dir, exist_ok=True) + print(f"Screenshots and videos will be saved to: {args.output_dir}") + + jaco_joint_names = [f"{args.prefix}_joint_{i + 1}" for i in range(6)] + jaco_mappings = [ + get_pinocchio_joint_info(model, name) for name in jaco_joint_names + ] + articutool_pitch_info = get_pinocchio_joint_info( + model, ARTICUTOOL_PITCH_JOINT_NAME + ) + articutool_roll_info = get_pinocchio_joint_info( + model, ARTICUTOOL_ROLL_JOINT_NAME + ) + + if ( + not all(jaco_mappings) + or not articutool_pitch_info + or not articutool_roll_info + ): + print("Error: Could not map all required joints to Pinocchio model.") + for name, mapping in zip(jaco_joint_names, jaco_mappings): + if not mapping: + print(f" - FAILED to find joint '{name}' in the Pinocchio model.") + continue + + # Enter the visualization UI for the selected trajectory + action = visualization_loop( + pin_viz, + model, + waypoints, + jaco_mappings, + articutool_pitch_info, + articutool_roll_info, + args, + ) + + if action == "quit": + break + + print("Visualizer finished.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Visualize robot trajectories from benchmark files." + ) + parser.add_argument("xacro_file", type=str, help="Path to the robot XACRO file.") + parser.add_argument( + "benchmark_files", nargs="+", help="One or more benchmark JSON result files." 
+ ) + # --- FIX: Changed default prefix to the correct one --- + parser.add_argument( + "--prefix", + type=str, + default="j2n6s200", + help="The prefix for the Jaco arm's joint names (e.g., j2n6s200).", + ) + parser.add_argument("--rigid", action="store_true", help="Start in rigid mode.") + parser.add_argument( + "--output-dir", + type=str, + default="visualization_output", + help="Directory to save screenshots and videos.", + ) + parser.add_argument( + "--fps", type=int, default=30, help="Frames per second for animation." + ) + args = parser.parse_args() + main(args) diff --git a/ada_feeding_action_select/ada_feeding_action_select/helpers.py b/ada_feeding_action_select/ada_feeding_action_select/helpers.py index 535dab42..59980b7f 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/helpers.py +++ b/ada_feeding_action_select/ada_feeding_action_select/helpers.py @@ -117,6 +117,20 @@ def get_action_library( schema.ext_force = element["ext_force"] schema.ext_torque = element["ext_torque"] + schema.post_move_into_primitive_name = str( + element.get("post_move_into_primitive_name", "NONE") + ) + schema.post_move_into_primitive_params = [ + float(p) for p in element.get("post_move_into_primitive_params", []) + ] + + schema.post_acquisition_primitive_name = str( + element.get("post_acquisition_primitive_name", "NONE") + ) + schema.post_acquisition_primitive_params = [ + float(p) for p in element.get("post_acquisition_primitive_params", []) + ] + schema.align_to_robot_base = bool(element.get("align_to_robot_base", False)) library.append(schema) return library diff --git a/ada_feeding_action_select/config/acquisition_library.yaml b/ada_feeding_action_select/config/acquisition_library.yaml index c6494abc..5bd9b577 100644 --- a/ada_feeding_action_select/config/acquisition_library.yaml +++ b/ada_feeding_action_select/config/acquisition_library.yaml @@ -262,3 +262,117 @@ actions: - -0.0486593 - -0.1055426 pre_torque: 4.0 +# Scrape / Scoop with fork +- ext_angular: [0.0, 0.0, 0.0] + ext_duration: 1.0 + ext_force: 50.0 + ext_linear: [0.0, 0.0, 0.08] # Lift after scoop + ext_torque: 4.0 + grasp_angular: [0.0, 0.0, 0.0] + grasp_duration: 0.0 + grasp_force: 15.0 + grasp_linear: [0.0, 0.0, 0.0] + grasp_torque: 4.0 + pre_force: 20.0 + pre_offset: # Tool tip target position IN FOOD_FRAME at end of "MoveInto" + - 0.02 # X: e.g., 2cm "into" the food along food_frame X-axis + - 0.0 # Y + - -0.01 # Z: e.g., 1cm down from food_frame origin + pre_pos: # Defines DIRECTION for "MoveAbove" standoff IN FOOD_FRAME + - -1.0 # Approach from food_frame -X (will be normalized & scaled by move_above_dist_m) + - 0.0 # Y + - 0.0 # Z (can add a small positive Z for height if needed, e.g., 0.1 or 0.2) + pre_quat: # Tool tip orientation IN FOOD_FRAME + - 0.353453 # qx + - 0.6122721 # qy + - 0.6127465 # qz + - 0.3531793 # qw + pre_torque: 4.0 +# Liquid dip with spoon +- ext_angular: [0.0, 0.0, 0.0] + ext_duration: 0.1 + ext_force: 50.0 + ext_linear: [0.0, 0.0, 0.04] + ext_torque: 4.0 + grasp_angular: [0.0, 0.0, 0.0] + grasp_duration: 0.0 + grasp_force: 15.0 + grasp_linear: [0.0, 0.0, 0.0] + grasp_torque: 4.0 + pre_force: 20.0 + pre_offset: + - 0.0 + - 0.0 + - 0.0 + pre_pos: + - 0.0 + - 0.0 + - 1.0 + pre_quat: + - 0.353453 + - 0.6122721 + - 0.6127465 + - 0.3531793 + pre_torque: 4.0 +# Granular scoop with spoon +- ext_angular: [0.0, 0.0, 0.0] + ext_duration: 0.1 + ext_force: 50.0 + ext_linear: [0.0, 0.0, 0.0] + ext_torque: 8.0 + grasp_angular: [0.0, 0.0, 0.0] + grasp_duration: 1.0 + grasp_force: 70.0 + grasp_linear: 
[-0.05, 0.0, 0.0] + grasp_torque: 15.0 + pre_force: 20.0 + pre_offset: + - -0.035 + - 0.0 + - -0.02 + pre_pos: + - -0.035 + - 0.0 + - 1.0 + pre_quat: + - 0.2759853 + - 0.6508167 + - 0.6517596 + - 0.2747348 + pre_torque: 8.0 + post_move_into_primitive_name: "NONE" + post_move_into_primitive_params: [] + post_acquisition_primitive_name: "VIBRATE_ROLL" + post_acquisition_primitive_params: [5.0, 0.1, 1.0] # [frequency_hz, amplitude_rad, duration_sec] + align_to_robot_base: True +# Granular + Liquid scoop with spoon +- ext_angular: [0.0, 0.0, 0.0] + ext_duration: 0.1 + ext_force: 50.0 + ext_linear: [0.0, 0.0, 0.0] + ext_torque: 8.0 + grasp_angular: [0.0, 0.0, 0.0] + grasp_duration: 1.0 + grasp_force: 70.0 + grasp_linear: [-0.05, 0.0, 0.0] + grasp_torque: 15.0 + pre_force: 20.0 + pre_offset: + - -0.035 + - 0.0 + - -0.02 + pre_pos: + - -0.035 + - 0.0 + - 1.0 + pre_quat: + - -0.1424887 + - -0.6926236 + - -0.6933992 + - -0.1398724 + pre_torque: 8.0 + post_move_into_primitive_name: "NONE" + post_move_into_primitive_params: [] + post_acquisition_primitive_name: "NONE" + post_acquisition_primitive_params: [] + align_to_robot_base: True diff --git a/ada_feeding_msgs/msg/AcquisitionSchema.msg b/ada_feeding_msgs/msg/AcquisitionSchema.msg index 4a796d8c..c67559b2 100644 --- a/ada_feeding_msgs/msg/AcquisitionSchema.msg +++ b/ada_feeding_msgs/msg/AcquisitionSchema.msg @@ -63,3 +63,21 @@ builtin_interfaces/Duration ext_duration # Extraction Force/Torque Thresholds float64 ext_force float64 ext_torque + +# The name of the primitive to run *after* the MoveInto Cartesian push. +# e.g., "TWIRL_CW" or "NONE". +string post_move_into_primitive_name + +# Parameters for the post_move_into_primitive. +float64[] post_move_into_primitive_params + +# The name of the primitive to run *after* the grasp/lift is complete. +# e.g., "VIBRATE_ROLL" to settle food. +string post_acquisition_primitive_name + +# Parameters for the post_acquisition_primitive. +float64[] post_acquisition_primitive_params + +# Determines if the food_frame should be rotated to align its X-axis +# with the robot's approach vector. +bool align_to_robot_base diff --git a/ada_planning_scene/ada_planning_scene/collision_object_manager.py b/ada_planning_scene/ada_planning_scene/collision_object_manager.py index 149b89c0..739406c7 100644 --- a/ada_planning_scene/ada_planning_scene/collision_object_manager.py +++ b/ada_planning_scene/ada_planning_scene/collision_object_manager.py @@ -52,10 +52,10 @@ def __init__(self, node: Node): callback_group = ReentrantCallbackGroup() self.moveit2 = MoveIt2( node=self.__node, - joint_names=kinova.joint_names(), + joint_names=kinova.joint_names() + ["atool_joint1", "atool_joint2"], base_link_name=kinova.base_link_name(), - end_effector_name="forkTip", - group_name="jaco_arm", + end_effector_name="tool_tip", + group_name="jaco_arm_with_articutool", callback_group=callback_group, ) diff --git a/ada_planning_scene/config/ada_planning_scene.yaml b/ada_planning_scene/config/ada_planning_scene.yaml index 40d5696e..b63702d4 100644 --- a/ada_planning_scene/config/ada_planning_scene.yaml +++ b/ada_planning_scene/config/ada_planning_scene.yaml @@ -93,7 +93,7 @@ ada_planning_scene: in_front_of_face_wall: # a wall in front of the user's face so robot motions don't unnecessarily move towards them. 
primitive_type: 1 # shape_msgs/SolidPrimitive.BOX primitive_dims: [0.65, 0.01, 0.4] - position: [0.37, 0.17, 0.85] + position: [0.37, 0.22, 0.85] quat_xyzw: [0.0, 0.0, 0.0, 1.0] frame_id: root # the frame_id that the pose is relative to add_on_initialize: False # don't add this to the planning scene initially diff --git a/ada_planning_scene/launch/ada_moveit_launch.xml b/ada_planning_scene/launch/ada_moveit_launch.xml index d436250c..ca011fc9 100644 --- a/ada_planning_scene/launch/ada_moveit_launch.xml +++ b/ada_planning_scene/launch/ada_moveit_launch.xml @@ -10,6 +10,7 @@ + @@ -18,6 +19,8 @@ + + @@ -26,6 +29,7 @@ + diff --git a/start.py b/start.py index b0eb8662..3f568c59 100755 --- a/start.py +++ b/start.py @@ -24,6 +24,18 @@ "launch the web app." ), ) +parser.add_argument( + "--end_effector_tool", + default="fork", + help=("Which end-effector tool to use"), + choices=["fork", "spoon"], +) +parser.add_argument( + "--action", + default=0, + help=("Which action (index) to use"), + type=int, +) parser.add_argument( "-t", "--termination_wait_secs", @@ -153,7 +165,7 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "`./src/feeding_web_interface/feedingwebapp`)." ) else: - print(f"# Terminating the ada_feeding demo in **{ args.sim}**") + print(f"# Terminating the ada_feeding demo in **{args.sim}**") print( "################################################################################" ) @@ -169,9 +181,6 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "cd ./src/feeding_web_interface/feedingwebapp", "node --env-file=.env server.js", ], - "ft": [ - "ros2 run ada_feeding dummy_ft_sensor.py", - ], "camera": [ ( "ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml " @@ -180,6 +189,9 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "run_food_on_fork_detection:=false run_table_detection:=false " ), ], + "articutool": [ + f"ros2 launch articutool_system articutool.launch.py sim:=mock end_effector_tool:={args.end_effector_tool}", + ], "nano_bridge_sender": [ "ros2 launch nano_bridge sender.launch.xml", ], @@ -204,11 +216,14 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "feeding": [ ( "ros2 launch ada_feeding ada_feeding_launch.xml use_estop:=false " - f"policy:={args.policy}" + f"policy:={args.policy} " + f"end_effector_tool:={args.end_effector_tool} " + f"action:={args.action} " ), ], "moveit": [ - "ros2 launch ada_planning_scene ada_moveit_launch.xml sim:=mock" + "ros2 launch ada_planning_scene ada_moveit_launch.xml sim:=mock " + f"end_effector_tool:={args.end_effector_tool}" ], "browser": [ "cd ./src/feeding_web_interface/feedingwebapp", @@ -226,9 +241,6 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "cd ./src/feeding_web_interface/feedingwebapp", "node --env-file=.env server.js", ], - "ft": [ - "ros2 run ada_feeding dummy_ft_sensor.py", - ], "perception": [ ( "ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml " @@ -250,7 +262,8 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "run_web_bridge:=false run_food_detection:=false run_face_detection:=false " "run_food_on_fork_detection:=false run_table_detection:=false " "run_real_sense:=false " - f"policy:={args.policy}" + f"policy:={args.policy} " + f"action:={args.action} " ), ], "browser": [ @@ -273,8 +286,41 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "camera": [ "ssh nano@nano -t './start_nano.sh'", ], - "ft": [ - "ros2 run forque_sensor_hardware forque_sensor_hardware --ros-args -p 
host:=ft-sensor-2", + "articutool": [ + # This command chains several Docker commands on the remote RPi (babbage) + # The -t flag for SSH allocates a pseudo-terminal, which is often necessary + # for interactive docker commands and proper signal handling. + 'ssh charles@babbage -t "' + # 1. Stop the container if it's already running. `|| true` prevents errors if it's not running. + "docker stop articutool_container || true; " + # 2. Remove the stopped container to ensure a fresh start. + "docker rm articutool_container || true; " + # 3. Run a new container. + "docker run " + # --rm: Automatically remove the container when it exits/stops. + "--rm " + # -it: Interactive TTY, allows you to attach and see logs. + "-it " + # --name: Assign a consistent name for easy management. + "--name articutool_container " + # --network=host: Shares the host's networking stack. Easiest for ROS 2 discovery. + "--network=host " + # --privileged + volumes: Provide necessary permissions and access to host devices. + "--privileged " + "--device=/dev/imu:/dev/imu " + "--device=/dev/u2d2:/dev/u2d2 " + "--device=/dev/resense_ft:/dev/resense_ft " + "--volume /run/udev:/run/udev:ro " + "--volume /etc/udev:/etc/udev:ro " + "--volume /dev:/dev " + # Pass environment variables needed for ROS 2 communication. + "-e RMW_IMPLEMENTATION=rmw_cyclonedds_cpp " + f"-e ROS_DOMAIN_ID={args.real_domain_id} " # Pass the domain ID to the container! + # The image to run. + "ros2_articutool:latest" + # The CMD from your Dockerfile will be executed, which is: + # ros2 launch articutool_system articutool.launch.py sim:=real + '"' ], "rosbridge": [ "ros2 launch rosbridge_server rosbridge_websocket_launch.xml", @@ -283,15 +329,19 @@ async def main(args: argparse.Namespace, pwd: str) -> None: "ros2 launch ada_feeding_perception ada_feeding_perception.launch.py combine_perception_nodes:=true", ], "moveit": [ - "Xvfb :5 -screen 0 800x600x24 &" if not args.dev else "", - "export DISPLAY=:5" if not args.dev else "", - f"ros2 launch ada_planning_scene ada_moveit_launch.xml use_rviz:={'true' if args.dev else 'false'}", + # "Xvfb :5 -screen 0 800x600x24 &" if not args.dev else "", + # "export DISPLAY=:5" if not args.dev else "", + "ros2 launch ada_planning_scene ada_moveit_launch.xml " + f"use_rviz:={'true' if args.dev else 'false'} " + f"end_effector_tool:={args.end_effector_tool}", ], "feeding": [ - "sudo ./src/ada_feeding/configure_lovelace.sh", + # "sudo ./src/ada_feeding/configure_lovelace.sh", ( "ros2 launch ada_feeding ada_feeding_launch.xml " - f"use_estop:={'false' if args.dev else 'true'} run_web_bridge:=false policy:={args.policy}" + f"use_estop:={'false' if args.dev else 'true'} run_web_bridge:=false policy:={args.policy} " + f"end_effector_tool:={args.end_effector_tool} " + f"action:={args.action} " ), ], "browser": [