diff --git a/lib/README.md b/lib/README.md index 7d1c5ee7..96b75c00 100644 --- a/lib/README.md +++ b/lib/README.md @@ -1,3 +1,17 @@ # `lib/` These are libraries used by things in the `src/` directory. They're not Colcon packages, and shouldn't be built by `colcon build`. + +## Why a monorepo? + +[Back in the day](https://github.com/Sooner-Rover-Team/SoonerRoverTeamV), we had a monorepo - and it sucked. So, we understand your concerns more than you could ever know! ;D + +However, for a number of reasons, it's valuable in our modern codebase: + +- The Rover runs offline when testing, so without a monorepo, we often had trouble building remote dependencies. +- Rust `git` dependencies are useful, but it's harder to make changes to those dependencies. + - Previously, we'd have to clone the dependency to our computer, make the changes, commit w/ a new version in `Cargo.toml`, and, finally, update the dependency in all our other repos! Not fun... +- `colcon` has no support for building remote dependencies - they need to be in your `colcon` (ROS 2) workspace. +- When doing manual/offline testing on the NVIDIA Jetson, we'd be unable to make changes locally. + - That means swapping the ethernet cable to one of the bay's WAN-connect ports, then re-opening all your stuff on the new IP, and pulling changes made on GitHub. + - Local changes are much easier - we can just `scp` them to our computers, then commit there. 
:D diff --git a/src/drive_launcher/launch/drive.launch.py b/src/drive_launcher/launch/drive.launch.py index 7fe2b9de..0b8d00c2 100644 --- a/src/drive_launcher/launch/drive.launch.py +++ b/src/drive_launcher/launch/drive.launch.py @@ -27,11 +27,11 @@ def generate_launch_description() -> LaunchDescription: [ pkg_drive_launcher, "launch", - "rover.launch.py", + "rover.zed.launch.py", ] ) ] - ) + ), ), # # and `ebox.launch.py` is the ebox <=> autonomous communication diff --git a/src/drive_launcher/launch/helpers/navigation_stack.launch.py b/src/drive_launcher/launch/helpers/navigation_stack.launch.py index aedf3d58..05a16895 100644 --- a/src/drive_launcher/launch/helpers/navigation_stack.launch.py +++ b/src/drive_launcher/launch/helpers/navigation_stack.launch.py @@ -10,7 +10,6 @@ from launch.substitutions import ( LaunchConfiguration, PathJoinSubstitution, - PythonExpression, ) from launch_ros.actions import Node, SetParameter from launch_ros.actions.node import ExecuteProcess @@ -166,8 +165,7 @@ def _slam_toolbox( "launch", "drive_launcher", "slam_toolbox.launch.py", - # 😮‍💨 yet another hack... - PythonExpression(["'use_sim_time:=' + str(", use_sim_time, ")"]), + ["use_sim_time:=", use_sim_time], ], shell=True, output="screen", diff --git a/src/drive_launcher/launch/rover.zed.launch.py b/src/drive_launcher/launch/rover.zed.launch.py new file mode 100644 index 00000000..c8df1e77 --- /dev/null +++ b/src/drive_launcher/launch/rover.zed.launch.py @@ -0,0 +1,231 @@ +# this launch file handles launching Nav2 in the right order, with support for +# GPS coordinate mapping. 
+ +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, +) +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import ( + LaunchConfiguration, + PathJoinSubstitution, +) +from launch_ros.actions import Node + + +def generate_launch_description() -> LaunchDescription: + # whether or not we'll launch using sim time. + use_sim_time = LaunchConfiguration("use_sim_time") + + # start the utm conversion node. + # + # it allows us to provide GNSS coordinates to navigate to. wtihout it, we + # have no frame of reference onto the map. + utm_conversion_node: Node = Node( + executable="utm_conversion_node", + package="navigator", + name="utm_conversion_node", + parameters=[ + {"use_sim_time": use_sim_time}, + ], + ) + + # this collection of nodes is from the `robot_localization` package. these + # provide essential functionality for the Rover. + # + # together, they work to translate known positions from our (hopefully + # accurate) URDF model of the Rover into offsets. + # + # `robot_localization::navsat_transform_node` offsets the recv'd GPS + # coordinates such that they are local to the Rover's origin. + # + # next up, we have two `robot_localization::ekf_node` instances... + # + # the first is `ekf_filter_node_odom`, which reads from the IMU and + # converts its data into something usable on the frame (in other words, + # local "odometry" data). other nodes use that info for state estimation! + # + # on the other hand, we've got `ekf_filter_node_map`. it uses info from the + # `navsat_transform_node` and `ekf_filter_node_odom` to make a "global" + # representation of the map. it's used by the planners (and, as a result, + # directly by our Navigator). 
+ robot_localization: IncludeLaunchDescription = _robot_localization( + use_sim_time + ) + + # ok, so this part launches a few different things... + # + # first, we start some nodes from `slam_toolbox`, all of which provide + # SLAM tracking to the Rover. importantly, **this node provides the + # mandatory `map -> odom -> base_link` frame chain for Nav to start doing + # anything useful. + # + # after that, we start a Node (the `navigator::navigation_bringup_node`) to + # wait on the `slam_toolbox` to make those links properly. + # + # finally, we launch Nav2! it provides navigation utilties to the SoRo + # Navigator and beyond. + # + # it's primarily helpful for its provided algorithms that are really + # difficult (and verbose) to implement by hand. you control it through the + # various actions its plugins provide during navigation. + # + # in any case, here's that launch config... + navigation_stack: IncludeLaunchDescription = _navigation_bringup( + use_sim_time + ) + + # we'll also want the `robot_state_publisher::robot_state_publisher` node. + # + # it says where things are on the Rover in relation to one another, which + # is required for consistent mapping, good navigation, and object + # avoidance. + robot_state_publisher: IncludeLaunchDescription = _robot_state_publisher( + use_sim_time + ) + + # `ros2_control` is a collection of nodes that we use to control the Rover. + # + # it recvs instructions from any node, but in practice, it'll always come + # directly from the Nav2 stack directing the robot where to go (and how not + # to get stuck on giant cliff boulders). + # + # the nodes from this package are particularly useful in simulating and + # visualizing the Rover, as they provide simple `tf2` transforms on the + # wheels as they spin. + # + # we currently do not use this package for directly manipulating the + # wheels, as doing so would require writing a `ros2_control` hardware + # plugin. 
+ # + # that'd be an extremely painful task with our microcontroller setup, as + # there are no cross-platform networking APIs in the C++ stdlib. note that + # `boost` could be an option in the future if future Autonomous members9 + # would like to move toward a `ros2_control`-based approach. + ros2_control: IncludeLaunchDescription = _ros2_control(use_sim_time) + + # the `zed_official` launch file makes use of the (annoying, unportable) + # `zed-ros2-wrapper` package. + # + # we should aim to replace it as soon as possible, but we're using it at + # URC '25 for an easier configuration. + zed_official: IncludeLaunchDescription = _zed_official() + + return LaunchDescription( + [ + DeclareLaunchArgument("use_sim_time", default_value="False"), + utm_conversion_node, + robot_state_publisher, + robot_localization, + zed_official, + navigation_stack, + ros2_control, + ] + ) + + +def _robot_localization( + use_sim_time: LaunchConfiguration, +) -> IncludeLaunchDescription: + pkg_drive_launcher: str = get_package_share_directory("drive_launcher") + + return IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + PathJoinSubstitution( + [ + pkg_drive_launcher, + "launch", + "helpers", + "robot_localization.launch.py", + ] + ) + ] + ), + launch_arguments=[("use_sim_time", (use_sim_time))], + ) + + +def _robot_state_publisher( + use_sim_time: LaunchConfiguration, +) -> IncludeLaunchDescription: + pkg_drive_launcher: str = get_package_share_directory("drive_launcher") + + return IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + PathJoinSubstitution( + [ + pkg_drive_launcher, + "launch", + "helpers", + "robot_state_publisher.launch.py", + ] + ) + ] + ), + launch_arguments=[("use_sim_time", (use_sim_time))], + ) + + +def _navigation_bringup( + use_sim_time: LaunchConfiguration, +) -> IncludeLaunchDescription: + pkg_drive_launcher: str = get_package_share_directory("drive_launcher") + return IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + 
PathJoinSubstitution( + [ + pkg_drive_launcher, + "launch", + "helpers", + "navigation_stack.launch.py", + ] + ) + ], + ), + launch_arguments=[("use_sim_time", (use_sim_time))], + ) + + +def _ros2_control( + use_sim_time: LaunchConfiguration, +) -> IncludeLaunchDescription: + pkg_drive_launcher: str = get_package_share_directory("drive_launcher") + + return IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + PathJoinSubstitution( + [ + pkg_drive_launcher, + "launch", + "helpers", + "ros2_control.launch.py", + ] + ) + ] + ), + launch_arguments=[("use_sim_time", use_sim_time)], + ) + + +def _zed_official() -> IncludeLaunchDescription: + pkg_zed: str = get_package_share_directory("zed") + + return IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + PathJoinSubstitution( + [ + pkg_zed, + "launch", + "zed_official.launch.py", + ] + ) + ] + ), + ) diff --git a/src/drive_launcher/params/robot_localization/ekf_filter_node_odom.yaml b/src/drive_launcher/params/robot_localization/ekf_filter_node_odom.yaml index 8ebc4442..7a3ee973 100644 --- a/src/drive_launcher/params/robot_localization/ekf_filter_node_odom.yaml +++ b/src/drive_launcher/params/robot_localization/ekf_filter_node_odom.yaml @@ -21,7 +21,7 @@ debug_out_file: rl_ekf_filter_node_odom.log # configure imu for local usage - imu0: /sensors/imu + imu0: /sensors/depth_image/imu imu0_config: [ false, @@ -76,3 +76,25 @@ # false, # false, # ] + + # the ZED 2i provides "visual" odometry. 
we can use that here: + odom0: /sensors/depth_image/odom + odom0_config: [ + true, + true, + false, # Use x, y position + false, + false, + true, # Use yaw orientation + true, + true, + false, # Use vx, vy velocity + false, + false, + true, # Use vyaw angular velocity + false, + false, + false, # Don't use acceleration from odometry + ] + odom0_differential: false + odom0_queue_size: 10 diff --git a/src/zed/launch/internal.launch.py b/src/zed/launch/internal.launch.py new file mode 100644 index 00000000..fdc3f10d --- /dev/null +++ b/src/zed/launch/internal.launch.py @@ -0,0 +1,533 @@ +# Copyright 2024 Stereolabs, 2025 Barrett Ray + the Sooner Rover Team +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchContext, LaunchDescription, LaunchDescriptionEntity +from launch.actions import DeclareLaunchArgument, LogInfo, OpaqueFunction +from launch.conditions import IfCondition +from launch.substitutions import ( + Command, + LaunchConfiguration, + PathJoinSubstitution, + TextSubstitution, +) +from launch_ros.actions import ( + ComposableNodeContainer, + LoadComposableNodes, + Node, +) +from launch_ros.descriptions import ComposableNode +from launch_ros.parameters_type import SomeParameters + +# ZED Configurations to be loaded by ZED Node +default_config_common = os.path.join( + get_package_share_directory("zed_wrapper"), "config", "common" +) + +# FFMPEG Configuration to be loaded by ZED Node +default_config_ffmpeg = os.path.join( + get_package_share_directory("zed_wrapper"), "config", "ffmpeg.yaml" +) + +# Object Detection Configuration to be loaded by ZED Node +default_object_detection_config_path = os.path.join( + get_package_share_directory("zed_wrapper"), + "config", + "object_detection.yaml", +) +# Custom Object Detection Configuration to be loaded by ZED Node +default_custom_object_detection_config_path = os.path.join( + get_package_share_directory("zed_wrapper"), + "config", + "custom_object_detection.yaml", +) + +# URDF/xacro file to be loaded by the Robot State Publisher node +default_xacro_path = os.path.join( + get_package_share_directory("zed_wrapper"), "urdf", "zed_descr.urdf.xacro" +) + + +def parse_array_param(param: str) -> list[str]: + str = param.replace("[", "") + str = str.replace("]", "") + arr = str.split(",") + + return arr + + +def launch_setup( + context: LaunchContext, +) -> list[LaunchDescriptionEntity]: + return_array: list[LaunchDescriptionEntity] = [] + + # Launch configuration variables + svo_path = LaunchConfiguration("svo_path") + publish_svo_clock = LaunchConfiguration("publish_svo_clock") + + use_sim_time = 
LaunchConfiguration("use_sim_time") + sim_mode = LaunchConfiguration("sim_mode") + sim_address = LaunchConfiguration("sim_address") + sim_port = LaunchConfiguration("sim_port") + + stream_address = LaunchConfiguration("stream_address") + stream_port = LaunchConfiguration("stream_port") + + container_name = LaunchConfiguration("container_name") + namespace = LaunchConfiguration("namespace") + camera_name = LaunchConfiguration("camera_name") + camera_model = LaunchConfiguration("camera_model") + + node_name = LaunchConfiguration("node_name") + + ros_params_override_path = LaunchConfiguration("ros_params_override_path") + config_ffmpeg = LaunchConfiguration("ffmpeg_config_path") + object_detection_config_path = LaunchConfiguration( + "object_detection_config_path" + ) + custom_object_detection_config_path = LaunchConfiguration( + "custom_object_detection_config_path" + ) + + serial_number = LaunchConfiguration("serial_number") + camera_id = LaunchConfiguration("camera_id") + + publish_urdf = LaunchConfiguration("publish_urdf") + publish_tf = LaunchConfiguration("publish_tf") + publish_map_tf = LaunchConfiguration("publish_map_tf") + publish_imu_tf = LaunchConfiguration("publish_imu_tf") + xacro_path = LaunchConfiguration("xacro_path") + + custom_baseline = LaunchConfiguration("custom_baseline") + + enable_gnss = LaunchConfiguration("enable_gnss") + gnss_antenna_offset = LaunchConfiguration("gnss_antenna_offset") + + container_name_val: str = container_name.perform(context) + namespace_val: str = namespace.perform(context) + camera_name_val: str = camera_name.perform(context) + camera_model_val: str = camera_model.perform(context) + node_name_val: str = node_name.perform(context) + enable_gnss_val: str = enable_gnss.perform(context) + gnss_coords: list[str] = parse_array_param( + gnss_antenna_offset.perform(context) + ) + custom_baseline_val: str = custom_baseline.perform(context) + + if camera_name_val == "": + camera_name_val = "zed" + + if camera_model_val == 
"virtual" and float(custom_baseline_val) <= 0: + return [ + LogInfo( + msg="Please set a positive value for the 'custom_baseline' argument when using a 'virtual' Stereo Camera with two ZED X One devices." + ), + ] + + if namespace_val == "": + namespace_val = camera_name_val + else: + node_name_val = camera_name_val + + # Common configuration file + if ( + camera_model_val == "zed" + or camera_model_val == "zedm" + or camera_model_val == "zed2" + or camera_model_val == "zed2i" + or camera_model_val == "zedx" + or camera_model_val == "zedxm" + or camera_model_val == "virtual" + ): + config_common_path_val = default_config_common + "_stereo.yaml" + else: + config_common_path_val = default_config_common + "_mono.yaml" + + info = "Using common configuration file: " + config_common_path_val + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Camera configuration file + config_camera_path = os.path.join( + get_package_share_directory("zed_wrapper"), + "config", + camera_model_val + ".yaml", + ) + + info = "Using camera configuration file: " + config_camera_path + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # FFMPEG configuration file + info = "Using FFMPEG configuration file: " + config_ffmpeg.perform(context) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Object Detection configuration file + info = ( + "Using Object Detection configuration file: " + + object_detection_config_path.perform(context) + ) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Custom Object Detection configuration file + info = ( + "Using Custom Object Detection configuration file: " + + custom_object_detection_config_path.perform(context) + ) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # ROS parameters override file + ros_params_override_path_val = ros_params_override_path.perform(context) + if ros_params_override_path_val != "": + info = ( + "Using ROS parameters override file: " + + 
ros_params_override_path_val + ) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Xacro command with options + xacro_command = [] + xacro_command.append("xacro") + xacro_command.append(" ") + xacro_command.append(xacro_path.perform(context)) + xacro_command.append(" ") + xacro_command.append("camera_name:=") + xacro_command.append(camera_name_val) + xacro_command.append(" ") + xacro_command.append("camera_model:=") + xacro_command.append(camera_model_val) + xacro_command.append(" ") + xacro_command.append("custom_baseline:=") + xacro_command.append(custom_baseline_val) + if enable_gnss_val == "true": + xacro_command.append(" ") + xacro_command.append("enable_gnss:=true") + xacro_command.append(" ") + if len(gnss_coords) == 3: + xacro_command.append("gnss_x:=") + xacro_command.append(gnss_coords[0]) + xacro_command.append(" ") + xacro_command.append("gnss_y:=") + xacro_command.append(gnss_coords[1]) + xacro_command.append(" ") + xacro_command.append("gnss_z:=") + xacro_command.append(gnss_coords[2]) + xacro_command.append(" ") + + # Robot State Publisher node + rsp_name = camera_name_val + "_state_publisher" + rsp_node = Node( + condition=IfCondition(publish_urdf), + package="robot_state_publisher", + namespace=namespace_val, + executable="robot_state_publisher", + name=rsp_name, + output="screen", + parameters=[ + { + "use_sim_time": publish_svo_clock, + "robot_description": Command(xacro_command), + } + ], + ) + return_array.append(rsp_node) + + # ROS 2 Component Container + if container_name_val == "": + container_name_val = "zed_container" + container_exec: str = "component_container_isolated" + + zed_container = ComposableNodeContainer( + name=container_name_val, + namespace=namespace_val, + package="rclcpp_components", + executable=container_exec, + arguments=[ + "--use_multi_threaded_executor", + "--ros-args", + "--log-level", + "info", + ], + output="screen", + composable_node_descriptions=[], + ) + return_array.append(zed_container) + + 
# ZED Node parameters + node_parameters: SomeParameters = [ + # YAML files + config_common_path_val, # Common parameters + config_camera_path, # Camera related parameters + config_ffmpeg, # FFMPEG parameters + object_detection_config_path, # Object detection parameters + custom_object_detection_config_path, # Custom object detection parameters + # + # add the launch file params directly + { + "use_sim_time": use_sim_time, + "simulation.sim_enabled": sim_mode, + "simulation.sim_address": sim_address, + "simulation.sim_port": sim_port, + "stream.stream_address": stream_address, + "stream.stream_port": stream_port, + "general.camera_name": camera_name_val, + "general.camera_model": camera_model_val, + "svo.svo_path": svo_path, + "svo.publish_svo_clock": publish_svo_clock, + "general.serial_number": serial_number, + "general.camera_id": camera_id, + "pos_tracking.publish_tf": publish_tf, + "pos_tracking.publish_map_tf": publish_map_tf, + "sensors.publish_imu_tf": publish_imu_tf, + "gnss_fusion.gnss_fusion_enabled": enable_gnss, + }, + ] + + if ros_params_override_path_val != "": + node_parameters.append(ros_params_override_path) + + # ZED Wrapper component + if ( + camera_model_val == "zed" + or camera_model_val == "zedm" + or camera_model_val == "zed2" + or camera_model_val == "zed2i" + or camera_model_val == "zedx" + or camera_model_val == "zedxm" + or camera_model_val == "virtual" + ): + zed_wrapper_component = ComposableNode( + package="zed_components", + namespace=namespace_val, + plugin="stereolabs::ZedCamera", + name=node_name_val, + parameters=node_parameters, + extra_arguments=[{"use_intra_process_comms": True}], + ) + else: # 'zedxonegs' or 'zedxone4k') + zed_wrapper_component = ComposableNode( + package="zed_components", + namespace=namespace_val, + plugin="stereolabs::ZedCameraOne", + name=node_name_val, + parameters=node_parameters, + extra_arguments=[{"use_intra_process_comms": True}], + ) + + full_container_name = "/" + namespace_val + "/" + 
container_name_val + info = ( + "Loading ZED node `" + + node_name_val + + "` in container `" + + full_container_name + + "`" + ) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # we'll also add the `pointcloud_to_laserscan` component here + pointcloud_to_laserscan = ComposableNode( + package="pointcloud_to_laserscan", + namespace=namespace_val, + plugin="pointcloud_to_laserscan::PointCloudToLaserScanNode", + name="pointcloud_to_laserscan", + parameters=[ + PathJoinSubstitution( + [ + get_package_share_directory("zed"), + "params", + "pointcloud_to_laserscan.yaml", + ] + ) + ], + extra_arguments=[{"use_intra_process_comms": True}], + remappings=[ + ("cloud_in", f"{camera_name_val}/depth/points"), + ("scan", "/scan"), + ], + ) + + load_composable_node = LoadComposableNodes( + target_container=full_container_name, + composable_node_descriptions=[ + zed_wrapper_component, + pointcloud_to_laserscan, + ], + ) + return_array.append(load_composable_node) + + return return_array + + +def generate_launch_description(): + return LaunchDescription( + [ + DeclareLaunchArgument( + "camera_name", + default_value=TextSubstitution(text="zed"), + description="The name of the camera. It can be different from the camera model and it will be used as node `namespace`.", + ), + DeclareLaunchArgument( + "camera_model", + description="[REQUIRED] The model of the camera. Using a wrong camera model can disable camera features.", + choices=[ + "zed", + "zedm", + "zed2", + "zed2i", + "zedx", + "zedxm", + "virtual", + "zedxonegs", + "zedxone4k", + ], + ), + DeclareLaunchArgument( + "container_name", + default_value="", + description="The name of the container to be used to load the ZED component. If empty (default) a new container will be created.", + ), + DeclareLaunchArgument( + "namespace", + default_value="", + description="The namespace of the node. 
If empty (default) the camera name is used.", + ), + DeclareLaunchArgument( + "node_name", + default_value="zed_node", + description="The name of the zed_wrapper node. All the topic will have the same prefix: `///`. If a namespace is specified, the node name is replaced by the camera name.", + ), + DeclareLaunchArgument( + "ros_params_override_path", + default_value="", + description="The path to an additional parameters file to override the default values.", + ), + DeclareLaunchArgument( + "ffmpeg_config_path", + default_value=TextSubstitution(text=default_config_ffmpeg), + description="Path to the YAML configuration file for the FFMPEG parameters when using FFMPEG image transport plugin.", + ), + DeclareLaunchArgument( + "object_detection_config_path", + default_value=TextSubstitution( + text=default_object_detection_config_path + ), + description="Path to the YAML configuration file for the Object Detection parameters.", + ), + DeclareLaunchArgument( + "custom_object_detection_config_path", + default_value=TextSubstitution( + text=default_custom_object_detection_config_path + ), + description="Path to the YAML configuration file for the Custom Object Detection parameters.", + ), + DeclareLaunchArgument( + "serial_number", + default_value="0", + description="The serial number of the camera to be opened. It is mandatory to use this parameter or camera ID in multi-camera rigs to distinguish between different cameras. Use `ZED_Explorer -a` to retrieve the serial number of all the connected cameras.", + ), + DeclareLaunchArgument( + "camera_id", + default_value="-1", + description="The ID of the camera to be opened. It is mandatory to use this parameter or serial number in multi-camera rigs to distinguish between different cameras. 
Use `ZED_Explorer -a` to retrieve the ID of all the connected cameras.", + ), + DeclareLaunchArgument( + "publish_urdf", + default_value="true", + description="Enable URDF processing and starts Robot State Published to propagate static TF.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "publish_tf", + default_value="true", + description="Enable publication of the `odom -> camera_link` TF.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "publish_map_tf", + default_value="true", + description="Enable publication of the `map -> odom` TF. Note: Ignored if `publish_tf` is False.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "publish_imu_tf", + default_value="false", + description="Enable publication of the IMU TF. Note: Ignored if `publish_tf` is False.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "xacro_path", + default_value=TextSubstitution(text=default_xacro_path), + description="Path to the camera URDF file as a xacro file.", + ), + DeclareLaunchArgument( + "svo_path", + default_value=TextSubstitution(text="live"), + description="Path to an input SVO file.", + ), + DeclareLaunchArgument( + "publish_svo_clock", + default_value="false", + description="If set to `true` the node will act as a clock server publishing the SVO timestamp. This is useful for node synchronization", + ), + DeclareLaunchArgument( + "enable_gnss", + default_value="false", + description="Enable GNSS fusion to fix positional tracking pose with GNSS data from messages of type `sensor_msgs::msg::NavSatFix`. The fix topic can be customized in `common_stereo.yaml`.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "gnss_antenna_offset", + default_value="[]", + description="Position of the GNSS antenna with respect to the mounting point of the ZED camera. 
Format: [x,y,z]", + ), + DeclareLaunchArgument( + "use_sim_time", + default_value="false", + description="If set to `true` the node will wait for messages on the `/clock` topic to start and will use this information as the timestamp reference", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "sim_mode", + default_value="false", + description="Enable simulation mode. Set `sim_address` and `sim_port` to configure the simulator input.", + choices=["true", "false"], + ), + DeclareLaunchArgument( + "sim_address", + default_value="127.0.0.1", + description="The connection address of the simulation server. See the documentation of the supported simulation plugins for more information.", + ), + DeclareLaunchArgument( + "sim_port", + default_value="30000", + description="The connection port of the simulation server. See the documentation of the supported simulation plugins for more information.", + ), + DeclareLaunchArgument( + "stream_address", + default_value="", + description="The connection address of the input streaming server.", + ), + DeclareLaunchArgument( + "stream_port", + default_value="30000", + description="The connection port of the input streaming server.", + ), + DeclareLaunchArgument( + "custom_baseline", + default_value="0.0", + description="Distance between the center of ZED X One cameras in a custom stereo rig.", + ), + OpaqueFunction(function=launch_setup), + ] + ) diff --git a/src/zed/launch/zed_official.launch.py b/src/zed/launch/zed_official.launch.py new file mode 100644 index 00000000..05bac10e --- /dev/null +++ b/src/zed/launch/zed_official.launch.py @@ -0,0 +1,113 @@ +from launch import LaunchDescription +from launch.actions import GroupAction, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import PathJoinSubstitution +from launch_ros.actions import Node, SetRemap +from launch_ros.substitutions.find_package import get_package_share_directory + + +def 
generate_launch_description() -> LaunchDescription: + pkg_zed: str = get_package_share_directory("zed") + pkg_simulator: str = get_package_share_directory("simulator") + + # include ZED's bundled launch file to avoid verbose nonsense. + # + # doing so avoids launching all the nodes ourselves + zed_camera_launch_file: IncludeLaunchDescription = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [ + PathJoinSubstitution( + [ + get_package_share_directory("zed_wrapper"), + "launch", + "zed_camera.launch.py", + ] + ) + ] + ), + launch_arguments=[ + ("camera_model", "zed2i"), + # + # set namespace to align w/ our original spec + ("namespace", "/sensors/depth_image"), + # + # let other nodes take care of publishing to the tf tree + # ("publish_map_tf", "false"), + # ("publish_imu_tf", "false"), + # + # provide the correct path to the Rover URDF (3d model) + # ("xacro_path", f"{pkg_simulator}/resource/rover.urdf.xacro.xml"), + # + # configure the wrapper to use our sensors while mapping. this can + # increase accuracy and help in other subtle ways + ("enable_gnss", "true"), + # + # use config file with QoS overrides for GPS topic compatibility + ("ros_params_override_path", f"{get_package_share_directory('zed_wrapper')}/config/common_stereo.yaml"), + ], + ) + + # then, we'll put it within a "group action." + # + # that allows us to add topic remappings for all the nodes in the ZED + # launch file. which means we don't have a bunch of `/zed` topics randomly + # in our `/` namespace :P + remapping_group: GroupAction = GroupAction( + actions=[ + # add remappings. 
for more info about these, please see the ZED + # documentation: + # https://web.archive.org/web/20240522090026/https://www.stereolabs.com/docs/ros2/zed-node + # + # ...sensor fusion + odometry + SetRemap(src="/fix", dst="/sensors/gps"), + SetRemap(src="zed/imu/data", dst="/sensors/depth_image/imu"), + SetRemap(src="zed/odom", dst="/sensors/depth_image/odom"), + # + # ...depth camera stuff + SetRemap(src="depth/depth_registered", dst="depth_registered"), + SetRemap(src="depth/camera_info", dst="camera_info"), + SetRemap( + src="depth/depth_registered", + dst="/sensors/depth_image/depth_registered", + ), + SetRemap( + src="depth/camera_info", dst="/sensors/depth_image/camera_info" + ), + # + # ...pointcloud stuff + SetRemap(src="point_cloud", dst="/sensors/depth_image/point_cloud"), + # + # and, now, add the launch file itself. + # + # it's ran after all the other stuff, so the remaps will apply + zed_camera_launch_file, + ], + ) + + # we'll also add a node that just sticks the Zed camera (and all its many, + # many frames) onto the chassis. + # + # otherwise, it's kinda just... 
floating there + static_transform_node: Node = Node( + package="tf2_ros", + executable="static_transform_publisher", + arguments=[ + # transform (x, y, z) + "0", + "0", + "0", + # + # rotation (roll, yaw, pitch) + "0", + "0", + "0", + # + # parent_frame + "chassis", + # + # child_frame + "zed_camera_link", + ], + ) + + return LaunchDescription([remapping_group, static_transform_node]) diff --git a/src/zed/params/common_stereo.yaml b/src/zed/params/common_stereo.yaml new file mode 100644 index 00000000..6c88a41c --- /dev/null +++ b/src/zed/params/common_stereo.yaml @@ -0,0 +1,194 @@ +# config/common_stereo.yaml +# Common parameters to Stereolabs ZED Stereo cameras + +--- +/**: + ros__parameters: + #qos_overrides./sensors/gps.subscription.depth + #qos_overrides./sensors/gps.subscription.history + qos_overrides./sensors/gps.subscription.reliability: best_effort + + use_sim_time: false # Set to `true` only if there is a publisher for the simulated clock to the `/clock` topic. Normally used in simulation mode. + + simulation: + sim_enabled: false # Set to `true` to enable the simulation mode and connect to a simulation server + sim_address: '127.0.0.1' # The connection address of the simulation server. See the documentation of the supported simulation plugins for more information. + sim_port: 30000 # The connection port of the simulation server. See the documentation of the supported simulation plugins for more information. + + svo: + use_svo_timestamps: true # Use the SVO timestamps to publish data. If false, data will be published at the system time. + svo_loop: false # Enable loop mode when using an SVO as input source. 
NOTE: ignored if SVO timestamping is used + svo_realtime: true # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + + general: + camera_timeout_sec: 5 + camera_max_reconnect: 5 + camera_flip: false + self_calib: true # Enable the self-calibration process at camera opening. See https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#affeaa06cfc1d849e311e484ceb8edcc5 + serial_number: 0 # usually overwritten by launch file + pub_resolution: 'CUSTOM' # The resolution used for image and depth map publishing. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factor to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + pub_frame_rate: 30.0 # frequency of publishing of visual images and depth images + enable_image_validity_check: 1 # [SDK5 required] Sets the image validity check. If set to 1, the SDK will check if the frames are valid before processing. + gpu_id: -1 + optional_opencv_calibration_file: '' # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. 
Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + async_image_retrieval: false # Enable/disable the asynchronous image retrieval - Note: enable only to improve SVO recording performance + + video: + brightness: 4 # [DYNAMIC] Not available for ZED X/ZED X Mini + contrast: 4 # [DYNAMIC] Not available for ZED X/ZED X Mini + hue: 0 # [DYNAMIC] Not available for ZED X/ZED X Mini + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_exposure_gain: true # [DYNAMIC] + exposure: 80 # [DYNAMIC] + gain: 80 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + + sensors: + publish_imu_tf: false # [usually overwritten by launch file] enable/disable the IMU TF broadcasting + sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message + sensors_pub_rate: 100. # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate + + region_of_interest: + automatic_roi: false # Enable the automatic ROI generation to automatically detect part of the robot in the FoV and remove them from the processing. Note: if enabled the value of `manual_polygon` is ignored + depth_far_threshold_meters: 2.5 # Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + image_height_ratio_cutoff: 0.5 # By default consider only the lower half of the image, can be useful to filter out the sky + #manual_polygon: '[]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.33],[0.75,0.33],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. 
Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.25],[0.75,0.25],[0.75,0.75],[0.25,0.75]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + apply_to_depth: true # Apply ROI to depth processing + apply_to_positional_tracking: true # Apply ROI to positional tracking processing + apply_to_object_detection: true # Apply ROI to object detection processing + apply_to_body_tracking: true # Apply ROI to body tracking processing + apply_to_spatial_mapping: true # Apply ROI to spatial mapping processing + + depth: + depth_mode: 'NEURAL_LIGHT' # Matches the ZED SDK setting: 'NONE', 'PERFORMANCE', 'QUALITY', 'ULTRA', 'NEURAL', 'NEURAL_PLUS' - Note: if 'NONE' all the modules that requires depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...) + depth_stabilization: 30 # Forces positional tracking to start if major than 0 - Range: [0,100] + openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters] + point_cloud_freq: 15.0 # [DYNAMIC] - frequency of the pointcloud publishing (equal or less to `pub_frame_rate` value) + point_cloud_res: 'COMPACT' # The resolution used for point cloud publishing - 'COMPACT'-Standard resolution. Optimizes processing and bandwidth, 'REDUCED'-Half resolution. 
Low processing and bandwidth requirements + depth_confidence: 95 # [DYNAMIC] + depth_texture_conf: 100 # [DYNAMIC] + remove_saturated_areas: true # [DYNAMIC] + + pos_tracking: + pos_tracking_enabled: true # True to enable positional tracking from start + pos_tracking_mode: 'GEN_1' # Matches the ZED SDK setting: 'GEN_1', 'GEN_2' + imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used. + publish_tf: true # [usually overwritten by launch file] publish `odom -> camera_link` TF + publish_map_tf: true # [usually overwritten by launch file] publish `map -> odom` TF + map_frame: 'map' + odometry_frame: 'odom' + area_memory_db_path: '' + area_memory: true # Enable to detect loop closure + reset_odom_with_loop_closure: true # Re-initialize odometry to the last valid pose when loop closure happens (reset camera odometry drift) + depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camerafrom the visual odometry evaluation + set_as_static: false # If 'true' the camera will be static and not move in the environment + set_gravity_as_origin: true # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose. + floor_alignment: false # Enable to automatically calculate camera/floor offset + initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `camera_link` frame in the map -> [X, Y, Z, R, P, Y] + path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency + path_max_count: -1 # use '-1' for unlimited path size + two_d_mode: false # Force navigation on a plane. 
If true the Z value will be fixed to 'fixed_z_value', roll and pitch to zero + fixed_z_value: 0.00 # Value to be used for Z coordinate if `two_d_mode` is true + transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->camera_link` transform being generated + reset_pose_with_svo_loop: true # Reset the camera pose to the `initial_base_pose` when the SVO loop is enabled and the SVO playback reaches the end of the file. + + gnss_fusion: + gnss_fusion_enabled: false # fuse 'sensor_msg/NavSatFix' message information into pose data + gnss_fix_topic: '/fix' # Name of the GNSS topic of type NavSatFix to subscribe [Default: '/gps/fix'] + gnss_zero_altitude: false # Set to `true` to ignore GNSS altitude information + h_covariance_mul: 1.0 # Multiplier factor to be applied to horizontal covariance of the received fix (plane X/Y) + v_covariance_mul: 1.0 # Multiplier factor to be applied to vertical covariance of the received fix (Z axis) + publish_utm_tf: true # Publish `utm` -> `map` TF + broadcast_utm_transform_as_parent_frame: false # if 'true' publish `utm` -> `map` TF, otherwise `map` -> `utm` + enable_reinitialization: false # determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. It becomes particularly crucial during prolonged GNSS signal loss scenarios. + enable_rolling_calibration: true # If this parameter is set to true, the fusion algorithm will use a rough VIO / GNSS calibration at first and then refine it. This allows you to quickly get a fused position. + enable_translation_uncertainty_target: false # When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter. 
+ gnss_vio_reinit_threshold: 5.0 # determines the threshold for GNSS/VIO reinitialization. If the fused position deviates out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered. + target_translation_uncertainty: 0.02 # defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. By default, the threshold is set at 10 centimeters. + target_yaw_uncertainty: 0.02 # defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. The unit of this parameter is in radians. By default, the threshold is set at 0.1 radians. + + mapping: + mapping_enabled: false # True to enable mapping and fused point cloud publication + resolution: 0.05 # maps resolution in meters [min: 0.01f - max: 0.2f] + max_mapping_range: 5.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0] + fused_pointcloud_freq: 1.0 # frequency of the publishing of the fused colored point cloud + clicked_point_topic: '/clicked_point' # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection + pd_max_distance_threshold: 0.15 # Plane detection: controls the spread of plane by checking the position difference. + pd_normal_similarity_threshold: 15.0 # Plane detection: controls the spread of plane by checking the angle difference. + + object_detection: + od_enabled: false # True to enable Object Detection + model: 'MULTI_CLASS_BOX_FAST' # 'MULTI_CLASS_BOX_FAST', 'MULTI_CLASS_BOX_MEDIUM', 'MULTI_CLASS_BOX_ACCURATE', 'PERSON_HEAD_BOX_FAST', 'PERSON_HEAD_BOX_ACCURATE', 'CUSTOM_YOLOLIKE_BOX_OBJECTS' + custom_onnx_file: '' # Only used if 'model' is 'CUSTOM_YOLOLIKE_BOX_OBJECTS'. Path to the YOLO-like ONNX file for custom object detection directly performed by the ZED SDK. + custom_onnx_input_size: 512 # Resolution used with the YOLO-like ONNX file. 
For example, 512 means a input tensor '1x3x512x512' + custom_label_yaml: '' # Only used if 'model' is 'CUSTOM_YOLOLIKE_BOX_OBJECTS'. Path to the COCO-like YAML file storing the custom class labels. + allow_reduced_precision_inference: true # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 20.0 # [m] Defines a upper depth range for detections + confidence_threshold: 75.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + prediction_timeout: 0.5 # During this time [sec], the object will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + enable_tracking: true # Defines if the object detection will track objects across images flow + filtering_mode: 1 # '0': NONE - '1': NMS3D - '2': NMS3D_PER_CLASS + mc_people: true # [DYNAMIC] - Enable/disable the detection of persons for 'MULTI_CLASS_X' models + mc_vehicle: true # [DYNAMIC] - Enable/disable the detection of vehicles for 'MULTI_CLASS_X' models + mc_bag: true # [DYNAMIC] - Enable/disable the detection of bags for 'MULTI_CLASS_X' models + mc_animal: true # [DYNAMIC] - Enable/disable the detection of animals for 'MULTI_CLASS_X' models + mc_electronics: true # [DYNAMIC] - Enable/disable the detection of electronic devices for 'MULTI_CLASS_X' models + mc_fruit_vegetable: true # [DYNAMIC] - Enable/disable the detection of fruits and vegetables for 'MULTI_CLASS_X' models + mc_sport: true # [DYNAMIC] - Enable/disable the detection of sport-related objects for 'MULTI_CLASS_X' models + + body_tracking: + bt_enabled: false # True to enable Body Tracking + model: 'HUMAN_BODY_MEDIUM' # 'HUMAN_BODY_FAST', 'HUMAN_BODY_MEDIUM', 'HUMAN_BODY_ACCURATE' + body_format: 'BODY_38' # 'BODY_18','BODY_34','BODY_38','BODY_70' + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 20.0 # [m] Defines a upper depth range for detections + 
body_kp_selection: 'FULL' # 'FULL', 'UPPER_BODY' + enable_body_fitting: false # Defines if the body fitting will be applied + enable_tracking: true # Defines if the object detection will track objects across images flow + prediction_timeout_s: 0.5 # During this time [sec], the skeleton will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of skeleton key points [0,99] + minimum_keypoints_threshold: 5 # [DYNAMIC] - Minimum number of skeleton key points to be detected for a valid skeleton + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: 'H264' # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. 
Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. + + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + thread_sched_policy: 'SCHED_BATCH' # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_pointcloud_priority: 60 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: '' # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. 
+ debug_common: false + debug_sim: false + debug_video_depth: false + debug_camera_controls: false + debug_point_cloud: false + debug_positional_tracking: false + debug_gnss: false + debug_sensors: false + debug_mapping: false + debug_terrain_mapping: false + debug_object_detection: false + debug_body_tracking: false + debug_roi: false + debug_streaming: false + debug_advanced: false diff --git a/src/zed/params/pointcloud_to_laserscan.yaml b/src/zed/params/pointcloud_to_laserscan.yaml new file mode 100644 index 00000000..87d98749 --- /dev/null +++ b/src/zed/params/pointcloud_to_laserscan.yaml @@ -0,0 +1,8 @@ +/**: + ros__parameters: + output_frame: base_link + range_min: 0.3 + range_max: 10.0 + use_inf: true + height_min: -0.25 + height_max: 0.25 diff --git a/src/zed_wrapper/config/common_stereo.yaml b/src/zed_wrapper/config/common_stereo.yaml new file mode 100644 index 00000000..db375534 --- /dev/null +++ b/src/zed_wrapper/config/common_stereo.yaml @@ -0,0 +1,185 @@ +/**: + ros__parameters: + # we override the QoS for the NavSatFix topic, as, for some reason, + # `zed-ros2-wrapper` seems to expect the GPS to use reliable qos + qos_overrides./sensors/gps.subscription.reliability: best_effort + + use_sim_time: false + + simulation: + sim_enabled: false # Set to `true` to enable the simulation mode and connect to a simulation server + sim_address: "127.0.0.1" # The connection address of the simulation server. See the documentation of the supported simulation plugins for more information. + sim_port: 30000 # The connection port of the simulation server. See the documentation of the supported simulation plugins for more information. + + svo: + use_svo_timestamps: true # Use the SVO timestamps to publish data. If false, data will be published at the system time. + publish_svo_clock: false # [overwritten by launch file options] When use_svo_timestamps is true allows to publish the SVO clock to the `/clock` topic. This is useful for synchronous rosbag playback. 
+ svo_loop: false # Enable loop mode when using an SVO as input source. NOTE: ignored if SVO timestamping is used + svo_realtime: false # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + replay_rate: 1.0 # Replay rate for the SVO when not used in realtime mode (between [0.10-5.0]) + + general: + camera_timeout_sec: 5 + camera_max_reconnect: 5 + camera_flip: false + self_calib: true # Enable the self-calibration process at camera opening. See https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#affeaa06cfc1d849e311e484ceb8edcc5 + serial_number: 0 # overwritten by launch file + pub_resolution: "CUSTOM" # The resolution used for image and depth map publishing. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factory to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + pub_frame_rate: 30.0 # frequency of publishing of visual images and depth images + enable_image_validity_check: 1 # [SDK5 required] Sets the image validity check. If set to 1, the SDK will check if the frames are valid before processing. + gpu_id: -1 + optional_opencv_calibration_file: "" # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + async_image_retrieval: false # If set to true will camera image retrieve at a framerate different from \ref grab() application framerate. This is useful for recording SVO or sending camera stream at different rate than application. 
+ # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + video: + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_exposure_gain: true # [DYNAMIC] + exposure: 80 # [DYNAMIC] + gain: 80 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + sensors: + publish_imu_tf: false # [overwritten by launch file options] enable/disable the IMU TF broadcasting + sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message + sensors_pub_rate: 100. # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate + + region_of_interest: + automatic_roi: false # Enable the automatic ROI generation to automatically detect part of the robot in the FoV and remove them from the processing. Note: if enabled the value of `manual_polygon` is ignored + depth_far_threshold_meters: 2.5 # Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + image_height_ratio_cutoff: 0.5 # By default consider only the lower half of the image, can be useful to filter out the sky + #manual_polygon: '[]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.33],[0.75,0.33],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. 
+ #manual_polygon: '[[0.25,0.25],[0.75,0.25],[0.75,0.75],[0.25,0.75]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + apply_to_depth: true # Apply ROI to depth processing + apply_to_positional_tracking: true # Apply ROI to positional tracking processing + apply_to_object_detection: true # Apply ROI to object detection processing + apply_to_body_tracking: true # Apply ROI to body tracking processing + apply_to_spatial_mapping: true # Apply ROI to spatial mapping processing + + depth: + depth_mode: "NEURAL" # Matches the ZED SDK setting: 'NONE', 'PERFORMANCE', 'QUALITY', 'ULTRA', 'NEURAL', 'NEURAL_PLUS' - Note: if 'NONE' all the modules that requires depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...) + depth_stabilization: 30 # Forces positional tracking to start if major than 0 - Range: [0,100] + openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters] + point_cloud_freq: 15.0 # [DYNAMIC] - frequency of the pointcloud publishing (equal or less to `pub_frame_rate` value) + point_cloud_res: "COMPACT" # The resolution used for point cloud publishing - 'COMPACT'-Standard resolution. Optimizes processing and bandwidth, 'REDUCED'-Half resolution. 
Low processing and bandwidth requirements + depth_confidence: 95 # [DYNAMIC] + depth_texture_conf: 100 # [DYNAMIC] + remove_saturated_areas: true # [DYNAMIC] + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + pos_tracking: + pos_tracking_enabled: true # True to enable positional tracking from start + pos_tracking_mode: "GEN_1" # Matches the ZED SDK setting: 'GEN_1', 'GEN_2', 'GEN_3' + imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used. + publish_tf: false # [overwritten by launch file options] publish `odom -> camera_link` TF + publish_map_tf: false # [overwritten by launch file options] publish `map -> odom` TF + map_frame: "map" + odometry_frame: "odom" + area_memory_db_path: "" + area_memory: true # Enable to detect loop closure + reset_odom_with_loop_closure: true # Re-initialize odometry to the last valid pose when loop closure happens (reset camera odometry drift) + depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camerafrom the visual odometry evaluation + set_as_static: false # If 'true' the camera will be static and not move in the environment + set_gravity_as_origin: true # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose. + floor_alignment: false # Enable to automatically calculate camera/floor offset + initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `camera_link` frame in the map -> [X, Y, Z, R, P, Y] + path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency + path_max_count: -1 # use '-1' for unlimited path size + two_d_mode: false # Force navigation on a plane. 
If true the Z value will be fixed to 'fixed_z_value', roll and pitch to zero + fixed_z_value: 0.00 # Value to be used for Z coordinate if `two_d_mode` is true + transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->camera_link` transform being generated + reset_pose_with_svo_loop: true # Reset the camera pose to the `initial_base_pose` when the SVO loop is enabled and the SVO playback reaches the end of the file. + + gnss_fusion: + gnss_fusion_enabled: true # fuse 'sensor_msg/NavSatFix' message information into pose data + gnss_fix_topic: "/sensors/gps" # Name of the GNSS topic of type NavSatFix to subscribe [Default: '/gps/fix'] + gnss_zero_altitude: false # Set to `true` to ignore GNSS altitude information + h_covariance_mul: 1.0 # Multiplier factor to be applied to horizontal covariance of the received fix (plane X/Y) + v_covariance_mul: 1.0 # Multiplier factor to be applied to vertical covariance of the received fix (Z axis) + publish_utm_tf: false # Publish `utm` -> `map` TF + broadcast_utm_transform_as_parent_frame: false # if 'true' publish `utm` -> `map` TF, otherwise `map` -> `utm` + enable_reinitialization: false # determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. It becomes particularly crucial during prolonged GNSS signal loss scenarios. + enable_rolling_calibration: true # If this parameter is set to true, the fusion algorithm will use a rough VIO / GNSS calibration at first and then refine it. This allows you to quickly get a fused position. + enable_translation_uncertainty_target: false # When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter. 
+ gnss_vio_reinit_threshold: 5.0 # determines the threshold for GNSS/VIO reinitialization. If the fused position deviates out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered. + target_translation_uncertainty: 0.1 # defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. By default, the threshold is set at 10 centimeters. + target_yaw_uncertainty: 0.1 # defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. The unit of this parameter is in radians. By default, the threshold is set at 0.1 radians. + + mapping: + mapping_enabled: true # True to enable mapping and fused point cloud publication + resolution: 0.05 # maps resolution in meters [min: 0.01f - max: 0.2f] + max_mapping_range: 5.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0] + fused_pointcloud_freq: 1.0 # frequency of the publishing of the fused colored point cloud + clicked_point_topic: "/clicked_point" # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection + pd_max_distance_threshold: 0.15 # Plane detection: controls the spread of plane by checking the position difference. + pd_normal_similarity_threshold: 15.0 # Plane detection: controls the spread of plane by checking the angle difference. + + object_detection: + od_enabled: false # True to enable Object Detection + enable_tracking: true # Whether the object detection system includes object tracking capabilities across a sequence of images. 
+ detection_model: "MULTI_CLASS_BOX_FAST" # 'MULTI_CLASS_BOX_FAST', 'MULTI_CLASS_BOX_MEDIUM', 'MULTI_CLASS_BOX_ACCURATE', 'PERSON_HEAD_BOX_FAST', 'PERSON_HEAD_BOX_ACCURATE', 'CUSTOM_YOLOLIKE_BOX_OBJECTS' + max_range: 20.0 # [m] Upper depth range for detections.The value cannot be greater than 'depth.max_depth' + filtering_mode: "NMS3D" # Filtering mode that should be applied to raw detections: 'NONE', 'NMS3D', 'NMS3D_PER_CLASS' + prediction_timeout: 2.0 # During this time [sec], the object will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + # Other parameters are defined in the 'object_detection.yaml' and 'custom_object_detection.yaml' files + + body_tracking: + bt_enabled: false # True to enable Body Tracking + model: "HUMAN_BODY_MEDIUM" # 'HUMAN_BODY_FAST', 'HUMAN_BODY_MEDIUM', 'HUMAN_BODY_ACCURATE' + body_format: "BODY_38" # 'BODY_18','BODY_34','BODY_38' + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 15.0 # [m] Defines a upper depth range for detections + body_kp_selection: "FULL" # 'FULL', 'UPPER_BODY' + enable_body_fitting: false # Defines if the body fitting will be applied + enable_tracking: true # Defines if the object detection will track objects across images flow + prediction_timeout_s: 0.5 # During this time [sec], the skeleton will have OK state even if it is not detected. 
Set this parameter to 0 to disable SDK predictions + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of skeleton key points [0,99] + minimum_keypoints_threshold: 5 # [DYNAMIC] - Minimum number of skeleton key points to be detected for a valid skeleton + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: "H264" # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. 
+ + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + thread_sched_policy: "SCHED_BATCH" # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_pointcloud_priority: 60 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: "" # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. + debug_common: false + debug_sim: false + debug_video_depth: false + debug_camera_controls: false + debug_point_cloud: false + debug_positional_tracking: false + debug_gnss: false + debug_sensors: false + debug_mapping: false + debug_terrain_mapping: false + debug_object_detection: false + debug_body_tracking: false + debug_roi: false + debug_streaming: false + debug_advanced: false