diff --git a/interface_rajant/CMakeLists.txt b/interface_rajant/CMakeLists.txt index f0a147b..df6572d 100644 --- a/interface_rajant/CMakeLists.txt +++ b/interface_rajant/CMakeLists.txt @@ -1,203 +1,44 @@ -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.8) project(interface_rajant) -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES interface_rajant -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(ament_cmake_python REQUIRED) +find_package(rclpy REQUIRED) -########### -## Build ## -########### +# Install Python modules +ament_python_install_package(${PROJECT_NAME}) -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/interface_rajant.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/interface_rajant_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation
-## in contrast to setup.py, you can choose the destination
+# Install executables
 install(PROGRAMS
-  scripts/rajant_listener.py
-  scripts/rajant_listener_new.py
-  scripts/rajant_log.py
-  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+  interface_rajant/rajant_peer_rssi.py
+  DESTINATION lib/${PROJECT_NAME}
 )

-## Mark executables and/or libraries for installation
-# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
-#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark cpp header files for installation
-# install(DIRECTORY include/${PROJECT_NAME}/
-#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-#   FILES_MATCHING PATTERN "*.h"
-#   PATTERN ".svn" EXCLUDE
-# )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-install(DIRECTORY
-  launch
-  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# Install launch files
+install(DIRECTORY launch/
+  DESTINATION share/${PROJECT_NAME}/launch
 )

-install(DIRECTORY
-  scripts/thirdParty
-  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/scripts
-)
-
-#############
-## Testing ##
-#############
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_interface_rajant.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
+# Install thirdParty directory (contains Java JAR file)
+install(DIRECTORY thirdParty/
+  DESTINATION share/${PROJECT_NAME}/thirdParty
+)

-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
+if(BUILD_TESTING)
+  find_package(ament_lint_auto REQUIRED)
+  # the following line skips the linter which checks for copyrights
+  # comment the line when a copyright and license is added to all source files
+  set(ament_cmake_copyright_FOUND TRUE)
+  # the following line skips cpplint (only works in a git repo)
+  # comment the line when this package is in a git repo and when
+  # a copyright and license is added to all source files
+  set(ament_cmake_cpplint_FOUND TRUE)
+  ament_lint_auto_find_test_dependencies()
+endif()
+
+ament_package()
diff --git a/interface_rajant/interface_rajant/__init__.py b/interface_rajant/interface_rajant/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/interface_rajant/interface_rajant/rajant_peer_rssi.py b/interface_rajant/interface_rajant/rajant_peer_rssi.py
new file mode 100755
index 0000000..d956798
--- /dev/null
+++ b/interface_rajant/interface_rajant/rajant_peer_rssi.py
@@ -0,0 +1,320 @@
+#!/usr/bin/env python3
+import rclpy
+from rclpy.executors import MultiThreadedExecutor
+from rclpy.node import Node
+import threading
+import os
+import time
+import signal
+import yaml
+import subprocess
+import sys
+import queue
+import std_msgs.msg
+from collections import deque
+
+ON_POSIX = 'posix' in sys.builtin_module_names
+
+def ping(host):
+    # Single ping packet with a one-second timeout so a dead radio does not
+    # block startup
+    command = ["ping", "-c", "1", "-W", "1", host]
+    try:
+        result = subprocess.run(command, stdout=subprocess.DEVNULL,
+                                stderr=subprocess.DEVNULL)
+        return result.returncode == 0
+    except Exception as e:
+        print(f"Error pinging {host}: {e}")
+        return False
+
+class PeerPublisher():
+    # QUEUE_LENGTH is used to filter repeated RSSI
+    QUEUE_LENGTH = 3
+
+    def __init__(self, target_name, ros_node):
+        assert ros_node is not None
+        assert target_name is not None and isinstance(target_name, str)
+
+        self.target_name = target_name
+        self.ros_node = ros_node
+
+        self.last_registered_timestamp = None
+        self.rssi_ts = []
+
+        self.published_rssi_queue = deque(maxlen=self.QUEUE_LENGTH)
+        self.repeated_counter = 0
+
+        # Create a publisher for the node
+        topic_name = f"mocha/rajant/rssi/{self.target_name}"
+        self.ros_node.get_logger().info(f"{ros_node.this_robot} - Rajant Peer RSSI - Topic /{topic_name} created")
+        self.pub = self.ros_node.create_publisher(std_msgs.msg.Int32,
+                                                  topic_name, 10)
+
+    def filter_rssi(self, ts, rssi):
+        if self.last_registered_timestamp is None:
+            self.last_registered_timestamp = ts
+        self.rssi_ts.append((ts, rssi))
+
+    def publish_all(self, ts):
+        # Skip if we did not collect info for this node in this session
+        if self.last_registered_timestamp is None:
+            return
+        # Check that the end-of-window timestamp and the sample timestamps agree
+        if ts != self.last_registered_timestamp:
+            self.ros_node.get_logger().error(f"{self.ros_node.this_robot} - Rajant Peer RSSI - Timestamp of end message different than last registered timestamp in publish_all")
+            return
+        # Verify that all the timestamps are the same for all the radios
+        all_ts = [i[0] for i in self.rssi_ts]
+        if not len(all_ts):
+            self.ros_node.get_logger().error(f"{self.ros_node.this_robot} - Rajant Peer RSSI - Empty list of timestamps in publish_all.")
+            return
+        if len(set(all_ts)) != 1:
+            self.ros_node.get_logger().error(f"{self.ros_node.this_robot} - Rajant Peer RSSI - Multiple different timestamps for the same group in publish_all.")
+            return
+
+        # Find the largest RSSI and the sum of RSSI across interfaces
+        all_rssi = [i[1] for i in self.rssi_ts]
+        max_rssi = max(all_rssi)
+        sum_rssi = sum(all_rssi)
+
+        # Clear lists
+        self.last_registered_timestamp = None
+        self.rssi_ts = []
+
+        self.published_rssi_queue.append(sum_rssi)
+
+        # If we published the same sum of RSSI for the last QUEUE_LENGTH
+        # windows, the peer may deserve to be dropped. This can happen for
+        # two reasons:
+        # - The radio is dead
+        # - The RSSI has simply not changed
+        #
+        # As it is difficult to disambiguate the two cases, a compromise is
+        # to throttle the affected topic to one message every five windows
+        # (repeated_counter skips four publications).
+        #
+        # With the current configuration it takes about 30 seconds for a
+        # node to be declared disconnected, so at most one stale message is
+        # published.
+        if len(set(self.published_rssi_queue)) == 1 and \
+                len(self.published_rssi_queue) == self.QUEUE_LENGTH:
+            if self.repeated_counter < 4:
+                self.repeated_counter += 1
+                self.ros_node.get_logger().debug(f"{self.ros_node.this_robot} - Rajant Peer RSSI - Repeated RSSI for {self.target_name} for the last {self.QUEUE_LENGTH*3} seconds. Throttling counter {self.repeated_counter}")
+                return
+
+        self.ros_node.get_logger().debug(f"{self.ros_node.this_robot} - Rajant Peer RSSI - Publishing {self.target_name}")
+        self.repeated_counter = 0
+        msg = std_msgs.msg.Int32()
+        msg.data = max_rssi
+        self.pub.publish(msg)
+
+
+class RajantPeerRSSI(Node):
+    def __init__(self):
+        super().__init__("rajant_peer_rssi")
+        self.logger = self.get_logger()
+
+        # Handle shutdown signal
+        self.shutdownTriggered = threading.Event()
+        self.shutdownTriggered.clear()
+
+        def signal_handler(sig, frame):
+            if not self.shutdownTriggered.is_set():
+                self.logger.warning(f"{self.this_robot} - Rajant Peer RSSI - Got SIGINT. Triggering shutdown.")
+                self.shutdown()
+        signal.signal(signal.SIGINT, signal_handler)
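+
+        # Example invocation (hypothetical paths, for illustration only):
+        #   ros2 run interface_rajant rajant_peer_rssi.py --ros-args \
+        #     -p robot_name:=charon \
+        #     -p robot_configs:=/path/to/robot_configs.yaml \
+        #     -p radio_configs:=/path/to/radio_configs.yaml \
+        #     -p bcapi_jar_file:=/path/to/PeerRSSI-bcapi-11.26.1.jar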
+        # Declare parameters
+        self.declare_parameter("robot_name", "")
+        self.declare_parameter("robot_configs", "")
+        self.declare_parameter("radio_configs", "")
+        self.declare_parameter("bcapi_jar_file", "")
+
+        self.this_robot = self.get_parameter("robot_name").get_parameter_value().string_value
+
+        if len(self.this_robot) == 0:
+            self.logger.error(f"{self.this_robot} - Rajant Peer RSSI - Empty robot name")
+            raise ValueError("Empty robot name")
+
+        # Load and check robot configs
+        self.robot_configs_file = self.get_parameter("robot_configs").get_parameter_value().string_value
+        try:
+            with open(self.robot_configs_file, "r") as f:
+                self.robot_configs = yaml.load(f, Loader=yaml.FullLoader)
+        except Exception as e:
+            self.logger.error(f"{self.this_robot} - Rajant Peer RSSI - Could not load robot_configs file {self.robot_configs_file}")
+            raise e
+        if self.this_robot not in self.robot_configs.keys():
+            self.logger.error(f"{self.this_robot} - Rajant Peer RSSI - Robot {self.this_robot} not in robot_configs file")
+            raise ValueError("Robot not in config file")
+
+        # Load and check radio configs
+        self.radio_configs_file = self.get_parameter("radio_configs").get_parameter_value().string_value
+        try:
+            with open(self.radio_configs_file, "r") as f:
+                self.radio_configs = yaml.load(f, Loader=yaml.FullLoader)
+        except Exception as e:
+            self.logger.error(f"{self.this_robot} - Rajant Peer RSSI - Could not load radio_configs file {self.radio_configs_file}")
+            raise e
+        self.radio = self.robot_configs[self.this_robot]["using-radio"]
+        if self.radio not in self.radio_configs.keys():
+            self.logger.error(f"{self.this_robot} - Rajant Peer RSSI - Radio {self.radio} not in radio_configs file")
+            raise ValueError(f"Radio {self.radio} not in config file")
+
+        # Get the location of the jar file for bcapi
+        self.bcapi_jar_file = self.get_parameter("bcapi_jar_file").get_parameter_value().string_value
+        if not (os.path.isfile(self.bcapi_jar_file) and
+                self.bcapi_jar_file.endswith('.jar')):
+            self.get_logger().error(f"{self.this_robot} - Rajant Peer RSSI - Erroneous BCAPI jar file")
+            raise ValueError("Erroneous BCAPI jar file")
+
+        # Get the target ip for the local rajant
+        rajant_name = self.robot_configs[self.this_robot]['using-radio']
+        if rajant_name in self.radio_configs.keys():
+            self.target_ip = self.radio_configs[rajant_name]['computed-IP-address']
+        else:
+            self.get_logger().error(f"{self.this_robot} - Rajant Peer RSSI - Radio {rajant_name} for robot {self.this_robot} not found in configs")
+            raise ValueError(f"Radio {rajant_name} for robot {self.this_robot} not found in configs")
+
+        # Ping the local rajant
+        if not ping(self.target_ip):
+            self.get_logger().error(f"{self.this_robot} - Rajant Peer RSSI - Failed to ping {self.target_ip}")
+            raise ValueError(f"Failed to ping {self.target_ip}")
+
+        # Create the publishers for the peers
+        self.peers = {}
+        for peer in self.robot_configs[self.this_robot]["clients"]:
+            # Index peer by computed IP
+            peer_radio = self.robot_configs[peer]["using-radio"]
+            computed_ip = self.radio_configs[peer_radio]["computed-IP-address"]
+            self.peers[computed_ip] = PeerPublisher(peer, self)
+
+        # Invert radio lookup
+        self.ip_to_radio = {self.radio_configs[radio]["computed-IP-address"]: radio
+                            for radio in self.radio_configs}
+
+        # Start the java program and thread to read output
+        self.start_java_process_and_queue()
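+
+        # Threading model, for reference: start_java_process_and_queue()
+        # spawns the Java BCAPI subprocess plus an enqueue_output() thread
+        # that pushes its stdout lines into process_queue; the
+        # process_output() thread started below consumes that queue, parses
+        # each line, and hands the samples to the PeerPublisher instances.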
+        # Thread for processing results
+        self.process_thread_shutdown = threading.Event()
+        self.process_thread_shutdown.clear()
+        self.process_thread = threading.Thread(target=self.process_output, args=())
+        self.process_thread.start()
+
+    def shutdown(self):
+        if self.shutdownTriggered.is_set():
+            return
+        self.shutdownTriggered.set()
+        # Stop the process_output thread
+        self.process_thread_shutdown.set()
+        self.process_thread.join()
+        # Kill the subprocess
+        self.java_process.terminate()
+        # This should end the thread for enqueue_output
+        self.enqueue_thread.join()
+
+    def enqueue_output(self):
+        """ Saves the output of the process in a queue to be parsed
+        afterwards """
+        for line in self.java_process.stdout:
+            self.process_queue.put(line)
+        # If the java process dies, we will reach the end of the thread
+        self.java_process.stdout.close()
+
+    def process_output(self):
+        restart_count = 0
+        while not self.process_thread_shutdown.is_set():
+            if self.enqueue_thread is not None and not self.enqueue_thread.is_alive():
+                time.sleep(1)
+                restart_count += 1
+                if restart_count == 5:
+                    # Too many failures: flag a full node shutdown instead of
+                    # calling sys.exit(), which would only terminate this
+                    # worker thread, not the node
+                    self.get_logger().error(f"{self.this_robot} - Rajant Peer RSSI - Java process died too many times. Killing node.")
+                    self.process_thread_shutdown.set()
+                    self.shutdownTriggered.set()
+                    return
+                self.get_logger().error(f"{self.this_robot} - Rajant Peer RSSI - Java process died, restarting")
+                self.start_java_process_and_queue()
+                continue
+            try:
+                data = self.process_queue.get(timeout=1)
+            except queue.Empty:
+                # No data, just continue the loop
+                continue
+            # Data comes in lines. Decide what to do based on the output
+            if data == "\n":
+                continue
+            elif data == "ERR\n":
+                self.java_process.terminate()
+                continue
+            elif "END," in data:
+                # End of transmission, send messages
+                data = data.replace("END,", "")
+                data = data.replace(";\n", "")
+                end_ts = int(data)
+                for peer in self.peers:
+                    self.peers[peer].publish_all(end_ts)
+                continue
+            # Process regular messages
+            data = data.replace(";\n", "")  # Remove end line
+            ts, iface, peer, rssi = data.split(",")
+            # Cleanup things
+            ts = int(ts.replace("Ts:", ""))
+            iface = iface.replace("Iface:", "")
+            peer = peer.replace("Peer:", "")
+            rssi = int(rssi.replace("RSSI:", ""))
+            # Let the right peer handle it
+            if peer in self.peers:
+                self.peers[peer].filter_rssi(ts, rssi)
+            else:
+                # Discover the rogue peer with the radio
+                if peer in self.ip_to_radio:
+                    rogue_radio = self.ip_to_radio[peer]
+                    self.get_logger().debug(f"{self.this_robot} - Rajant Peer RSSI - Peer with radio {rogue_radio} not assigned to any robot")
+                else:
+                    self.get_logger().debug(f"{self.this_robot} - Rajant Peer RSSI - Peer with IP {peer} not assigned to any robot")
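+
+    # For reference, the Java helper prints one line per (interface, peer)
+    # pair roughly every three seconds, followed by an END marker that
+    # closes the collection window. Captured traces live in
+    # test/test_samples, e.g.:
+    #   Ts:1756520257596,Iface:wlan0,Peer:10.149.180.1,RSSI:50;
+    #   Ts:1756520257596,Iface:wlan1,Peer:10.149.180.1,RSSI:56;
+    #   END,1756520257596;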
+
+    def start_java_process_and_queue(self):
+        # Subprocess to run the java BCAPI interface
+        self.get_logger().info(f"{self.this_robot} - Rajant Peer RSSI - Starting Java Process for IP {self.target_ip}")
+        # Run process in its own separate process group so it does not get
+        # SIGINT. We will handle that ourselves
+        popen_kwargs = {}
+        popen_kwargs['preexec_fn'] = os.setpgrp
+        self.java_process = subprocess.Popen(['java', '-jar', self.bcapi_jar_file,
+                                              self.target_ip], stdout=subprocess.PIPE,
+                                             close_fds=ON_POSIX,
+                                             text=True, **popen_kwargs)
+        self.process_queue = queue.Queue()
+        self.enqueue_thread = threading.Thread(target=self.enqueue_output, args=())
+        self.enqueue_thread.start()
+
+
+def main(args=None):
+    rclpy.init(args=args)
+
+    try:
+        rajant_peer_rssi_node = RajantPeerRSSI()
+    except Exception as e:
+        print(f"Node initialization failed: {e}")
+        rclpy.shutdown()
+        return
+
+    # Use mt executor
+    mtexecutor = MultiThreadedExecutor(num_threads=4)
+    mtexecutor.add_node(rajant_peer_rssi_node)
+
+    # Spin until a shutdown is triggered
+    try:
+        while rclpy.ok() and not rajant_peer_rssi_node.shutdownTriggered.is_set():
+            mtexecutor.spin_once(timeout_sec=0.1)
+    except KeyboardInterrupt:
+        rajant_peer_rssi_node.shutdown()
+        print("Keyboard interrupt")
+    except Exception as e:
+        print(f"Exception: {e}")
+        rajant_peer_rssi_node.shutdown()
+
+    rajant_peer_rssi_node.destroy_node()
+    rclpy.shutdown()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/interface_rajant/launch/rajant_nodes.launch b/interface_rajant/launch/rajant_nodes.launch
deleted file mode 100644
index d014dfd..0000000
--- a/interface_rajant/launch/rajant_nodes.launch
+++ /dev/null
@@ -1,22 +0,0 @@
diff --git a/interface_rajant/launch/rajant_nodes.launch.py b/interface_rajant/launch/rajant_nodes.launch.py
new file mode 100644
index 0000000..bdc90cd
--- /dev/null
+++ b/interface_rajant/launch/rajant_nodes.launch.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument
+from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
+from launch_ros.actions import Node
+from launch_ros.substitutions import FindPackageShare
+
+
+def generate_launch_description():
+    """
+    Launch the Rajant interface node (peer RSSI monitor)
+    """
+
+    # Declare launch arguments
+    robot_name_arg = DeclareLaunchArgument(
+        'robot_name',
+        default_value='charon',
+        description='Name of the robot'
+    )
+
+    robot_configs_arg = DeclareLaunchArgument(
+        'robot_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'robot_configs.yaml'
+        ]),
+        description='Path to robot configuration file'
+    )
+
+    radio_configs_arg = DeclareLaunchArgument(
+        'radio_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'radio_configs.yaml'
+        ]),
+        description='Path to radio configuration file'
+    )
+
+    bcapi_jar_file_arg = DeclareLaunchArgument(
+        'bcapi_jar_file',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('interface_rajant'),
+            'thirdParty', 'PeerRSSI-bcapi-11.26.1.jar'
+        ]),
+        description='Path to the BCAPI jar file installed by this package'
+    )
+
+    # Get launch configurations
+    robot_name = LaunchConfiguration('robot_name')
+    robot_configs = LaunchConfiguration('robot_configs')
+    radio_configs = LaunchConfiguration('radio_configs')
+    bcapi_jar_file = LaunchConfiguration('bcapi_jar_file')
+
+    # Define node. The former rajant_query/rajant_parser pair is replaced
+    # by the single rajant_peer_rssi node installed by this package
+    rajant_peer_rssi_node = Node(
+        package='interface_rajant',
+        executable='rajant_peer_rssi.py',
+        name='rajant_peer_rssi',
+        output='screen',
+        parameters=[{
+            'robot_name': robot_name,
+            'robot_configs': robot_configs,
+            'radio_configs': radio_configs,
+            'bcapi_jar_file': bcapi_jar_file
+        }]
+    )
+
+    return LaunchDescription([
+        robot_name_arg,
+        robot_configs_arg,
+        radio_configs_arg,
+        bcapi_jar_file_arg,
+        rajant_peer_rssi_node
+    ])
\ No newline at end of file
diff --git a/interface_rajant/package.xml b/interface_rajant/package.xml
index 374ab51..2cfe84e 100644
--- a/interface_rajant/package.xml
+++ b/interface_rajant/package.xml
@@ -1,65 +1,21 @@
-<?xml version="1.0"?>
-<package>
+<?xml version="1.0"?>
+<package format="3">
   <name>interface_rajant</name>
   <version>0.0.0</version>
   <description>The interface_rajant package</description>
   <maintainer email="fclad@seas.upenn.edu">Fernando Cladera</maintainer>
-  <license>TODO</license>
+  <license>BSD 3-Clause License</license>
 
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>rospy</build_depend>
-  <build_depend>std_msgs</build_depend>
-  <build_export_depend>rospy</build_export_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <exec_depend>rospy</exec_depend>
-  <exec_depend>std_msgs</exec_depend>
+  <buildtool_depend>ament_cmake</buildtool_depend>
+  <buildtool_depend>ament_cmake_python</buildtool_depend>
+
+  <exec_depend>rclpy</exec_depend>
+
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
 
   <export>
+    <build_type>ament_cmake</build_type>
   </export>
 </package>
diff --git a/interface_rajant/resource/interface_rajant b/interface_rajant/resource/interface_rajant
new file mode 100644
index 0000000..e69de29
diff --git a/interface_rajant/scripts/rajant_parser.py b/interface_rajant/scripts/rajant_parser.py
deleted file mode 100755
index c45718f..0000000
--- a/interface_rajant/scripts/rajant_parser.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python3
-
-import ast
-import yaml
-import os
-import re
-import threading
-import pprint
-import sys
-import pdb
-
-import rospy
-from std_msgs.msg import String
-from std_msgs.msg import Int32
-import rospkg
-
-class RajantParser():
-    def __init__(self, this_robot, robot_configs, radio_configs):
-
-        # Check input args
-        assert isinstance(this_robot, str)
-        assert isinstance(robot_configs, dict)
-        assert isinstance(radio_configs, dict)
-        self.MAC_DICT = {}
-
-        self.this_robot = this_robot
-        self.robot_cfg = robot_configs
-        self.radio_cfg = radio_configs
-        self.rate = rospy.Rate(.5)
-
-        rospy.loginfo(f"{self.this_robot} - Rajant API Parser - Starting")
-
-        # Generate a standard configuration with a RSSI of -1
-        for radio in self.radio_cfg.keys():
-            for address in self.radio_cfg[radio]['MAC-address']:
-                self.MAC_DICT[address] = {}
-                self.MAC_DICT[address]['rssi'] = -20
-                self.MAC_DICT[address]['timestamp'] = rospy.Time.now()
-                self.MAC_DICT[address]['radio'] = radio
-                self.MAC_DICT[address]['publisher'] = None
-
-        # Generate publishers for each item in the dict
-        for mac in self.MAC_DICT.keys():
-            for robot in self.robot_cfg.keys():
-                if self.MAC_DICT[mac]['radio'] == self.robot_cfg[robot]['using-radio'] and robot != self.this_robot:
-                    self.MAC_DICT[mac]['publisher'] = rospy.Publisher('ddb/rajant/rssi/' + robot, Int32, queue_size = 10)
-
-        rospy.Subscriber('ddb/rajant/log', String, self.update_dict)
-        rospy.spin()
-
-
-    def update_dict(self, data):
-        # If we did not receive an update after dt, drop the RSSI to -1
-        no_rssi = -1
-        dt = rospy.Duration(20.)
-
-        # Evaluate the input data as a dictionary
-        alldata = data.data
-        data_dict = ast.literal_eval(data.data)
-
-        state = data_dict['watchResponse']['state']
-
-        # Still need to figure out the rospy time issue
-        # Update the RSSI
-        for wireless_channel in state.keys():
-            for wireless_keys in state[wireless_channel].keys():
-                if wireless_keys[0:4] == 'peer':
-                    peer = wireless_keys
-                    if 'rssi' in state[wireless_channel][peer].keys():
-                        mac = state[wireless_channel][peer]['mac']
-                        if mac not in self.MAC_DICT.keys():
-                            rospy.logerr(f"MAC: {mac} is not in the list of knowns MACs.
Is your radio_configs.yaml file correct?") - continue - rssi = state[wireless_channel][peer]['rssi'] - self.MAC_DICT[mac]['rssi'] = rssi - self.MAC_DICT[mac]['timestamp'] = rospy.Time.now() - # Only publish if the publisher is not None - # This avoids an error for a radio that is connected but that is not - # actively used by any robot - if self.MAC_DICT[mac]['publisher'] is not None: - self.MAC_DICT[mac]['publisher'].publish(rssi) - else: - rospy.logwarn(f"{self.this_robot} - Rajant API Parser - " + - f"active radio {self.MAC_DICT[mac]['radio']} not assigned to any robot") - elif 'mac' in state[wireless_channel][peer].keys() and 'rssi' not in state[wireless_channel][peer].keys(): - mac = state[wireless_channel][peer]['mac'] - if mac not in self.MAC_DICT.keys(): - rospy.logerr(f"MAC: {mac} is not in the list of knowns MACs. Is your radio_configs.yaml file correct?") - continue - if rospy.Time.now()-self.MAC_DICT[mac]['timestamp'] > dt: - self.MAC_DICT[mac]['rssi'] = no_rssi - # Only publish if the publisher is not None - # See comment above - if self.MAC_DICT[mac]['publisher'] is not None: - self.MAC_DICT[mac]['publisher'].publish(no_rssi) - else: - rospy.logwarn(f"{self.this_robot} - Rajant API Parser - " + - f"active radio {self.MAC_DICT[mac]['radio']} not assigned to any robot") - - -if __name__ == '__main__': - - rospy.init_node('rajant_listener', anonymous=False) - # Get robot name from the ~robot_name param - robot_name = rospy.get_param('~robot_name', 'charon') - - # Get robot configs - robot_configs_file = rospy.get_param("~robot_configs") - with open(robot_configs_file, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - if robot_name not in robot_configs.keys(): - rospy.signal_shutdown("Robot not in config file") - rospy.spin() - - # Get radio configs - radio_configs_file = rospy.get_param("~radio_configs") - with open(radio_configs_file, "r") as f: - radio_configs = yaml.load(f, Loader=yaml.FullLoader) - radio = robot_configs[robot_name]["using-radio"] - if radio not in radio_configs.keys(): - rospy.shutdown("Radio not in config file") - rospy.spin() - - RajantParser(robot_name, robot_configs, radio_configs) diff --git a/interface_rajant/scripts/rajant_query.py b/interface_rajant/scripts/rajant_query.py deleted file mode 100755 index 264760d..0000000 --- a/interface_rajant/scripts/rajant_query.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -import sys -import subprocess -from threading import Thread -from queue import Queue, Empty -from pprint import pprint -import sys -import os -import time -import yaml -import re -import pdb -import string -import hashlib -import random -import rospy -import std_msgs.msg -import rospkg - -def randomNumber(stringLength=4): - """Generate a random string of fixed length """ - number = random.randint(1000, 9999) - return str(number) - - -def enqueue_output(out, queue): - """ Saves the output of the process in a queue to be parsed - afterwards """ - for line in iter(out.readline, b''): - queue.put(line) - out.close() - - -def ping_ip(ip_address): - try: - # Run the ping command with a single ping packet (-c 1) and a timeout of 1 second (-W 1) - result = subprocess.run(["ping", "-c", "1", "-W", "1", ip_address], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) - return result.returncode == 0 - except subprocess.CalledProcessError: - # An error occurred (ping failed) - return False - - -def line_parser(line_bytes): - """ Returns parsed str version of bytes input line - This is quite magic: rajant 
output is not yaml but it is very - yamlish. If we replace the { with :, we remove }, and we do some - minor modifications everything works out of the box!""" - line_str = line_bytes.decode('unicode-escape') - line_str = line_str.replace("{", ":") - line_str = line_str.replace("}", "") - # random numbers are added to avoid overwriting the key on the yaml - line_str = re.sub("wireless", - "wireless-" + randomNumber(), line_str) - line_str = re.sub("peer", - "peer-" + randomNumber(), line_str) - # MACs are a little bit more tricky - if line_str.replace(" ", "")[:4] == "mac:": - separator = line_str.find(":") + 2 - mac_str = line_str[separator:] - mac_bytes = bytes(mac_str, 'raw_unicode_escape') - mac_decoded = ":".join(["%02x" % c for c in mac_bytes[1:-2]]) - line_str = line_str[:separator] + mac_decoded + "\n" - return line_str - - -ON_POSIX = 'posix' in sys.builtin_module_names - -if __name__ == "__main__": - rospy.init_node("rajant_query", anonymous=False) - - # Get robot name from the ~robot_name param - robot_name = rospy.get_param('~robot_name', 'charon') - - # Get robot configs - robot_configs_file = rospy.get_param("~robot_configs") - with open(robot_configs_file, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - if robot_name not in robot_configs.keys(): - rospy.signal_shutdown("Robot not in config file") - rospy.spin() - - # Get radio configs - radio_configs_file = rospy.get_param("~radio_configs") - with open(radio_configs_file, "r") as f: - radio_configs = yaml.load(f, Loader=yaml.FullLoader) - radio = robot_configs[robot_name]["using-radio"] - if radio not in radio_configs.keys(): - rospy.shutdown("Radio not in config file") - rospy.spin() - - # Get path of the package - rajant_name = robot_configs[robot_name]['using-radio'] - if rajant_name in radio_configs.keys(): - target_ip = radio_configs[rajant_name]['computed-IP-address'] - else: - rospy.logerr(f"Radio {rajant_name} for robot {robot_name} not found in configs") - rospy.signal_shutdown("Radio not in configs") - rospy.spin() - - - # Create ros publisher - pub = rospy.Publisher('ddb/rajant/log', std_msgs.msg.String, queue_size=10) - - rospack = rospkg.RosPack() - ros_path = rospack.get_path('interface_rajant') - - # Create subprocess for the java app - java_bin = os.path.join(ros_path, 'scripts', - 'thirdParty/watchstate/bcapi-watchstate-11.19.0-SNAPSHOT-jar-with-dependencies.jar') - - p = subprocess.Popen(['java', - '-jar', - java_bin, - target_ip], stdout=subprocess.PIPE, close_fds=ON_POSIX) - q = Queue() - t = Thread(target=enqueue_output, args=(p.stdout, q)) - t.daemon = True # thread dies with the program - t.start() - - # Go - rospy.loginfo(f"{robot_name} - Rajant API Query - Starting on {rajant_name}") - - # ping the assigned radio - if ping_ip(target_ip): - rospy.loginfo(f"{robot_name} - Rajant API Query - ping success") - else: - rospy.logerr(f"{robot_name} - Rajant API Query - Rajant ping failed") - rospy.signal_shutdown("Rajant IP") - rospy.spin() - - while not rospy.is_shutdown(): - if not t.is_alive(): - rospy.logerr(f'{robot_name}: Rajant Java process died! Restarting...') - p = subprocess.Popen(['java', - '-jar', - java_bin, - target_ip], stdout=subprocess.PIPE, close_fds=ON_POSIX) - q = Queue() - t = Thread(target=enqueue_output, args=(p.stdout, q)) - t.daemon = True # thread dies with the program - t.start() - - # This sleep avoids problem with the java process. 
DO NOT REMOVE IT - rospy.sleep(1) - - try: - line = q.get_nowait() - except Empty: - # No output yet - pass - else: # got line - answ_array = line_parser(line) - while not rospy.is_shutdown(): - try: - newline = q.get_nowait() - except Empty: - break - else: - answ_array += line_parser(newline) - try: - yaml_res = yaml.load(answ_array, Loader=yaml.Loader) - if type(yaml_res) == type({}): - # rospy.logdebug(str(yaml_res) + "\n") - pub.publish(str(yaml_res)) - else: - rospy.logerr(f"{robot_name}: YAML from Rajant did not look like an object!") - except yaml.scanner.ScannerError: - rospy.logerr(f"{robot_name}: Could not parse YAML from Rajant!") diff --git a/interface_rajant/setup.py b/interface_rajant/setup.py new file mode 100644 index 0000000..8260bd0 --- /dev/null +++ b/interface_rajant/setup.py @@ -0,0 +1,27 @@ +from setuptools import setup + +package_name = "interface_rajant" + +setup( + name=package_name, + version="0.0.0", + packages=[package_name], + data_files=[ + ("share/ament_index/resource_index/packages", ["resource/" + package_name]), + ("share/" + package_name, ["package.xml"]), + ("share/" + package_name + "/launch", ["launch/rajant_nodes.launch.py"]), + ("share/" + package_name + "/thirdParty", ["thirdParty/PeerRSSI-bcapi-11.26.1.jar"]), + ], + install_requires=["setuptools"], + zip_safe=True, + maintainer="Fernando Cladera", + maintainer_email="fclad@seas.upenn.edu", + description="The interface_rajant package", + license="BSD 3-Clause License", + tests_require=["pytest"], + entry_points={ + "console_scripts": [ + "rajant_peer_rssi = interface_rajant.rajant_peer_rssi:main", + ], + }, +) diff --git a/interface_rajant/test/test_samples/no_clients b/interface_rajant/test/test_samples/no_clients new file mode 100644 index 0000000..b20a70b --- /dev/null +++ b/interface_rajant/test/test_samples/no_clients @@ -0,0 +1,11 @@ +END,1756520217550; +END,1756520220634; +END,1756520223717; +END,1756520226798; +END,1756520229880; +END,1756520232961; +END,1756520236039; +END,1756520239119; +END,1756520242199; +END,1756520245274; +END,1756520248354; diff --git a/interface_rajant/test/test_samples/single_client_single_iface b/interface_rajant/test/test_samples/single_client_single_iface new file mode 100644 index 0000000..f53440f --- /dev/null +++ b/interface_rajant/test/test_samples/single_client_single_iface @@ -0,0 +1,66 @@ +Ts:1756520588373,Iface:wlan1,Peer:10.6.141.1,RSSI:39; +END,1756520588373; +Ts:1756520591454,Iface:wlan1,Peer:10.6.141.1,RSSI:39; +END,1756520591454; +Ts:1756520594534,Iface:wlan1,Peer:10.6.141.1,RSSI:39; +END,1756520594534; +Ts:1756520597614,Iface:wlan1,Peer:10.6.141.1,RSSI:39; +END,1756520597614; +Ts:1756520600694,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520600694; +Ts:1756520603774,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520603774; +Ts:1756520606851,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520606851; +Ts:1756520609928,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520609928; +Ts:1756520613003,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520613003; +Ts:1756520616081,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520616081; +Ts:1756520619161,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520619161; +Ts:1756520622237,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520622237; +Ts:1756520625316,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520625316; +Ts:1756520628397,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520628397; +Ts:1756520631476,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520631476; +Ts:1756520634555,Iface:wlan1,Peer:10.6.141.1,RSSI:41; 
+END,1756520634555; +Ts:1756520637634,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520637634; +Ts:1756520640713,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520640713; +Ts:1756520643792,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520643792; +Ts:1756520646872,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520646872; +Ts:1756520649951,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520649951; +Ts:1756520653026,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520653026; +Ts:1756520656103,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520656103; +Ts:1756520659183,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520659183; +Ts:1756520662262,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520662262; +Ts:1756520665339,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520665339; +Ts:1756520668418,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520668418; +Ts:1756520671497,Iface:wlan1,Peer:10.6.141.1,RSSI:40; +END,1756520671497; +Ts:1756520674575,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520674575; +Ts:1756520677654,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520677654; +Ts:1756520680732,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520680732; +Ts:1756520683810,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520683810; +Ts:1756520686888,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520686888; diff --git a/interface_rajant/test/test_samples/single_client_two_iface b/interface_rajant/test/test_samples/single_client_two_iface new file mode 100644 index 0000000..3992774 --- /dev/null +++ b/interface_rajant/test/test_samples/single_client_two_iface @@ -0,0 +1,66 @@ +Ts:1756520257596,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520257596,Iface:wlan1,Peer:10.149.180.1,RSSI:56; +END,1756520257596; +Ts:1756520260678,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520260678,Iface:wlan1,Peer:10.149.180.1,RSSI:56; +END,1756520260678; +Ts:1756520263761,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520263761,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520263761; +Ts:1756520266844,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520266844,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520266844; +Ts:1756520269926,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520269926,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520269926; +Ts:1756520273006,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520273006,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520273006; +Ts:1756520276086,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520276086,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520276086; +Ts:1756520279167,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520279167,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520279167; +Ts:1756520282250,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520282250,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520282250; +Ts:1756520285334,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520285334,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520285334; +Ts:1756520288418,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520288418,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520288418; +Ts:1756520291499,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520291499,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520291499; +Ts:1756520294583,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520294583,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520294583; +Ts:1756520297664,Iface:wlan0,Peer:10.149.180.1,RSSI:54; +Ts:1756520297664,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520297664; +Ts:1756520300748,Iface:wlan0,Peer:10.149.180.1,RSSI:54; 
+Ts:1756520300748,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520300748; +Ts:1756520303829,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520303829,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520303829; +Ts:1756520306909,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520306909,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520306909; +Ts:1756520309989,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520309989,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520309989; +Ts:1756520313068,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520313068,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520313068; +Ts:1756520316150,Iface:wlan0,Peer:10.149.180.1,RSSI:54; +Ts:1756520316150,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520316150; +Ts:1756520319232,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520319232,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520319232; +Ts:1756520322313,Iface:wlan0,Peer:10.149.180.1,RSSI:53; +Ts:1756520322313,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520322313; diff --git a/interface_rajant/test/test_samples/single_client_two_iface_disconnect b/interface_rajant/test/test_samples/single_client_two_iface_disconnect new file mode 100644 index 0000000..06ccd89 --- /dev/null +++ b/interface_rajant/test/test_samples/single_client_two_iface_disconnect @@ -0,0 +1,71 @@ +Ts:1756520789697,Iface:wlan0,Peer:10.149.180.1,RSSI:51; +Ts:1756520789697,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520789697; +Ts:1756520792780,Iface:wlan0,Peer:10.149.180.1,RSSI:51; +Ts:1756520792780,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520792780; +Ts:1756520795861,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520795861,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520795861; +Ts:1756520798940,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520798940,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520798940; +Ts:1756520802017,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520802017,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520802017; +Ts:1756520805097,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520805097,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520805097; +Ts:1756520808179,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520808179,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520808179; +Ts:1756520811260,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520811260,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520811260; +Ts:1756520814340,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520814340,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520814340; +Ts:1756520817420,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520817420,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520817420; +Ts:1756520820500,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520820500,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520820500; +Ts:1756520823582,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520823582,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520823582; +# Disconnect +Ts:1756520826663,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520826663,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520826663; +Ts:1756520829745,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520829745,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520829745; +Ts:1756520832826,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520832826,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520832826; +Ts:1756520835906,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520835906,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520835906; +Ts:1756520838986,Iface:wlan0,Peer:10.149.180.1,RSSI:50; 
+Ts:1756520838986,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520838986; +Ts:1756520842068,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520842068,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520842068; +Ts:1756520845148,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520845148,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520845148; +Ts:1756520848229,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520848229,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520848229; +Ts:1756520851307,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520851307,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520851307; +Ts:1756520854388,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520854388,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520854388; +Ts:1756520857468,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520857468; +END,1756520860547; + diff --git a/interface_rajant/test/test_samples/single_client_two_iface_reconnect b/interface_rajant/test/test_samples/single_client_two_iface_reconnect new file mode 100644 index 0000000..3bc2e6f --- /dev/null +++ b/interface_rajant/test/test_samples/single_client_two_iface_reconnect @@ -0,0 +1,40 @@ +END,1756520900407; +END,1756520903491; +END,1756520906573; +END,1756520909656; +END,1756520912741; +END,1756520915822; +END,1756520918903; +END,1756520921984; +END,1756520925063; +END,1756520928142; +END,1756520931223; +END,1756520934304; +END,1756520937383; +END,1756520940462; +END,1756520943541; +END,1756520946632; +END,1756520949712; +END,1756520952794; +END,1756520955874; +END,1756520958954; +END,1756520962033; +END,1756520965114; +END,1756520968196; +END,1756520971277; +END,1756520974358; +END,1756520977439; +END,1756520980523; +END,1756520983603; +END,1756520986684; +Ts:1756520989764,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520989764,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +END,1756520989764; +Ts:1756520992848,Iface:wlan0,Peer:10.149.180.1,RSSI:47; +Ts:1756520992848,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520992848; +Ts:1756520995930,Iface:wlan0,Peer:10.149.180.1,RSSI:45; +Ts:1756520995930,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +END,1756520995930; +Ts:1756520999011,Iface:wlan0,Peer:10.149.180.1,RSSI:45; +Ts:1756520999011,Iface:wlan1,Peer:10.149.180.1,RSSI:54; diff --git a/interface_rajant/test/test_samples/two_clients_two_iface b/interface_rajant/test/test_samples/two_clients_two_iface new file mode 100644 index 0000000..b4618ed --- /dev/null +++ b/interface_rajant/test/test_samples/two_clients_two_iface @@ -0,0 +1,92 @@ +Ts:1756520436245,Iface:wlan0,Peer:10.149.180.1,RSSI:51; +Ts:1756520436245,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +Ts:1756520436245,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520436245; +Ts:1756520439330,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520439330,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520439330,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520439330; +Ts:1756520442415,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520442415,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520442415,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520442415; +Ts:1756520445498,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520445498,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520445498,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520445498; +Ts:1756520448583,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520448583,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520448583,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520448583; +Ts:1756520451665,Iface:wlan0,Peer:10.149.180.1,RSSI:49; 
+Ts:1756520451665,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520451665,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520451665; +Ts:1756520454746,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520454746,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520454746,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520454746; +Ts:1756520457829,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520457829,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520457829,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520457829; +Ts:1756520460911,Iface:wlan0,Peer:10.149.180.1,RSSI:50; +Ts:1756520460911,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520460911,Iface:wlan1,Peer:10.6.141.1,RSSI:42; +END,1756520460911; +Ts:1756520463993,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520463993,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520463993,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520463993; +Ts:1756520467074,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520467074,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520467074,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520467074; +Ts:1756520470153,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520470153,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520470153,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520470153; +Ts:1756520473234,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520473234,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520473234,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520473234; +Ts:1756520476313,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520476313,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520476313,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520476313; +Ts:1756520479393,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520479393,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520479393,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520479393; +Ts:1756520482475,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520482475,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520482475,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520482475; +Ts:1756520485553,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520485553,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520485553,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520485553; +Ts:1756520488633,Iface:wlan0,Peer:10.149.180.1,RSSI:48; +Ts:1756520488633,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520488633,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520488633; +Ts:1756520491715,Iface:wlan0,Peer:10.149.180.1,RSSI:51; +Ts:1756520491715,Iface:wlan1,Peer:10.149.180.1,RSSI:54; +Ts:1756520491715,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520491715; +Ts:1756520494793,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520494793,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520494793,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520494793; +Ts:1756520497870,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520497870,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520497870,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520497870; +Ts:1756520500948,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520500948,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520500948,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520500948; +Ts:1756520504028,Iface:wlan0,Peer:10.149.180.1,RSSI:49; +Ts:1756520504028,Iface:wlan1,Peer:10.149.180.1,RSSI:55; +Ts:1756520504028,Iface:wlan1,Peer:10.6.141.1,RSSI:41; +END,1756520504028; diff --git a/interface_rajant/scripts/thirdParty/.gitignore b/interface_rajant/thirdParty/.gitignore similarity index 100% rename from interface_rajant/scripts/thirdParty/.gitignore rename to 
interface_rajant/thirdParty/.gitignore diff --git a/mocha_core/CMakeLists.txt b/mocha_core/CMakeLists.txt index 64dd136..c9f0ee2 100644 --- a/mocha_core/CMakeLists.txt +++ b/mocha_core/CMakeLists.txt @@ -1,212 +1,84 @@ -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.8) project(mocha_core) -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - sensor_msgs - vision_msgs - message_generation -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -add_message_files( - FILES - # Legacy, may be removed in the future - Header_pub.msg - SM_state.msg - Client_stats.msg +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(ament_cmake_python REQUIRED) +find_package(rclpy REQUIRED) +find_package(rosidl_default_generators REQUIRED) +find_package(std_msgs REQUIRED) +find_package(builtin_interfaces REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(vision_msgs REQUIRED) +# uncomment the following section in order to fill in +# further dependencies manually. 
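+
+# Note (illustrative): the interfaces generated below become importable
+# from Python via the standard rosidl paths, e.g.
+#   from mocha_core.msg import ClientStats
+#   from mocha_core.srv import AddUpdateDB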
+ +# Generate messages and services +rosidl_generate_interfaces(${PROJECT_NAME} + "msg/ClientStats.msg" + "msg/HeaderPub.msg" + "msg/SMState.msg" + "msg/DatabaseCB.msg" + "srv/AddUpdateDB.srv" + "srv/GetDataHeaderDB.srv" + "srv/SelectDB.srv" + DEPENDENCIES std_msgs geometry_msgs vision_msgs builtin_interfaces ) -## Generate services in the 'srv' folder -add_service_files( - FILES - AddUpdateDB.srv - GetDataHeaderDB.srv - SelectDB.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - std_msgs - geometry_msgs - vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -CATKIN_DEPENDS message_runtime std_msgs geometry_msgs vision_msgs -# DEPENDS system_lib +# Install Python package modules as library +install( + DIRECTORY mocha_core/ + DESTINATION ${PYTHON_INSTALL_DIR}/${PROJECT_NAME} + FILES_MATCHING PATTERN "*.py" + PATTERN "__pycache__" EXCLUDE ) -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/radio_communication.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/radio_communication_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination +# Install executables as scripts install(PROGRAMS - scripts/integrate_database.py - DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} + mocha_core/mocha.py + mocha_core/database_server.py + mocha_core/synchronize_channel.py + mocha_core/zmq_comm_node.py + mocha_core/topic_publisher.py + mocha_core/translator.py + DESTINATION lib/${PROJECT_NAME} ) -## Mark executables and/or libraries for installation -# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -install(DIRECTORY - launch - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# Install launch files +install(DIRECTORY launch + DESTINATION share/${PROJECT_NAME} ) -install(DIRECTORY - scripts - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# Install config files +install(DIRECTORY config + DESTINATION share/${PROJECT_NAME} ) -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_radio_communication.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) +if(BUILD_TESTING) + # find_package(ament_lint_auto REQUIRED) + find_package(ament_cmake_pytest REQUIRED) + + # the following line skips the linter which checks for copyrights + # comment the line when a copyright and license is added to all source files + set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # comment the line when this package is in a git repo and when + # a copyright and license is added to all source files + set(ament_cmake_cpplint_FOUND TRUE) + # ament_lint_auto_find_test_dependencies() + + # Add Python tests + ament_add_pytest_test(test_database test/test_database.py) + ament_add_pytest_test(test_database_utils test/test_database_utils.py) + ament_add_pytest_test(test_zmq_comm_node test/test_zmq_comm_node.py) + ament_add_pytest_test(test_synchronize_channel test/test_synchronize_channel.py) + ament_add_pytest_test(test_database_server test/test_database_server.py) + ament_add_pytest_test(test_translator test/test_translator.py) + ament_add_pytest_test(test_topic_publisher test/test_topic_publisher.py) +endif() + +ament_package() diff --git 
a/mocha_core/config/topic_configs.yaml b/mocha_core/config/topic_configs.yaml index 235e839..961fb15 100644 --- a/mocha_core/config/topic_configs.yaml +++ b/mocha_core/config/topic_configs.yaml @@ -9,27 +9,12 @@ ground_robot: msg_priority: "NO_PRIORITY" msg_history: "LAST_MESSAGE" - - msg_topic: "/spomp_global/reachability_history" - msg_type: "spomp/LocalReachabilityArray" - msg_priority: "LOW_PRIORITY" - msg_history: "LAST_MESSAGE" - - msg_topic: "/spomp_global/path_viz" msg_type: "nav_msgs/Path" msg_priority: "MEDIUM_PRIORITY" msg_history: "LAST_MESSAGE" - - msg_topic: "/goal_manager/claimed_goals" - msg_type: "spomp/ClaimedGoalArray" - msg_priority: "HIGH_PRIORITY" - msg_history: "LAST_MESSAGE" - aerial_robot: - - msg_topic: "/asoom/map/compressed" - msg_type: "grid_map_msgs/GridMapCompressed" - msg_priority: "HIGH_PRIORITY" - msg_history: "LAST_MESSAGE" - - msg_topic: "/asoom/recent_key_pose" msg_type: "geometry_msgs/PoseStamped" msg_priority: "NO_PRIORITY" diff --git a/mocha_core/launch/database_translators_publishers.launch b/mocha_core/launch/database_translators_publishers.launch deleted file mode 100644 index 9a0c4dc..0000000 --- a/mocha_core/launch/database_translators_publishers.launch +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/mocha_core/launch/database_translators_publishers.launch.py b/mocha_core/launch/database_translators_publishers.launch.py new file mode 100644 index 0000000..37aa71d --- /dev/null +++ b/mocha_core/launch/database_translators_publishers.launch.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration, PathJoinSubstitution +from launch_ros.actions import Node +from launch_ros.substitutions import FindPackageShare + + +def generate_launch_description(): + """ + Launch database, translator, and topic publisher nodes for MOCHA system + """ + + # Declare launch arguments + robot_name_arg = DeclareLaunchArgument( + 'robot_name', + default_value='charon', + description='Name of the robot' + ) + + robot_configs_arg = DeclareLaunchArgument( + 'robot_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'robot_configs.yaml' + ]), + description='Path to robot configuration file' + ) + + topic_configs_arg = DeclareLaunchArgument( + 'topic_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'topic_configs.yaml' + ]), + description='Path to topic configuration file' + ) + + radio_configs_arg = DeclareLaunchArgument( + 'radio_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'radio_configs.yaml' + ]), + description='Path to radio configuration file' + ) + + sigterm_timeout_arg = DeclareLaunchArgument( + 'sigterm_timeout', default_value='10', + description='Modify default SIGTERM timeout to 10 seconds' + ) + + # Get launch configurations + robot_name = LaunchConfiguration('robot_name') + robot_configs = LaunchConfiguration('robot_configs') + topic_configs = LaunchConfiguration('topic_configs') + radio_configs = LaunchConfiguration('radio_configs') + + # Define nodes + mocha_node = Node( + package='mocha_core', + executable='mocha.py', + name='mocha', + output='screen', + parameters=[{ + 'robot_name': robot_name, + 'robot_configs': robot_configs, + 'radio_configs': radio_configs, + 'topic_configs': topic_configs, + 'rssi_threshold': 35 + }] + ) + + 
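+    # The translator and topic_publisher nodes below reuse the same
+    # robot/topic configuration parameters as the mocha node. A typical
+    # invocation (robot_name defaults to charon) is:
+    #   ros2 launch mocha_core database_translators_publishers.launch.py robot_name:=charon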
translator_node = Node( + package='mocha_core', + executable='translator.py', + name='translator', + output='screen', + parameters=[{ + 'robot_name': robot_name, + 'robot_configs': robot_configs, + 'topic_configs': topic_configs + }] + ) + + topic_publisher_node = Node( + package='mocha_core', + executable='topic_publisher.py', + name='topic_publisher', + output='screen', + parameters=[{ + 'robot_name': robot_name, + 'robot_configs': robot_configs, + 'topic_configs': topic_configs + }] + ) + + return LaunchDescription([ + sigterm_timeout_arg, + robot_name_arg, + robot_configs_arg, + topic_configs_arg, + radio_configs_arg, + mocha_node, + translator_node, + topic_publisher_node + ]) diff --git a/mocha_core/mocha_core/__init__.py b/mocha_core/mocha_core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mocha_core/scripts/core/database.py b/mocha_core/mocha_core/database.py similarity index 73% rename from mocha_core/scripts/core/database.py rename to mocha_core/mocha_core/database.py index 9c743ae..2b1d94f 100644 --- a/mocha_core/scripts/core/database.py +++ b/mocha_core/mocha_core/database.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 import threading -import hash_comm -import rospy +import mocha_core.hash_comm as hash_comm +import rclpy.time import pdb -import database_utils as du +import mocha_core.database_utils as du import numpy as np -import hash_comm import copy @@ -26,14 +25,14 @@ def __init__(self, robot_id, topic_id, topic_id (int): Topic id of the robot dtype (int): Data type identifier. priority (int): Priority level of the message. - ts (rostime): Timestamp of the message + ts (rclpy.time.Time): Timestamp of the message data (bytes): Binary data payload. """ assert isinstance(robot_id, int) assert isinstance(topic_id, int) assert isinstance(dtype, int) assert isinstance(priority, int) - assert isinstance(ts, rospy.Time) + assert isinstance(ts, rclpy.time.Time) assert isinstance(data, bytes) # The first three items are encoded in the header self.robot_id = robot_id @@ -47,6 +46,9 @@ def __init__(self, robot_id, topic_id, header = hash_comm.TsHeader.from_data(self.robot_id, self.topic_id, self.ts) self.header = header.bindigest() + # Update timestamp to match header precision (ms resolution) + _, _, header_ts = header.get_id_and_time() + self.ts = header_ts def __eq__(self, other): if not isinstance(other, DBMessage): @@ -59,19 +61,22 @@ def __eq__(self, other): return False if other.priority != self.priority: return False - if other.ts.secs != self.ts.secs: - return False - if np.abs(other.ts.nsecs - self.ts.nsecs) > 1000000: - print("nsecs diff: %d" % np.abs(other.ts.nsecs - self.ts.nsecs)) + if other.ts.nanoseconds != self.ts.nanoseconds: return False if other.data != self.data: return False return True def __str__(self): - return "%d, %d, %d, %d, %f" % (self.robot_id, self.topic_id, - self.dtype, self.priority, - self.ts) + string = f"----- Database Message -----\n" + string += f"Robot ID: {self.robot_id}\n" + string += f"Topic ID: {self.topic_id}\n" + string += f"Data type: {self.dtype}\n" + string += f"Priority: {self.priority}\n" + string += f"Timestamp (ns): {self.ts.nanoseconds}\n" + string += f"Binary data: {self.data}\n" + string += f"----------------------------" + return string class DBwLock(): @@ -83,13 +88,20 @@ class DBwLock(): sample_db may be a valid dictionary that can be preloaded into the object (useful for debugging, see sample_db.py) """ - def __init__(self, sample_db=None): + def __init__(self, sample_db=None, ros_node=None, 
insertion_cb=None): if sample_db is not None: assert isinstance(sample_db, dict) - self.db = copy.deepcopy(sample_db) + # For ROS2, we can't deepcopy Time objects, so we'll just assign directly + # This is acceptable for testing purposes. This function should not + # be called during production. + self.db = sample_db else: self.db = {} + if insertion_cb is not None: + assert callable(insertion_cb) self.lock = threading.Lock() + self.ros_node = ros_node + self.insertion_cb = insertion_cb def add_modify_data(self, dbm): """ Insert new stuff into the db. Use the header as message index """ @@ -103,8 +115,19 @@ def add_modify_data(self, dbm): self.db[dbm.robot_id] = {} if dbm.topic_id not in self.db[dbm.robot_id]: self.db[dbm.robot_id][dbm.topic_id] = {} + if dbm.header in self.db[dbm.robot_id][dbm.topic_id]: + if self.ros_node is not None: + error_msg = f"Database - Header collision detected. " + error_msg += f"You may want to decrease your publish rate into MOCHA. " + error_msg += f"Robot ID {dbm.robot_id}. Topic ID {dbm.topic_id}." + self.ros_node.get_logger().error(error_msg) + else: + raise Exception("database: Header Collision Detected") self.db[dbm.robot_id][dbm.topic_id][dbm.header] = dbm self.lock.release() + # Callback on insertion of new data + if self.insertion_cb is not None: + self.insertion_cb(dbm.robot_id, dbm.topic_id, dbm.ts, dbm.header) return dbm.header def get_header_list(self, filter_robot_id=None, filter_latest=None): @@ -129,20 +152,22 @@ def get_header_list(self, filter_robot_id=None, filter_latest=None): if filter_robot_id is not None and robot_id != filter_robot_id: continue for topic in self.db[robot_id]: - if filter_latest: - latest_msg_ts = rospy.Time(1, 0) - latest_msg = None - for header in self.db[robot_id][topic]: - msg_content = self.db[robot_id][topic][header] - if filter_latest and msg_content.ts > latest_msg_ts: - latest_msg_ts = msg_content.ts - latest_msg = msg_content - if not filter_latest: + if not filter_latest: + for header in self.db[robot_id][topic]: + msg_content = self.db[robot_id][topic][header] header_list[header] = {'prio': msg_content.priority, 'ts': msg_content.ts} - if filter_latest: - header_list[latest_msg.header] = {'prio': latest_msg.priority, - 'ts': latest_msg.ts} + else: + latest_msg_ts = rclpy.time.Time() + latest_msg = None + for header in self.db[robot_id][topic]: + msg_content = self.db[robot_id][topic][header] + if msg_content.ts > latest_msg_ts: + latest_msg_ts = msg_content.ts + latest_msg = msg_content + if latest_msg is not None: + header_list[latest_msg.header] = {'prio': latest_msg.priority, + 'ts': latest_msg.ts} self.lock.release() # Sort the dictionary by value, and get the keys @@ -161,11 +186,11 @@ def get_ts_dict(self): ts_dict[robot_id] = {} for topic in self.db[robot_id]: if topic not in ts_dict[robot_id]: - ts_dict[robot_id][topic] = -np.inf + ts_dict[robot_id][topic] = None for header in self.db[robot_id][topic]: msg = self.db[robot_id][topic][header] - ts_dict[robot_id][topic] = max(ts_dict[robot_id][topic], - msg.ts.to_sec()) + curr_time = ts_dict[robot_id][topic] + ts_dict[robot_id][topic] = max(curr_time, msg.ts) if curr_time is not None else msg.ts self.lock.release() return ts_dict @@ -192,7 +217,7 @@ def headers_not_in_local(self, remote_header_list, newer=False): missing_headers.append(h.bindigest()) elif t_id not in ts_dict[h.robot_id]: missing_headers.append(h.bindigest()) - elif time.to_sec() > ts_dict[h.robot_id][h.topic_id]: + elif (time > ts_dict[h.robot_id][h.topic_id]): 
missing_headers.append(h.bindigest()) return missing_headers diff --git a/mocha_core/mocha_core/database_server.py b/mocha_core/mocha_core/database_server.py new file mode 100644 index 0000000..c145318 --- /dev/null +++ b/mocha_core/mocha_core/database_server.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 + +import os +import threading +import rclpy +import rclpy.logging +import rclpy.time +from rclpy.callback_groups import ReentrantCallbackGroup +import mocha_core.srv +import mocha_core.database as database +import pdb +import mocha_core.database_utils as du +import mocha_core.msg + +from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy, DurabilityPolicy + + +class DatabaseServer: + """ Starts a clean database and offers an API-like service + to interact with the database. + + Please see the list of services in the srv folder + """ + QOS_PROFILE = QoSProfile( + reliability=ReliabilityPolicy.RELIABLE, + durability=DurabilityPolicy.VOLATILE, + history=HistoryPolicy.KEEP_ALL, + ) + + def __init__(self, robot_configs, topic_configs, robot_name, ros_node): + # Check input topics + assert isinstance(robot_configs, dict) + assert isinstance(topic_configs, dict) + assert isinstance(robot_name, str) + assert ros_node is not None + + self.robot_configs = robot_configs + self.topic_configs = topic_configs + + # Store or create the ROS2 node + self.ros_node = ros_node + self.logger = self.ros_node.get_logger() + self.ros_node_name = self.ros_node.get_fully_qualified_name() + + self.callback_group = ReentrantCallbackGroup() + + # Get robot name from parameter + self.robot_name = robot_name + + if self.robot_name not in self.robot_configs.keys(): + self.logger.error(f"{self.robot_name} does not exist in robot_configs") + raise ValueError("robot_name does not exist in robot_configs") + + self.topic_list = self.topic_configs[self.robot_configs[self.robot_name]["node-type"]] + + # Publish a message every time that the database is modified + pub_database = self.ros_node.create_publisher( + mocha_core.msg.DatabaseCB, + f"{self.ros_node_name}/database_cb", + 10 + ) + def database_cb(robot_id, topic_id, ts, header): + database_cb_msg = mocha_core.msg.DatabaseCB() + database_cb_msg.header.stamp = ts.to_msg() + database_cb_msg.header.frame_id = self.robot_name + database_cb_msg.robot_id = robot_id + database_cb_msg.topic_id = topic_id + database_cb_msg.msg_header = header + pub_database.publish(database_cb_msg) + + # Create the empty database with lock + self.dbl = database.DBwLock(ros_node=self.ros_node, insertion_cb=database_cb) + + # Get current robot number + self.robot_number = du.get_robot_id_from_name(self.robot_configs, + robot_name=self.robot_name) + + # create services for all the possible calls to the DB + self.service_list = [] + self.logger.debug("database_server: Starting database server") + s = self.ros_node.create_service(mocha_core.srv.AddUpdateDB, + f"{self.ros_node_name}/add_update_db", + self.add_update_db_service_cb, + callback_group=self.callback_group, + qos_profile=self.QOS_PROFILE) + self.service_list.append(s) + s = self.ros_node.create_service(mocha_core.srv.GetDataHeaderDB, + f"{self.ros_node_name}/get_data_header_db", + self.get_data_hash_db_service_cb, + callback_group=self.callback_group, + qos_profile=self.QOS_PROFILE) + self.service_list.append(s) + s = self.ros_node.create_service(mocha_core.srv.SelectDB, + f"{self.ros_node_name}/select_db", + self.select_db_service_cb, + callback_group=self.callback_group, + qos_profile=self.QOS_PROFILE) + 
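+        # All three services share a ReentrantCallbackGroup so that a
+        # MultiThreadedExecutor can serve requests concurrently; the
+        # RELIABLE / KEEP_ALL QOS_PROFILE matches the service clients
+        # created in translator.py and topic_publisher.py.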
self.service_list.append(s)
+        self.msg_types = du.msg_types(self.robot_configs, self.topic_configs,
+                                      self.ros_node)
+
+    def add_update_db_service_cb(self, req, response):
+        if req.topic_id is None or not isinstance(req.topic_id, int):
+            self.logger.error("topic_id empty")
+            return response
+        if len(req.msg_content) == 0:
+            self.logger.error("msg_content empty")
+            return response
+        if req.topic_id >= len(self.topic_list):  # topic_id must be a valid index into topic_list
+            self.logger.error(f"topic_id {req.topic_id} not in topic_list (length: {len(self.topic_list)})")
+            return response
+
+        topic = self.topic_list[req.topic_id]
+        priority = du.get_priority_number(topic["msg_priority"])
+        # req.timestamp is already a builtin_interfaces.msg.Time
+        # Convert to rclpy.time.Time for internal processing
+        ts_msg = req.timestamp
+        ts = rclpy.time.Time.from_msg(ts_msg)
+
+        # Convert array to bytes if needed (ROS2 service messages use array)
+        msg_data = req.msg_content
+        msg_data = bytes(msg_data)
+
+        dbm = database.DBMessage(self.robot_number,
+                                 req.topic_id,
+                                 dtype=self.msg_types[self.robot_number][req.topic_id]["dtype"],
+                                 priority=priority,
+                                 ts=ts,
+                                 data=msg_data)
+
+        header = self.dbl.add_modify_data(dbm)
+        response.new_header = header
+        return response
+
+    def get_data_hash_db_service_cb(self, req, response):
+        if req.msg_header is None or len(req.msg_header) == 0:
+            self.logger.error("msg_header empty")
+            return response
+
+        # Convert array to bytes if needed (ROS2 service messages use array)
+        header_data = req.msg_header
+        header_data = bytes(header_data)
+
+        dbm = self.dbl.find_header(header_data)
+
+        response.robot_id = dbm.robot_id
+        response.topic_id = dbm.topic_id
+        # Convert rclpy.time.Time to builtin_interfaces.msg.Time
+        response.timestamp = dbm.ts.to_msg()
+        response.msg_content = dbm.data
+        return response
+
+    def select_db_service_cb(self, req, response):
+        # TODO Implement filtering
+
+        list_headers = self.dbl.get_header_list(req.robot_id)
+
+        response.headers = du.serialize_headers(list_headers)
+        return response
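For reference, the services above are plain rclpy services. A minimal client
sketch follows (not part of the changeset; the /mocha/add_update_db name
matches the client in translator.py, while the DBClient class, topic id 0, and
the payload are illustrative):

    import rclpy
    from rclpy.node import Node
    from mocha_core.srv import AddUpdateDB


    class DBClient(Node):
        def __init__(self):
            super().__init__("db_client")
            # Service name follows the fully qualified name of the server node
            self.cli = self.create_client(AddUpdateDB, "/mocha/add_update_db")
            self.cli.wait_for_service()

        def insert(self, topic_id, payload):
            # payload: lz4-compressed serialized message, as produced by
            # database_utils.serialize_ros_msg()
            req = AddUpdateDB.Request()
            req.topic_id = topic_id
            req.timestamp = self.get_clock().now().to_msg()
            req.msg_content = payload
            return self.cli.call_async(req)  # the response carries new_header

    # Usage sketch:
    # rclpy.init()
    # node = DBClient()
    # future = node.insert(0, payload)
    # rclpy.spin_until_future_complete(node, future)
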
""" # Check input arguments @@ -28,6 +29,7 @@ def get_robot_id_from_name(robot_configs, robot_name): def get_robot_name_from_id(robot_configs, robot_id): + """ Returns the robot name from the ID """ assert isinstance(robot_configs, dict) assert robot_id is not None and isinstance(robot_id, int) robots = {get_robot_id_from_name(robot_configs, robot): robot for robot @@ -36,13 +38,17 @@ def get_robot_name_from_id(robot_configs, robot_id): def get_topic_id_from_name(robot_configs, topic_configs, - robot_name, topic_name): + robot_name, topic_name, ros_node): """ Returns the topic id for a particular robot, by searching the topic name""" assert isinstance(robot_configs, dict) assert isinstance(topic_configs, dict) + assert ros_node is not None assert robot_name is not None and isinstance(robot_name, str) assert topic_name is not None and isinstance(topic_name, str) + assert ros_node is not None + + logger = ros_node.get_logger() list_topics = topic_configs[robot_configs[robot_name]["node-type"]] id = None @@ -51,23 +57,27 @@ def get_topic_id_from_name(robot_configs, topic_configs, id = i break if id is None: - rospy.logerr(f"{topic_name} does not exist in topic_configs") - rospy.signal_shutdown("topic_name does not exist in topic_configs") - rospy.spin() + logger.error(f"{topic_name} does not exist in topic_configs") + return None return id -def get_topic_name_from_id(robot_configs, topic_configs, robot_name, topic_id): +def get_topic_name_from_id(robot_configs, topic_configs, robot_name, topic_id, + ros_node): """ Returns the topic name for a particular robot, given its id""" assert isinstance(robot_configs, dict) assert isinstance(topic_configs, dict) assert robot_name is not None and isinstance(robot_name, str) assert topic_id is not None and isinstance(topic_id, int) + assert ros_node is not None + + logger = ros_node.get_logger() + list_topics = topic_configs[robot_configs[robot_name]["node-type"]] if topic_id >= len(list_topics): - rospy.logerr(f"{topic_id} does not exist in topic_configs") - rospy.signal_shutdown("topic_id does not exist in topic_configs") - rospy.spin() + logger = rclpy.logging.get_logger('database_utils') + logger.error(f"{topic_id} does not exist in topic_configs") + return None return list_topics[topic_id]["msg_topic"] @@ -131,27 +141,29 @@ def unpack_data(header, packed_data): def serialize_ros_msg(msg): - # TODO check that we are not entering garbage - sio_h = io.BytesIO() - msg.serialize(sio_h) - serialized = sio_h.getvalue() + """ Serialize and compress a message using ROS 2 serialization """ + serialized = serialize_message(msg) compressed = lz4.frame.compress(serialized) return compressed +def deserialize_ros_msg(msg): + """ Decompress and deserialize a message using ROS 2 serialization """ + def parse_answer(api_answer, msg_types): constructor = msg_types[api_answer.robot_id][api_answer.topic_id]['obj'] - msg = constructor() # api_answer.msg_content has the compressed message + + # Debug: Check what we're trying to decompress decompressed = lz4.frame.decompress(api_answer.msg_content) - msg.deserialize(decompressed) + msg = deserialize_message(decompressed, constructor) robot_id = api_answer.robot_id topic_id = api_answer.topic_id ts = api_answer.timestamp return robot_id, topic_id, ts, msg -def msg_types(robot_configs, topic_configs): +def msg_types(robot_configs, topic_configs, ros_node): """ Extracts message types from the topic_configs dictionary and validates them. 
Returns a dictionary with message type names as keys and @@ -161,6 +173,9 @@ def msg_types(robot_configs, topic_configs): - 'obj' (the message type object itself) """ assert isinstance(topic_configs, dict) + assert ros_node is not None + + logger = ros_node.get_logger() msg_list = [] for robot in topic_configs: @@ -172,9 +187,8 @@ def msg_types(robot_configs, topic_configs): if not (len(parts) == 2 and all(part.replace("_", "").isalnum() for part in parts)): - rospy.logerr(f"Error: msg_type {msg} not valid") - rospy.signal_shutdown("Error: msg_type {msg} not valid") - rospy.spin() + logger.error(f"Error: msg_type {msg} not valid") + return None msg_list.append(topic['msg_type']) # Important: sort the msg_list so we have a deterministic order msg_list.sort() @@ -209,7 +223,8 @@ def generate_random_header(): # generate random robot_id and topic_id, between 0 and 255 robot_id = random.randint(0, 255) topic_id = random.randint(0, 255) - # Generate a random rospy timestamp - time = rospy.Time.from_sec(random.random()) + # Generate a random rclpy timestamp + time = rclpy.time.Time(seconds=random.randint(0, 65535), + nanoseconds=random.randint(0, 1000)*1e6) h = hash_comm.TsHeader.from_data(robot_id, topic_id, time) return h.bindigest() diff --git a/mocha_core/scripts/core/hash_comm.py b/mocha_core/mocha_core/hash_comm.py similarity index 77% rename from mocha_core/scripts/core/hash_comm.py rename to mocha_core/mocha_core/hash_comm.py index 31b5bce..67eb939 100644 --- a/mocha_core/scripts/core/hash_comm.py +++ b/mocha_core/mocha_core/hash_comm.py @@ -1,10 +1,19 @@ #!/usr/bin/env python3 import hashlib -import rospy +import rclpy.time import struct LENGTH = 6 +""" hash_comm.py + This file defines the potential headers to be used in the database. There + are two types: Hash and TsHeader. Hash is created by hashing the data with + sha256 and cropping the hash. TsHeader is created using the robot_id, + topic_id, and timestamp when the message was created. + + TsHeader is the implementation favored for MOCHA's paper, as it keeps the + size of the message exchange bounded. +""" class Hash(): HASH_LENGTH = LENGTH @@ -47,17 +56,17 @@ def from_data(cls, robot_id, topic_id, time): assert type(robot_id) == int and robot_id >= 0 and robot_id < 256 # Same for topic ID assert type(topic_id) == int and topic_id >= 0 and topic_id < 256 - # time is a rospy time object - assert type(time) == rospy.Time + # time is a rclpy time object + assert type(time) == rclpy.time.Time # The header will be comprised of 1 byte for the robot number, 1 byte # for the topic id, and 4 bytes for the time. The first 2 bytes of the # time are the secs, and the 2 last bytes are the ms of the message. # This means that we have ms resolution. 
We only handle positive times - assert type(time.secs) == int and time.secs >= 0 - assert type(time.nsecs) == int and time.nsecs >= 0 - secs = time.secs % 65536 - msecs = time.nsecs // 1000000 + assert type(time.seconds_nanoseconds()[0]) == int and time.seconds_nanoseconds()[0] >= 0 + assert type(time.seconds_nanoseconds()[1]) == int and time.seconds_nanoseconds()[1] >= 0 + secs = time.seconds_nanoseconds()[0] % 65536 + msecs = time.seconds_nanoseconds()[1] // 1000000 robot_id = robot_id topic_id = topic_id return cls(robot_id=robot_id, topic_id=topic_id, @@ -85,7 +94,7 @@ def get_id_and_time(self): assert self.topic_id is not None assert self.secs is not None assert self.msecs is not None - time = rospy.Time(self.secs, self.msecs*1000000) + time = rclpy.time.Time(seconds=self.secs, nanoseconds=self.msecs*1000000) return self.robot_id, self.topic_id, time if __name__ == "__main__": @@ -123,16 +132,16 @@ def get_id_and_time(self): for i in range(100): random_robot = random.randint(0, 255) random_topic = random.randint(0, 255) - random_time = rospy.Time(random.randint(0, 65536), - random.randint(0, 1000000000)) + random_time = rclpy.time.Time(seconds=random.randint(0, 65536), + nanoseconds=random.randint(0, 1000000000)) ts = TsHeader.from_data(random_robot, random_topic, random_time) bindigest = ts.bindigest() ts2 = TsHeader.from_header(bindigest) robot_id, topic_id, time = ts2.get_id_and_time() assert robot_id == random_robot assert topic_id == random_topic - assert time.secs == random_time.secs - assert np.abs(time.nsecs-random_time.nsecs) < 1000000 + assert time.seconds_nanoseconds()[0] == random_time.seconds_nanoseconds()[0] + assert np.abs(time.seconds_nanoseconds()[1]-random_time.seconds_nanoseconds()[1]) < 1000000 else: sys.exit("Invalid test") diff --git a/mocha_core/mocha_core/mocha.py b/mocha_core/mocha_core/mocha.py new file mode 100755 index 0000000..ba18017 --- /dev/null +++ b/mocha_core/mocha_core/mocha.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python3 +import os +import random +import signal +import sys +import time +import pdb +import traceback +from functools import partial + +import rclpy +from rclpy.logging import LoggingSeverity +import rclpy.logging +import rclpy.time +from rclpy.node import Node +from rclpy.executors import MultiThreadedExecutor +import threading +import yaml +import std_msgs.msg +import subprocess + +import mocha_core.database_server as ds +import mocha_core.database_utils as du +import mocha_core.synchronize_channel as sync + + +def ping(host): + command = ["ping", "-c", "1", host] + try: + result = subprocess.run(command, stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + return result.returncode == 0 + except Exception as e: + print(f"Error pinging {host}: {e}") + return False + + +class Mocha(Node): + def __init__(self): + super().__init__("mocha") + self.logger = self.get_logger() + # self.logger.set_level(LoggingSeverity.DEBUG) + + # Handle shutdown signal + self.shutdownTriggered = threading.Event() + self.shutdownTriggered.clear() + + def signal_handler(sig, frame): + if not self.shutdownTriggered.is_set(): + self.logger.warning(f"{self.this_robot} - MOCHA Server - Got SIGINT. 
Triggering shutdown.") + self.shutdown("Killed by user") + signal.signal(signal.SIGINT, signal_handler) + + + # Declare parameters + self.declare_parameter("robot_name", "") + self.declare_parameter("rssi_threshold", 20) + self.declare_parameter("client_timeout", 6.0) + self.declare_parameter("robot_configs", "") + self.declare_parameter("radio_configs", "") + self.declare_parameter("topic_configs", "") + + self.this_robot = self.get_parameter("robot_name").get_parameter_value().string_value + self.rssi_threshold = self.get_parameter("rssi_threshold").get_parameter_value().integer_value + + if len(self.this_robot) == 0: + self.logger.error(f"{self.this_robot} - MOCHA Server - Empty robot name") + raise ValueError("Empty robot name") + + self.logger.info(f"{self.this_robot} - MOCHA Server - " + + f"RSSI threshold: {self.rssi_threshold}") + self.client_timeout = self.get_parameter("client_timeout").get_parameter_value().double_value + self.logger.info(f"{self.this_robot} - MOCHA Server - " + + f"Client timeout: {self.client_timeout}") + + # Load and check robot configs + self.robot_configs_file = self.get_parameter("robot_configs").get_parameter_value().string_value + try: + with open(self.robot_configs_file, "r") as f: + self.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - MOCHA Server - robot_configs file") + raise e + if self.this_robot not in self.robot_configs.keys(): + self.logger.error(f"{self.this_robot} - MOCHA Server - robot_configs file") + raise ValueError("Robot not in config file") + + # Load and check radio configs + self.radio_configs_file = self.get_parameter("radio_configs").get_parameter_value().string_value + try: + with open(self.radio_configs_file, "r") as f: + self.radio_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - MOCHA Server - radio_configs file") + raise e + self.radio = self.robot_configs[self.this_robot]["using-radio"] + if self.radio not in self.radio_configs.keys(): + self.logger.error(f"{self.this_robot} - MOCHA Server - radio_configs file") + raise ValueError("Radio {self.radio} not in config file") + + # Load and check topic configs + self.topic_configs_file = self.get_parameter("topic_configs").get_parameter_value().string_value + try: + with open(self.topic_configs_file, "r") as f: + self.topic_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - MOCHA Server - topics_configs file") + raise e + self_type = self.robot_configs[self.this_robot]["node-type"] + if self_type not in self.topic_configs.keys(): + self.logger.error(f"{self.this_robot} - MOCHA Server - topics_configs file") + raise ValueError("Node type not in config file") + + # Check that we can ping the radios + ip = self.robot_configs[self.this_robot]["IP-address"] + if not ping(ip): + self.logger.error(f"{self.this_robot} - MOCHA Server - " + + f"Cannot ping self {ip}. 
Is the radio on?") + raise ValueError("Cannot ping self") + + # Create database server + self.DBServer = ds.DatabaseServer(self.robot_configs, + self.topic_configs, self.this_robot, self) + + + self.num_robot_in_comm = 0 + + self.logger.info(f"{self.this_robot} - MOCHA Server - " + + "Created all communication channels!") + + # Start comm channels with other robots + self.all_channels = [] + self.other_robots = [i for i in list(self.robot_configs.keys()) if i != + self.this_robot] + for other_robot in self.other_robots: + if other_robot not in self.robot_configs[self.this_robot]["clients"]: + self.logger.warning( + f"{self.this_robot} - MOCHA Server - "+ + f"Skipping channel {self.this_robot}->{other_robot} " + + "as it is not in the graph of this robot" + ) + continue + # Start communication channel + channel = sync.Channel(self.DBServer.dbl, + self.this_robot, + other_robot, self.robot_configs, + self.client_timeout, self) + channel.run() + self.all_channels.append(channel) + + # Attach a radio trigger to each channel. This will be triggered + # when the RSSI is high enough. You can use another approach here + # such as using a timer to periodically trigger the sync + def make_callback(ch): + return lambda msg: self.rssi_cb(msg, ch) + + self.create_subscription( + std_msgs.msg.Int32, + 'mocha/rajant/rssi/' + other_robot, + make_callback(channel), + 10 + ) + + def shutdown(self, reason): + # Only trigger shutdown once + if self.shutdownTriggered.is_set(): + return + self.shutdownTriggered.set() + + assert isinstance(reason, str) + self.logger.error(f"{self.this_robot} - MOCHA Server - " + reason) + # Shutting down communication channels + if hasattr(self, 'all_channels') and len(self.all_channels) != 0: + for channel in self.all_channels: + channel.stop() + self.logger.warning(f"{self.this_robot} - MOCHA Server - " + "Killed Channels") + # Wait for all the channels to be gone. 
This needs to be slightly + # larger than RECV_TIMEOUT + time.sleep(3.5) + self.logger.warning(f"{self.this_robot} - MOCHA Server - " + "Shutdown complete") + + def rssi_cb(self, data, comm_node): + rssi = data.data + if rssi > self.rssi_threshold: + self.num_robot_in_comm += 1 + try: + self.logger.info(f"{self.this_robot} <- {comm_node.target_robot}: Triggering comms") + comm_node.trigger_sync() + except: + traceback.print_exception(*sys.exc_info()) + + +def main(args=None): + # Initialize ROS2 with command line arguments + rclpy.init(args=args) + + # Start the node + try: + mocha = Mocha() + except Exception as e: + print(f"Node initialization failed: {e}") + rclpy.shutdown() + return + + # Load mtexecutor + mtexecutor = MultiThreadedExecutor(num_threads=4) + mtexecutor.add_node(mocha) + + # Use context manager for clean shutdown + try: + # Spin with periodic checking for shutdown + while rclpy.ok() and not mocha.shutdownTriggered.is_set(): + mtexecutor.spin_once(timeout_sec=0.1) + except KeyboardInterrupt: + print("Keyboard Interrupt") + mocha.shutdown("KeyboardInterrupt") + except Exception as e: + print(f"Exception: {e}") + mocha.shutdown(f"Exception: {e}") + +if __name__ == "__main__": + main() diff --git a/mocha_core/scripts/core/synchronize_channel.py b/mocha_core/mocha_core/synchronize_channel.py similarity index 80% rename from mocha_core/scripts/core/synchronize_channel.py rename to mocha_core/mocha_core/synchronize_channel.py index 92a84e3..3c96482 100644 --- a/mocha_core/scripts/core/synchronize_channel.py +++ b/mocha_core/mocha_core/synchronize_channel.py @@ -2,16 +2,18 @@ import enum import threading +import time import smach -import database as db -import database_utils as du -import hash_comm as hc -import zmq_comm_node -import logging -import rospy +import mocha_core.database as db +import mocha_core.database_utils as du +import mocha_core.hash_comm as hc +import mocha_core.zmq_comm_node as zmq_comm_node +import rclpy +import rclpy.logging +import rclpy.time import pdb -from std_msgs.msg import Time, String -from mocha_core.msg import SM_state +from builtin_interfaces.msg import Time +from mocha_core.msg import SMState # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # General configuration variables @@ -47,13 +49,12 @@ def __init__(self, outer): def execute(self, userdata): self.outer.publishState("Idle Start") - while (not self.outer.sm_shutdown.is_set() and - not rospy.is_shutdown()): + while (not self.outer.sm_shutdown.is_set()): if self.outer.sync.get_state(): # trigger sync and reset bistable self.outer.publishState("Idle to RequestHash") return 'to_req_hash' - rospy.sleep(CHECK_TRIGGER_TIME) + time.sleep(CHECK_TRIGGER_TIME) self.outer.publishState("Idle to Stopped") return 'to_stopped' @@ -82,12 +83,12 @@ def execute(self, userdata): and not self.outer.sm_shutdown.is_set()): answer = self.outer.client_answer if answer is not None: - rospy.logdebug(f"{comm.this_node} - Channel" + + self.outer.logger.debug(f"{comm.this_node} - Channel" + f"- REQUESTHASH: {answer}") userdata.out_answer = answer self.outer.publishState("RequestHash to Reply") return 'to_req_hash_reply' - rospy.sleep(CHECK_POLL_TIME) + time.sleep(CHECK_POLL_TIME) i += 1 if self.outer.sm_shutdown.is_set(): self.outer.publishState("RequestHash to Stopped") @@ -113,7 +114,7 @@ def execute(self, userdata): # depending on the message type hash_list = self.outer.dbl.headers_not_in_local(deserialized, newer=True) - rospy.logdebug(f"======== - REQUESTHASH: {hash_list}") + 
self.outer.logger.debug(f"======== - REQUESTHASH: {hash_list}") if len(hash_list): # We have hashes. Go get them # rospy.logdebug(f"{self.this_robot} - REQUESTHASH: Unique -> {hash_list}") @@ -143,7 +144,7 @@ def execute(self, userdata): hash_list = userdata.in_hash_list.copy() # Get the first hash of the list, the one with the higher priority req_hash = hash_list.pop(0) - rospy.logdebug(f"{comm.this_node} - Channel - GETDATA: {req_hash}") + self.outer.logger.debug(f"{comm.this_node} - Channel - GETDATA: {req_hash}") # Ask for hash msg = Comm_msgs.GDATA.name.encode() + req_hash comm.connect_send_message(msg) @@ -160,7 +161,7 @@ def execute(self, userdata): userdata.out_req_hash = req_hash self.outer.publishState("GetData to GetDataReply") return 'to_get_data_reply' - rospy.sleep(CHECK_POLL_TIME) + time.sleep(CHECK_POLL_TIME) i += 1 if self.outer.sm_shutdown.is_set(): self.outer.publishState("GetData to Stopped") @@ -208,7 +209,7 @@ def execute(self, userdata): self.outer.publishState("TransmissionEnd Start") # Request current comm node comm = self.outer.get_comm_node() - rospy.logdebug(f"{comm.this_node} - Channel - DENDT") + self.outer.logger.debug(f"{comm.this_node} - Channel - DENDT") # Ask for hash msg = Comm_msgs.DENDT.name.encode() + self.outer.this_robot.encode() comm.connect_send_message(msg) @@ -221,12 +222,14 @@ def execute(self, userdata): answer = self.outer.client_answer if answer is not None: # We received an ACK - self.outer.client_sync_complete_pub.publish(Time(rospy.get_rostime())) + if self.outer.client_sync_complete_pub is not None: + current_time = self.outer.ros_node.get_clock().now() + self.outer.client_sync_complete_pub.publish(current_time.to_msg()) break - rospy.sleep(CHECK_POLL_TIME) + time.sleep(CHECK_POLL_TIME) i += 1 if self.outer.sm_shutdown.is_set(): - self.outer.publishState("TransmissionEnd to Stopped") + # self.outer.publishState("TransmissionEnd to Stopped") return 'to_stopped' self.outer.sync.reset() self.outer.publishState("TransmissionEnd to Idle") @@ -265,7 +268,7 @@ def get_state(self): class Channel(): def __init__(self, dbl, this_robot, target_robot, robot_configs, - client_timeout): + client_timeout, ros_node): # Check input arguments assert type(dbl) is db.DBwLock assert type(this_robot) is str @@ -273,11 +276,15 @@ def __init__(self, dbl, this_robot, assert type(robot_configs) is dict assert type(client_timeout) is float or type(client_timeout) is int - # Override smach logger to use rospy loggers - # Use rospy.logdebug for smach info as it is too verbose - # def set_loggers(info,warn,debug,error): - smach.set_loggers(rospy.logdebug, rospy.logwarn, - rospy.logdebug, rospy.logerr) + # Confirm that we are providing ad ROS node + assert ros_node is not None + self.ros_node = ros_node + self.logger = self.ros_node.get_logger() + self.ros_node_name = self.ros_node.get_fully_qualified_name() + + # Override smach logger to use ROS2 loggers + smach.set_loggers(self.logger.debug, self.logger.warn, + self.logger.debug, self.logger.error) # Basic parameters of the communication channel self.this_robot = this_robot @@ -306,18 +313,23 @@ def __init__(self, dbl, this_robot, self.sm = smach.StateMachine(outcomes=['failure', 'stopped']) # Create topic to notify that the transmission ended - self.client_sync_complete_pub = rospy.Publisher(f"ddb/client_sync_complete/{self.target_robot}", - Time, - queue_size=20) - self.server_sync_complete_pub = rospy.Publisher(f"ddb/server_sync_complete/{self.target_robot}", - Time, - queue_size=20) + 
self.client_sync_complete_pub = self.ros_node.create_publisher( + Time, + f"{self.ros_node_name}/client_sync_complete/{self.target_robot}", + 20 + ) + self.server_sync_complete_pub = self.ros_node.create_publisher( + Time, + f"{self.ros_node_name}/server_sync_complete/{self.target_robot}", + 20 + ) # Create a topic that prints the current state of the state machine - self.sm_state_pub = rospy.Publisher(f"ddb/client_sm_state/{self.target_robot}", - SM_state, - queue_size=20) - self.sm_state_count = 0 + self.sm_state_pub = self.ros_node.create_publisher( + SMState, + f"{self.ros_node_name}/client_sm_state/{self.target_robot}", + 20 + ) with self.sm: smach.StateMachine.add('IDLE', @@ -366,13 +378,12 @@ def publishState(self, msg): """ Publish the string msg (where the state message will be stored) with a timestamp""" assert type(msg) is str - state_msg = SM_state() - state_msg.header.stamp = rospy.Time.now() - state_msg.header.frame_id = self.this_robot - state_msg.header.seq = self.sm_state_count - self.sm_state_count += 1 - state_msg.state = msg - self.sm_state_pub.publish(state_msg) + if self.sm_state_pub is not None: + state_msg = SMState() + state_msg.header.stamp = self.ros_node.get_clock().now().to_msg() + state_msg.header.frame_id = self.this_robot + state_msg.state = msg + self.sm_state_pub.publish(state_msg) def run(self): """ Configures the zmq_comm_node and also starts the state @@ -384,54 +395,55 @@ def run(self): self.robot_configs, self.callback_client, self.callback_server, - self.client_timeout) + self.client_timeout, + self.ros_node) # Unset this flag before starting the SM thread self.sm_shutdown.clear() self.th = threading.Thread(target=self.sm_thread, args=()) - self.th.setDaemon(True) self.th.start() def stop(self): # Set the flag and wait until the state machine finishes self.sm_shutdown.set() + # This terminates the comm node, so we can wait for the thread to finish self.th.join() + self.logger.info(f"Channel {self.this_robot} -> {self.target_robot} destroyed") def sm_thread(self): # Start the state machine and wait until it ends - rospy.logwarn(f"Channel {self.this_robot} -> {self.target_robot} started") + self.logger.info(f"Channel {self.this_robot} -> {self.target_robot} started") outcome = self.sm.execute() + # Terminate the comm node once the state machine ends + self.comm_node.terminate() exit_msg = f"Channel {self.this_robot} -> {self.target_robot}" + \ f" finished with outcome: {outcome}" if outcome == 'failure': - rospy.logerr(exit_msg) + self.logger.error(exit_msg) elif outcome == 'stopped': - rospy.logwarn(exit_msg) - # Terminate the comm node once the state machine ends - self.comm_node.terminate() + self.logger.info(exit_msg) def get_comm_node(self): if not self.comm_node: - rospy.logerr("Requesting for an empty comm node") - rospy.signal_shutdown("Requesting for an empty comm node") - rospy.spin() + self.logger.error("Requesting for an empty comm node") + raise RuntimeError("Requesting for an empty comm node") return self.comm_node def trigger_sync(self): if self.sync.get_state(): - rospy.logwarn(f"{self.this_robot} <- {self.target_robot}: Channel Busy") + self.logger.warn(f"{self.this_robot} <- {self.target_robot}: Channel busy") else: self.sync.set() def callback_client(self, msg): if msg is not None: - rospy.logdebug(f"{self.this_robot} - Channel - CALLBACK_CLIENT: len: {len(msg)}") + self.logger.debug(f"{self.this_robot} - Channel - CALLBACK_CLIENT: len: {len(msg)}") else: - rospy.logdebug(f"{self.this_robot} - Channel - CALLBACK_CLIENT - None") + 
self.logger.debug(f"{self.this_robot} - Channel - CALLBACK_CLIENT - None") self.client_answer = msg def callback_server(self, msg): - rospy.logdebug(f"{self.this_robot} - Channel - CALLBACK_SERVER: {msg}") + self.logger.debug(f"{self.this_robot} - Channel - CALLBACK_SERVER: {msg}") header = msg[:CODE_LENGTH].decode() data = msg[CODE_LENGTH:] @@ -439,8 +451,8 @@ def callback_server(self, msg): # Returns all the headers that this node has # FIXME(Fernando): Configure this depending on the message type headers = self.dbl.get_header_list(filter_latest=True) - rospy.logdebug(f"{self.this_robot} - Channel - Sending {len(headers)} headers") - rospy.logdebug(f"{self.this_robot} - Channel - {headers}") + self.logger.debug(f"{self.this_robot} - Channel - Sending {len(headers)} headers") + self.logger.debug(f"{self.this_robot} - Channel - {headers}") serialized = du.serialize_headers(headers) return serialized if header == Comm_msgs.GDATA.name: @@ -448,14 +460,14 @@ def callback_server(self, msg): # Returns a packed data for the requires header # One header at a time if len(data) != HEADER_LENGTH: - rospy.logerr(f"{self.this_robot} - Wrong header length: {len(data)}") + self.logger.error(f"{self.this_robot} - Wrong header length: {len(data)}") return Comm_msgs.SERRM.name.encode() try: dbm = self.dbl.find_header(r_header) packed = du.pack_data(dbm) return packed except Exception: - rospy.logerr(f"{self.this_robot} - Header not found: {r_header}") + self.logger.error(f"{self.this_robot} - Header not found: {r_header}") return Comm_msgs.SERRM.name.encode() if header == Comm_msgs.DENDT.name: target = data @@ -463,6 +475,8 @@ def callback_server(self, msg): print(f"{self.this_robot} - Channel - Wrong DENDT -" + f" Target: {target.decode()} - " + f"My target: {self.target_robot}") - self.server_sync_complete_pub.publish(Time(rospy.get_rostime())) + if self.server_sync_complete_pub is not None: + current_time = self.ros_node.get_clock().now() + self.server_sync_complete_pub.publish(current_time.to_msg()) return "Ack".encode() return Comm_msgs.SERRM.name.encode() diff --git a/mocha_core/mocha_core/topic_publisher.py b/mocha_core/mocha_core/topic_publisher.py new file mode 100755 index 0000000..967f1f4 --- /dev/null +++ b/mocha_core/mocha_core/topic_publisher.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 +import os +import pdb +import re +import sys + +import rclpy +import rclpy.clock +import rclpy.time +from rclpy.node import Node +import yaml +import threading +from rclpy.callback_groups import ReentrantCallbackGroup +from rclpy.executors import MultiThreadedExecutor +import mocha_core.database_utils as du +import mocha_core.msg +import mocha_core.srv +from mocha_core.srv import AddUpdateDB, GetDataHeaderDB, SelectDB +from mocha_core.msg import DatabaseCB +import mocha_core.database_server as ds +import mocha_core.hash_comm as hc +import queue +from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy, DurabilityPolicy + + +QOS_PROFILE = QoSProfile( + reliability=ReliabilityPolicy.RELIABLE, + durability=DurabilityPolicy.VOLATILE, + history=HistoryPolicy.KEEP_ALL, +) + + +class TopicPublisher: + def __init__(self, robot_id, robot_name, topic_id, topic_name, msg_types, obj, ros_node): + assert robot_id is not None and isinstance(robot_id, int) + assert topic_id is not None and isinstance(topic_id, int) + assert ros_node is not None + assert msg_types is not None + + self.robot_id = robot_id + self.robot_name = robot_name + self.topic_id = topic_id + self.topic_name = topic_name + self.msg_types = 
msg_types + self.ros_node = ros_node + self.logger = self.ros_node.get_logger() + self.get_data_header_db_cli = ros_node.get_data_header_db_cli + # self.header_pub = ros_node.header_pub + + self.publisher = self.ros_node.create_publisher(obj, + f"/{robot_name}{topic_name}", + qos_profile=QOS_PROFILE) + self.is_shutdown = threading.Event() + self.is_shutdown.set() + + self.msg_queue = queue.Queue() + + self.th = threading.Thread(target=self.run, args=()) + + + def run(self): + self.logger.info(f"{self.robot_name}{self.topic_name} - Topic Publisher - Started") + self.is_shutdown.clear() + while not self.is_shutdown.is_set(): + # Try to acquire the mutex and loop back if we failed + try: + msg_header = self.msg_queue.get(timeout=1) + except queue.Empty: + continue + self.logger.debug(f"{self.robot_name}{self.topic_name} - Topic Publisher - Loop") + req = mocha_core.srv.GetDataHeaderDB.Request() + req.msg_header = msg_header + future = self.get_data_header_db_cli.call_async(req) + + def handle_response(future): + try: + if future.done(): + answ = future.result() + ans_robot_id, ans_topic_id, ans_ts, ans_data = du.parse_answer( + answ, self.msg_types + ) + self.publisher.publish(ans_data) + # self.msg_queue.task_done() + else: + self.logger.warning(f"{self.robot_name} - Topic Publisher - get_data_header_db service call timeout") + except Exception as e: + self.logger.error(f"{self.robot_name} - Topic Publisher - Error handling service response: {e}") + + future.add_done_callback(handle_response) + + def get_message(self, msg_header): + self.logger.debug(f"{self.robot_name} - Topic Publisher - Callback") + self.msg_queue.put(msg_header) + + def shutdown(self): + # self.msg_queue.join() + self.is_shutdown.set() + +class TopicPublisherNode(Node): + def __init__(self, this_robot=None, robot_configs=None, topic_configs=None): + super().__init__("topic_publisher") + # Declare parameters + self.declare_parameter("robot_name", "") + self.declare_parameter("robot_configs", "") + self.declare_parameter("topic_configs", "") + self.logger = self.get_logger() + + # Create reentrant callback group like DatabaseServer + self.callback_group = ReentrantCallbackGroup() + + self.this_robot = self.get_parameter("robot_name").get_parameter_value().string_value if this_robot is None else this_robot + + if len(self.this_robot) == 0: + self.logger.error(f"{self.this_robot} - Topic Publisher - Empty robot name") + raise ValueError("Empty robot name") + + # Load and check robot configs + self.robot_configs_file = self.get_parameter("robot_configs").get_parameter_value().string_value if robot_configs is None else robot_configs + try: + with open(self.robot_configs_file, "r") as f: + self.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - Topic Publisher - robot_configs file") + raise e + if self.this_robot not in self.robot_configs.keys(): + self.logger.error(f"{self.this_robot} - Topic Publisher - robot_configs file") + raise ValueError("Robot not in config file") + + # Load and check topic configs + self.topic_configs_file = self.get_parameter("topic_configs").get_parameter_value().string_value if topic_configs is None else topic_configs + try: + with open(self.topic_configs_file, "r") as f: + self.topic_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - Topic Publisher - topics_configs file") + raise e + self_type = self.robot_configs[self.this_robot]["node-type"] + if self_type not in 
self.topic_configs.keys(): + self.logger.error(f"{self.this_robot} - Topic Publisher - topics_configs file") + raise ValueError("Node type not in config file") + this_robot_topics = self.topic_configs[self.robot_configs[self.this_robot]["node-type"]] + + # Get msg_types dict used to publish the topics + self.msg_types = du.msg_types(self.robot_configs, self.topic_configs, + self) + + # Create the service clients + self.get_data_header_db_cli = self.create_client(GetDataHeaderDB, + "/mocha/get_data_header_db", + qos_profile=ds.DatabaseServer.QOS_PROFILE) + wait_counter = 0 + while not self.get_data_header_db_cli.wait_for_service(timeout_sec=1.0): + self.logger.debug("Topic Publisher - Waiting for get_data_header_db") + wait_counter += 1 + if wait_counter % 5 == 0: + self.logger.warn("Topic Publisher - Waiting for get_data_header_db for more than 5 seconds") + + # Create a service to publish the received headers + # self.header_pub = self.create_publisher( + # mocha_core.msg.HeaderPub, "topic_publisher/headers", 10 + # ) + + self.list_of_threads = {} + # Iterate for each robot in robot_configs, and generate the threads + for robot in self.robot_configs.keys(): + robot_id = du.get_robot_id_from_name(self.robot_configs, robot) + if robot_id not in self.list_of_threads: + self.list_of_threads[robot_id] = {} + robot_type = self.robot_configs[robot]["node-type"] + topic_list = self.topic_configs[robot_type] + for topic in topic_list: + msg_topic = topic["msg_topic"] + topic_id = du.get_topic_id_from_name(self.robot_configs, + self.topic_configs, + robot, + msg_topic, + self) + if topic_id not in self.list_of_threads[robot_id]: + self.list_of_threads[robot_id][topic_id] = {} + + obj = self.msg_types[robot_id][topic_id]["obj"] + tp = TopicPublisher(robot_id, robot, topic_id, msg_topic, + self.msg_types, obj, self) + self.list_of_threads[robot_id][topic_id] = tp + tp.th.start() + sub = self.create_subscription(DatabaseCB, "/mocha/database_cb", + self.msgheader_callback, 10, + callback_group=self.callback_group) + + def msgheader_callback(self, data): + robot_id = data.robot_id + topic_id = data.topic_id + msg_header = data.msg_header + + # Signal the right thread + self.list_of_threads[robot_id][topic_id].get_message(msg_header) + + + def shutdown(self): + for topic_ids in self.list_of_threads.values(): + for tp in topic_ids.values(): + tp.shutdown() + for topic_ids in self.list_of_threads.values(): + for tp in topic_ids.values(): + tp.th.join() + + +def main(args=None): + # Initialize ROS2 + rclpy.init() + try: + topic_publisher_node = TopicPublisherNode() + except Exception as e: + print(f"Node initialization failed: {e}") + rclpy.shutdown() + return + + # Load mtexecutor + mtexecutor = MultiThreadedExecutor(num_threads=4) + mtexecutor.add_node(topic_publisher_node) + + try: + mtexecutor.spin() + except KeyboardInterrupt: + print("Keyboard Interrupt") + topic_publisher_node.shutdown() + except Exception as e: + print(f"Exception: {e}") + topic_publisher_node.shutdown() + +if __name__ == "__main__": + main() diff --git a/mocha_core/mocha_core/translator.py b/mocha_core/mocha_core/translator.py new file mode 100755 index 0000000..3276f96 --- /dev/null +++ b/mocha_core/mocha_core/translator.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 + +import os +import pdb +import sys + +import rclpy +import rclpy.time +import yaml +from rclpy.node import Node +import mocha_core.srv +# Get the database utils module +from rclpy.executors import MultiThreadedExecutor +from rclpy.callback_groups import 
ReentrantCallbackGroup
+import mocha_core.database_utils as du
+from mocha_core.srv import AddUpdateDB, GetDataHeaderDB, SelectDB
+import mocha_core.database_server as ds
+
+from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy, DurabilityPolicy
+
+
+QOS_PROFILE = QoSProfile(
+    reliability=ReliabilityPolicy.RELIABLE,
+    durability=DurabilityPolicy.VOLATILE,
+    history=HistoryPolicy.KEEP_ALL,
+)
+
+
+class Translator():
+    def __init__(self, this_robot, this_robot_id, topic_name, topic_id,
+                 msg_type, ros_node, callback_group=None):
+
+        self.logger = ros_node.get_logger()
+        self.ros_node = ros_node
+        self.topic_name = topic_name
+        self.topic_id = topic_id
+        self.this_robot = this_robot
+        self.this_robot_id = this_robot_id
+
+        # Create service client
+        self.add_update_db_cli = self.ros_node.create_client(AddUpdateDB,
+                                                             "/mocha/add_update_db",
+                                                             qos_profile=ds.DatabaseServer.QOS_PROFILE)
+        wait_counter = 0
+        while not self.add_update_db_cli.wait_for_service(timeout_sec=1.0):
+            self.logger.debug("Translator - Waiting for add_update_db")
+            wait_counter += 1
+            if wait_counter % 5 == 0:
+                self.logger.warn("Translator - Waiting for add_update_db for more than 5 seconds")
+
+        # Create subscriber with callback group
+        self.subscription = self.ros_node.create_subscription(
+            msg_type, self.topic_name, self.translator_cb,
+            callback_group=callback_group,
+            qos_profile=QOS_PROFILE
+        )
+        self.logger.info(f"Translator created for {self.topic_name}")
+
+    def translator_cb(self, data):
+        msg = data
+
+        serialized_msg = du.serialize_ros_msg(msg)
+        # Get current time
+        ts = self.ros_node.get_clock().now().to_msg()
+
+        # Create service request
+        req = mocha_core.srv.AddUpdateDB.Request()
+        req.topic_id = self.topic_id
+        req.timestamp = ts
+        req.msg_content = serialized_msg
+
+        future = self.add_update_db_cli.call_async(req)
+        def response_callback(future):
+            if future.done():
+                answ = future.result()  # the response carries the new header
+            else:
+                self.logger.warning(f"{self.this_robot} - translator - " +
+                                    f"Service call failed for {self.topic_name}")
+        future.add_done_callback(response_callback)
+
+class TranslatorNode(Node):
+    def __init__(self, this_robot=None, robot_configs=None, topic_configs=None):
+        super().__init__("translator")
+        # Declare parameters
+        self.declare_parameter("robot_name", "")
+        self.declare_parameter("robot_configs", "")
+        self.declare_parameter("topic_configs", "")
+        self.logger = self.get_logger()
+
+        # Create reentrant callback group like DatabaseServer
+        self.callback_group = ReentrantCallbackGroup()
+
+        self.this_robot = self.get_parameter("robot_name").get_parameter_value().string_value if this_robot is None else this_robot
+
+        if len(self.this_robot) == 0:
+            self.logger.error(f"{self.this_robot} - Translator - Empty robot name")
+            raise ValueError("Empty robot name")
+
+        # Load and check robot configs
+        self.robot_configs_file = self.get_parameter("robot_configs").get_parameter_value().string_value if robot_configs is None else robot_configs
+        try:
+            with open(self.robot_configs_file, "r") as f:
+                self.robot_configs = yaml.load(f, Loader=yaml.FullLoader)
+        except Exception as e:
+            self.logger.error(f"{self.this_robot} - Translator - could not load robot_configs file")
+            raise e
+        if self.this_robot not in self.robot_configs.keys():
+            self.logger.error(f"{self.this_robot} - Translator - robot not in robot_configs file")
+            raise ValueError("Robot not in config file")
+
+        # Load and check topic configs
+        self.topic_configs_file = 
self.get_parameter("topic_configs").get_parameter_value().string_value if topic_configs is None else topic_configs + try: + with open(self.topic_configs_file, "r") as f: + self.topic_configs = yaml.load(f, Loader=yaml.FullLoader) + except Exception as e: + self.logger.error(f"{self.this_robot} - Translator - topics_configs file") + raise e + self_type = self.robot_configs[self.this_robot]["node-type"] + if self_type not in self.topic_configs.keys(): + self.logger.error(f"{self.this_robot} - Translator - topics_configs file") + raise ValueError("Node type not in config file") + this_robot_topics = self.topic_configs[self.robot_configs[self.this_robot]["node-type"]] + + # Get msg_types dict used to publish the topics + self.msg_types = du.msg_types(self.robot_configs, self.topic_configs, + self) + + # Create translators for each topic + translators = [] + for topic_id, topic in enumerate(this_robot_topics): + # Get robot id + robot_id = du.get_robot_id_from_name(self.robot_configs, self.this_robot) + obj = self.msg_types[robot_id][topic_id]["obj"] + translator = Translator( + self.this_robot, robot_id, topic["msg_topic"], topic_id, obj, self, self.callback_group + ) + translators.append(translator) + +def main(args=None): + rclpy.init(args=args) + try: + translator_node = TranslatorNode() + except Exception as e: + print(f"Node initialization failed: {e}") + rclpy.shutdown() + return + + # Load mtexecutor + mtexecutor = MultiThreadedExecutor(num_threads=4) + mtexecutor.add_node(translator_node) + + try: + mtexecutor.spin() + except KeyboardInterrupt: + print("Keyboard Interrupt") + except Exception as e: + print(f"Exception: {e}") + +if __name__ == "__main__": + main() diff --git a/mocha_core/scripts/core/zmq_comm_node.py b/mocha_core/mocha_core/zmq_comm_node.py similarity index 67% rename from mocha_core/scripts/core/zmq_comm_node.py rename to mocha_core/mocha_core/zmq_comm_node.py index bff2f0f..fd29dd6 100644 --- a/mocha_core/scripts/core/zmq_comm_node.py +++ b/mocha_core/mocha_core/zmq_comm_node.py @@ -3,9 +3,12 @@ import pdb import threading import uuid -import rospy +import rclpy +import rclpy.logging +import rclpy.time +import rclpy.node import zmq -import hash_comm +import mocha_core.hash_comm as hash_comm import mocha_core.msg HASH_LENGTH = hash_comm.Hash.HASH_LENGTH @@ -21,7 +24,7 @@ class SyncStatus(enum.Enum): class Comm_node: def __init__(self, this_node, client_node, robot_configs, - client_callback, server_callback, client_timeout): + client_callback, server_callback, client_timeout, ros_node): # Check input arguments assert isinstance(this_node, str) assert isinstance(client_node, str) @@ -30,18 +33,21 @@ def __init__(self, this_node, client_node, robot_configs, assert callable(server_callback) assert isinstance(client_timeout, (int, float)) + # Confirm that we are providing a ROS node + assert ros_node is not None + self.ros_node = ros_node + self.logger = self.ros_node.get_logger() + self.ros_node_name = self.ros_node.get_fully_qualified_name() + # Check that this_node and client_node exist in the config file if this_node not in robot_configs: - rospy.logerr(f"{this_node} - Node: this_node not in config file") - rospy.signal_shutdown("this_node not in config file") - rospy.spin() + self.logger.error(f"{this_node} - Node: this_node not in config file") + raise ValueError("this_node not in config file") self.this_node = this_node if client_node not in robot_configs[self.this_node]["clients"]: - rospy.logerr(f"{this_node} - Node: client_node not in config file") - 
rospy.signal_shutdown("client_node not in config file") - rospy.spin() - + self.logger.error(f"{this_node} - Node: client_node not in config file") + raise ValueError("client_node not in config file") self.client_node = client_node self.robot_configs = robot_configs @@ -63,10 +69,11 @@ def __init__(self, this_node, client_node, robot_configs, self.client_timeout = client_timeout # Create a publisher for the client bandwidth - self.pub_client_stats = rospy.Publisher(f"ddb/client_stats/{self.client_node}", - mocha_core.msg.Client_stats, - queue_size=10) - self.pub_client_count = 0 + self.pub_client_stats = self.ros_node.create_publisher( + mocha_core.msg.ClientStats, + f"{self.ros_node_name}/client_stats/{self.client_node}", + 10 + ) self.syncStatus = SyncStatus.IDLE self.syncStatus_lock = threading.Lock() @@ -74,22 +81,23 @@ def __init__(self, this_node, client_node, robot_configs, self.context = zmq.Context(1) # Start server thread - server = threading.Thread(target=self.server_thread, args=()) - self.server_running = True - server.start() + self.th = threading.Thread(target=self.server_thread, args=()) + self.server_running = threading.Event() + self.server_running.clear() + self.th.start() def connect_send_message(self, msg): # TODO keep connection open instead of opening in each call # Msg check if not isinstance(msg, bytes): - rospy.logdebug(f"{self.this_node} - Node - SENDMSG: " + + self.logger.debug(f"{self.this_node} - Node - SENDMSG: " + "msg has to be bytes") return # Check that we are in the right state self.syncStatus_lock.acquire() if self.syncStatus != SyncStatus.IDLE: - rospy.logdebug(f"{self.this_node} - Node - SENDMSG: " + + self.logger.debug(f"{self.this_node} - Node - SENDMSG: " + "Sync is running, abort") return self.client_thread = SyncStatus.SYNCHRONIZING @@ -105,7 +113,7 @@ def connect_send_message(self, msg): + str(int(target_robot["base-port"]) + port_offset) ) - rospy.logdebug(f"{self.this_node} - Node - SENDMSG: " + + self.logger.debug(f"{self.this_node} - Node - SENDMSG: " + f"Connecting to server {server_endpoint}") client = self.context.socket(zmq.REQ) client.connect(server_endpoint) @@ -117,16 +125,17 @@ def connect_send_message(self, msg): rnd_uuid = str(uuid.uuid4().hex).encode() msg_id = hash_comm.Hash(rnd_uuid).digest() full_msg = msg_id + msg - rospy.logdebug(f"{self.this_node} - Node - SENDMSG: " + + self.logger.debug(f"{self.this_node} - Node - SENDMSG: " + f"Sending ({full_msg})") client.send(full_msg) - start_ts = rospy.Time.now() + start_ts = self.ros_node.get_clock().now() + # Wait at most self.client_timeout * 1000 ms socks = dict(poll.poll(self.client_timeout*1000)) if socks.get(client) == zmq.POLLIN: reply = client.recv() if not reply: - rospy.logdebug( + self.logger.debug( f"{self.this_node} - Node - SENDMSG: " + "No response from the server" ) @@ -135,37 +144,38 @@ def connect_send_message(self, msg): header = reply[0:HASH_LENGTH] data = reply[HASH_LENGTH:] if header == msg_id: - rospy.logdebug( + self.logger.debug( f"{self.this_node} - Node - SENDMSG: Server replied " + f"({len(reply)} bytes)" ) - stop_ts = rospy.Time.now() - time_d = stop_ts - start_ts - time_s = float(time_d.to_sec()) + stop_ts = self.ros_node.get_clock().now() + time_d = (stop_ts - start_ts).nanoseconds + time_s = float(time_d / 1e9) + bw = len(reply)/time_s/1024/1024 - stats = mocha_core.msg.Client_stats() - stats.header.stamp = rospy.Time.now() + + stats = mocha_core.msg.ClientStats() + stats.header.stamp = self.ros_node.get_clock().now().to_msg() stats.header.frame_id = 
self.this_node - stats.header.seq = self.pub_client_count - self.pub_client_count += 1 + # Note: ROS2 doesn't have seq field in header stats.msg = msg[:5].decode("utf-8") stats.rtt = time_s stats.bw = bw stats.answ_len = len(reply) self.pub_client_stats.publish(stats) if len(reply) > 10*1024 and SHOW_BANDWIDTH: - rospy.loginfo(f"{self.this_node} - Node - " + + self.logger.info(f"{self.this_node} - Node - " + f"SENDMSG: Data RTT: {time_s}") - rospy.loginfo(f"{self.this_node} - Node - SENDMSG: " + + self.logger.info(f"{self.this_node} - Node - SENDMSG: " + f"BW: {bw}" + "MBytes/s") self.client_callback(data) else: - rospy.logerr(f"{self.this_node} - Node - SENDMSG: " + + self.logger.error(f"{self.this_node} - Node - SENDMSG: " + f"Malformed reply from server: {reply}") self.client_callback(None) else: - rospy.logdebug(f"{self.this_node} - Node - SENDMSG: " + + self.logger.debug(f"{self.this_node} - Node - SENDMSG: " + "No response from server") self.client_callback(None) client.setsockopt(zmq.LINGER, 0) @@ -176,6 +186,7 @@ def connect_send_message(self, msg): self.syncStatus_lock.release() def server_thread(self): + self.server_running.set() # This timer does not have a big impact as it is only the timer until # the recv times out Most calls from the client are very lightweight RECV_TIMEOUT = 1000 @@ -186,7 +197,7 @@ def server_thread(self): self.server.bind("tcp://*:" + str(port)) - while self.server_running: + while self.server_running.is_set(): try: request = self.server.recv() except zmq.ZMQError as e: @@ -197,32 +208,30 @@ def server_thread(self): # ) continue else: - rospy.logerr(f"{self.this_node} - Node - SERVER: " + + self.logger.error(f"{self.this_node} - Node - SERVER: " + f"ZMQ error: {e}") - rospy.signal_shutdown("ZMQ error") - rospy.spin() - rospy.logdebug(f"{self.this_node} - Node - SERVER: Got {request}") + raise RuntimeError("ZMQ error") + self.logger.debug(f"{self.this_node} - Node - SERVER: Got {request}") header = request[0:HASH_LENGTH] data = request[HASH_LENGTH:] reply = self.server_callback(data) if reply is None: - rospy.logerr(f"{self.this_node} - Node - SERVER: " + + self.logger.error(f"{self.this_node} - Node - SERVER: " + f"reply cannot be none") - rospy.signal_shutdown("Reply none") - rospy.spin() + raise RuntimeError("Reply none") if not isinstance(reply, bytes): - rospy.logerr(f"{self.this_node} - Node - SERVER: " + + self.logger.error(f"{self.this_node} - Node - SERVER: " + "reply has to be bytes") - rospy.signal_shutdown("Reply not bytes") - rospy.spin() + raise RuntimeError("Reply not bytes") ans = header + reply self.server.send(ans) - rospy.logdebug(f"{self.this_node} - Node - SERVER: " + - "Replied {len(ans)} bytes") - rospy.logdebug(f"{self.this_node} - SERVER: {ans}") + self.logger.debug(f"{self.this_node} - Node - SERVER: " + + f"Replied {len(ans)} bytes") + self.logger.debug(f"{self.this_node} - SERVER: {ans}") self.server.close() self.context.term() def terminate(self): - rospy.logdebug(f"{self.this_node} - Node - Terminating server") - self.server_running = False + self.server_running.clear() + self.th.join() + self.logger.info(f"Node {self.this_node} -> {self.client_node} terminating server") diff --git a/mocha_core/msg/Client_stats.msg b/mocha_core/msg/ClientStats.msg similarity index 100% rename from mocha_core/msg/Client_stats.msg rename to mocha_core/msg/ClientStats.msg diff --git a/mocha_core/msg/DatabaseCB.msg b/mocha_core/msg/DatabaseCB.msg new file mode 100644 index 0000000..653a3c9 --- /dev/null +++ b/mocha_core/msg/DatabaseCB.msg @@ -0,0 
+1,4 @@ +std_msgs/Header header +uint16 robot_id +uint16 topic_id +uint8[] msg_header diff --git a/mocha_core/msg/Header_pub.msg b/mocha_core/msg/HeaderPub.msg similarity index 100% rename from mocha_core/msg/Header_pub.msg rename to mocha_core/msg/HeaderPub.msg diff --git a/mocha_core/msg/SM_state.msg b/mocha_core/msg/SMState.msg similarity index 100% rename from mocha_core/msg/SM_state.msg rename to mocha_core/msg/SMState.msg diff --git a/mocha_core/package.xml b/mocha_core/package.xml index 39ab8be..64b8166 100644 --- a/mocha_core/package.xml +++ b/mocha_core/package.xml @@ -1,79 +1,32 @@ - + + mocha_core 0.0.1 DCIST Distributed Database package - - - developer + BSD 3-Clause License + ament_cmake + ament_cmake_python - - - - GPLv3 - + rclpy + std_msgs + builtin_interfaces + geometry_msgs + sensor_msgs + vision_msgs + rosidl_default_generators - - - - + ros2launch + rosidl_default_runtime - - - - + ament_lint_auto + ament_lint_common - - - - - - - - - - message_generation - - - - - - message_runtime - - - - - catkin - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - sensor_msgs - vision_msgs - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - sensor_msgs - vision_msgs - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - sensor_msgs - vision_msgs - depth_atlas - detection_filter_processor - cell_detect + rosidl_interface_packages - - - + ament_cmake diff --git a/mocha_core/resource/mocha_core b/mocha_core/resource/mocha_core new file mode 100644 index 0000000..e69de29 diff --git a/mocha_core/scripts/core/database_server.py b/mocha_core/scripts/core/database_server.py deleted file mode 100644 index d1a612f..0000000 --- a/mocha_core/scripts/core/database_server.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 - -import os -import threading -import rospy -import mocha_core.srv -import database -import pdb -import database_utils as du - - -class DatabaseServer: - """ Starts a clean database and offers an API-like service - to interact with the databaseself. 
- - Please see the list of services in the srv folder - """ - def __init__(self, robot_configs, topic_configs): - # Check input topics - assert isinstance(robot_configs, dict) - assert isinstance(topic_configs, dict) - - self.robot_configs = robot_configs - self.topic_configs = topic_configs - - # Get current robot name from params - self.robot_name = rospy.get_param('~robot_name') - - if self.robot_name not in self.robot_configs.keys(): - rospy.logerr(f"{self.robot_name} does not exist in robot_configs") - rospy.signal_shutdown("robot_name does not exist in robot_configs") - rospy.spin() - - self.topic_list = self.topic_configs[self.robot_configs[self.robot_name]["node-type"]] - - # Create the empty database with lock - self.dbl = database.DBwLock() - - # Get current robot number - self.robot_number = du.get_robot_id_from_name(self.robot_configs, - robot_name=self.robot_name) - - # create services for all the possible calls to the DB - self.service_list = [] - s = rospy.Service('~AddUpdateDB', - mocha_core.srv.AddUpdateDB, - self.add_update_db_service_cb) - self.service_list.append(s) - s = rospy.Service('~GetDataHeaderDB', - mocha_core.srv.GetDataHeaderDB, - self.get_data_hash_db_service_cb) - self.service_list.append(s) - s = rospy.Service('~SelectDB', - mocha_core.srv.SelectDB, - self.select_db_service_cb) - self.service_list.append(s) - - self.msg_types = du.msg_types(self.robot_configs, self.topic_configs) - - def add_update_db_service_cb(self, req): - if not isinstance(req.topic_id, int) or req.topic_id is None: - rospy.logerr("Error: topic_id empty") - return - if len(req.msg_content) == 0: - rospy.logerr("Error: msg_content empty") - return - if req.topic_id > len(self.topic_list): - rospy.logerr("Error: topic_id not in topic_list") - return - topic = self.topic_list[req.topic_id] - priority = du.get_priority_number(topic["msg_priority"]) - ts = req.timestamp - ts = rospy.Time(ts.secs, ts.nsecs) - dbm = database.DBMessage(self.robot_number, - req.topic_id, - dtype=self.msg_types[self.robot_number][req.topic_id]["dtype"], - priority=priority, - ts=ts, - data=req.msg_content) - - header = self.dbl.add_modify_data(dbm) - return mocha_core.srv.AddUpdateDBResponse(header) - - def get_data_hash_db_service_cb(self, req): - if req.msg_header is None or len(req.msg_header) == 0: - rospy.logerr("Error: msg_header empty") - return - dbm = self.dbl.find_header(req.msg_header) - answ = mocha_core.srv.GetDataHeaderDBResponse(dbm.robot_id, - dbm.topic_id, - dbm.ts, - dbm.data) - return answ - - def select_db_service_cb(self, req): - # TODO Implement filtering - if req.robot_id is None: - rospy.logerr("Error: robot_id none") - return - if req.topic_id is None: - rospy.logerr("Error: topic_id none") - return - list_headers = self.dbl.get_header_list(req.robot_id) - answ = mocha_core.srv.SelectDBResponse(du.serialize_headers(list_headers)) - return answ - - def shutdown(self): - for s in self.service_list: - s.shutdown() diff --git a/mocha_core/scripts/core/integrate_database.py b/mocha_core/scripts/core/integrate_database.py deleted file mode 100755 index 2b611f7..0000000 --- a/mocha_core/scripts/core/integrate_database.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 -import os -import random -import signal -import sys -import time -import traceback -from functools import partial - -import roslaunch -import rospkg -import rospy -import yaml -import std_msgs.msg -import subprocess - -import database_server as ds -import database_utils as du -import synchronize_channel as sync - - -def 
ping(host): - command = ["ping", "-c", "1", host] - try: - result = subprocess.run(command, stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - return result.returncode == 0 - except Exception as e: - print(f"Error pinging {host}: {e}") - return False - - -class IntegrateDatabase: - def __init__(self): - rospy.init_node("integrate_database", anonymous=False) - - self.this_robot = rospy.get_param("~robot_name") - self.rssi_threshold = rospy.get_param("~rssi_threshold", 20) - self.all_channels = [] - rospy.loginfo(f"{self.this_robot} - Integrate - " + - f"RSSI threshold: {self.rssi_threshold}") - self.client_timeout = rospy.get_param("~client_timeout", 6.) - rospy.loginfo(f"{self.this_robot} - Integrate - " + - f"Client timeout: {self.client_timeout}") - - # Load and check robot configs - self.robot_configs_file = rospy.get_param("~robot_configs") - with open(self.robot_configs_file, "r") as f: - self.robot_configs = yaml.load(f, Loader=yaml.FullLoader) - if self.this_robot not in self.robot_configs.keys(): - self.shutdown("Robot not in config file") - - # Load and check radio configs - self.radio_configs_file = rospy.get_param("~radio_configs") - with open(self.radio_configs_file, "r") as f: - self.radio_configs = yaml.load(f, Loader=yaml.FullLoader) - self.radio = self.robot_configs[self.this_robot]["using-radio"] - if self.radio not in self.radio_configs.keys(): - self.shutdown("Radio not in config file") - - # Load and check topic configs - self.topic_configs_file = rospy.get_param("~topic_configs") - with open(self.topic_configs_file, "r") as f: - self.topic_configs = yaml.load(f, Loader=yaml.FullLoader) - self.node_type = self.robot_configs[self.this_robot]["node-type"] - if self.node_type not in self.topic_configs.keys(): - self.shutdown("Node type not in config file") - - # Check that we can ping the radios - ip = self.robot_configs[self.this_robot]["IP-address"] - if not ping(ip): - rospy.logerr(f"{self.this_robot} - Integrate - " + - f"Cannot ping self {ip}. Is the radio on?") - rospy.signal_shutdown("Cannot ping self") - rospy.spin() - - # Create a database server object - self.DBServer = ds.DatabaseServer(self.robot_configs, - self.topic_configs) - - self.num_robot_in_comm = 0 - - # Handle possible interruptions - self.interrupted = False - - def signal_handler(sig, frame): - rospy.logwarn(f"{self.this_robot} - Integrate - " + - f"Got signal. Killing comm nodes.") - self.interrupted = True - rospy.signal_shutdown("Killed by user") - signal.signal(signal.SIGINT, signal_handler) - - rospy.loginfo(f"{self.this_robot} - Integrate - " + - "Created all communication channels!") - - # Start comm channels with other robots - self.other_robots = [i for i in list(self.robot_configs.keys()) if i != - self.this_robot] - for other_robot in self.other_robots: - if other_robot not in self.robot_configs[self.this_robot]["clients"]: - rospy.logwarn( - f"{self.this_robot} - Integrate - "+ - f"Skipping channel {self.this_robot}->{other_robot} " + - "as it is not in the graph of this robot" - ) - continue - # Start communication channel - channel = sync.Channel(self.DBServer.dbl, - self.this_robot, - other_robot, self.robot_configs, - self.client_timeout) - self.all_channels.append(channel) - channel.run() - - # Attach a radio trigger to each channel. This will be triggered - # when the RSSI is high enough. 
You can use another approach here - # such as using a timer to periodically trigger the sync - rospy.Subscriber('ddb/rajant/rssi/' + other_robot, - std_msgs.msg.Int32, - self.rssi_cb, - channel) - - # Wait for all the robots to start - for _ in range(100): - if self.interrupted or rospy.is_shutdown(): - self.shutdown("Killed while waiting for other robots") - return - rospy.sleep(.1) - - # Spin! - rospy.spin() - - def shutdown(self, reason): - assert isinstance(reason, str) - rospy.logerr(f"{self.this_robot} - Integrate - " + reason) - for channel in self.all_channels: - channel.comm_node.terminate() - self.all_channels.remove(channel) - rospy.logwarn(f"{self.this_robot} - Integrate - " + "Killed Channels") - self.DBServer.shutdown() - rospy.logwarn(f"{self.this_robot} - Integrate - " + "Killed DB") - rospy.signal_shutdown(reason) - rospy.logwarn(f"{self.this_robot} - Integrate - " + "Shutting down") - rospy.spin() - - def rssi_cb(self, data, comm_node): - rssi = data.data - if rssi > self.rssi_threshold: - self.num_robot_in_comm += 1 - try: - rospy.loginfo(f"{self.this_robot} <- {comm_node.target_robot}: Triggering comms") - comm_node.trigger_sync() - except: - traceback.print_exception(*sys.exc_info()) - - -if __name__ == "__main__": - # Start the node - IntegrateDatabase() diff --git a/mocha_core/scripts/core/test/run_tests.sh b/mocha_core/scripts/core/test/run_tests.sh deleted file mode 100755 index 56c1f38..0000000 --- a/mocha_core/scripts/core/test/run_tests.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -set -eo pipefail - -# Check that the first argument is not null -if [ -z "$1" ]; then - echo "No catkin workspace provided. Usage: ./run_tests.sh " - exit 1 -fi - -# Echo some variables for debug -echo "$PYTHONPATH" - -# Colors for better distinguishing start and end of tests -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BOLD='\033[1m' -NC='\033[0m' # No Color - -# TESTS_TO_RUN is an array of tests that should be executed in the CI -TESTS_TO_RUN=( - "test_zmq_comm_node.py" - "test_database_utils.py" - "test_database.py" - "test_synchronize_channel.py" - "test_database_server.py" -) - -# The first argument is the path to the catkin workspace + catkin_ws -CATKIN_WS="$1" -# Display path -echo "Using catkin workspace: $CATKIN_WS" - -# Source the ROS environment -source "$CATKIN_WS/devel/setup.bash" - -# Run roscore in the background and save its PID to kill it later -roscore > /dev/null 2>&1 & -PID=$(ps -ef | grep roscore | grep -v grep | awk '{print $2}') -echo "Waiting 2 seconds for roscore to load" -sleep 2 - -# TODO(fclad) Change this when all the tests are passing -# Run all the files that start with test_ in this folder -# for i in $(ls | grep "^test_"); do - -for test in "${TESTS_TO_RUN[@]}"; do - # Print yellow separator - echo -e "${YELLOW}===================================== $test - START${NC}" - - rosrun mocha_core "$test" - retVal=$? 
- sleep 5 # To wait for some spawned processes - if [ $retVal -ne 0 ]; then - echo -e "${RED}${BOLD}===================================== $test - FAILED${NC}" - kill -9 "$PID" - exit $retVal - fi -done -echo -e "${GREEN}${BOLD}================================ALL TESTS PASSED${NC}" -exit 0 diff --git a/mocha_core/scripts/core/test/test_database_server.py b/mocha_core/scripts/core/test/test_database_server.py deleted file mode 100755 index 9535b0e..0000000 --- a/mocha_core/scripts/core/test/test_database_server.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/env python3 - -import os -import unittest -import sys -import multiprocessing -import roslaunch -import rospy -import rospkg -import pdb -from pprint import pprint -import mocha_core.srv -from geometry_msgs.msg import PointStamped -import yaml -from colorama import Fore, Style -import random -import warnings - - -class Test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - - # Ignore pesky warnings about sockets - warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) - - # Create a database server object - self.dbs = ds.DatabaseServer(robot_configs, topic_configs) - - rospy.wait_for_service('~AddUpdateDB') - rospy.wait_for_service('~GetDataHeaderDB') - rospy.wait_for_service('~SelectDB') - - # Generate the service calls for all the services - service_name = '~AddUpdateDB' - self.add_update_db = rospy.ServiceProxy(service_name, - mocha_core.srv.AddUpdateDB, - persistent=True) - - service_name = '~GetDataHeaderDB' - self.get_data_header_db = rospy.ServiceProxy(service_name, - mocha_core.srv.GetDataHeaderDB, - persistent=True) - - service_name = '~SelectDB' - self.select_db = rospy.ServiceProxy(service_name, - mocha_core.srv.SelectDB, - persistent=True) - - super().setUp() - - def tearDown(self): - self.add_update_db.close() - self.get_data_header_db.close() - self.select_db.close() - self.dbs.shutdown() - rospy.sleep(1) - super().tearDown() - - def test_add_retrieve_single_msg(self): - # Simulate sending a "/pose" message from Charon - sample_msg = PointStamped() - sample_msg.header.frame_id = "world" - sample_msg.point.x = random.random() - sample_msg.point.y = random.random() - sample_msg.point.z = random.random() - sample_msg.header.stamp = rospy.Time.now() - - tid = du.get_topic_id_from_name(robot_configs, topic_configs, - robot_name, "/pose") - # Serialize and send through service - serialized_msg = du.serialize_ros_msg(sample_msg) - - try: - answ = self.add_update_db(tid, - rospy.get_rostime(), - serialized_msg) - answ_header = answ.new_header - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - # print("Got hash:", answ_hash) - # The data is now stored in the database! 
- - # Request the same hash through service - try: - answ = self.get_data_header_db(answ_header) - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - - # Parse answer and compare results - _, ans_topic_id, _, ans_data = du.parse_answer(answ, - msg_types) - - self.assertEqual(tid, ans_topic_id) - self.assertEqual(ans_data, sample_msg) - # print("Received topic:", ans_topic_name) - # print("ROS msg:", ans_data) - # print("Timestamp:", ans_ts) - - def test_add_select_robot(self): - stored_headers = [] - for i in range(3): - # Sleep is very important to have distinct messages - rospy.sleep(0.1) - # Create a dumb random PointStamped message - sample_msg = PointStamped() - sample_msg.header.frame_id = "world" - sample_msg.point.x = random.random() - sample_msg.point.y = random.random() - sample_msg.point.z = random.random() - sample_msg.header.stamp = rospy.Time.now() - tid = du.get_topic_id_from_name(robot_configs, topic_configs, - robot_name, "/pose") - - # Serialize and send through service - serialized_msg = du.serialize_ros_msg(sample_msg) - try: - answ = self.add_update_db(tid, - rospy.get_rostime(), - serialized_msg) - answ_header = answ.new_header - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - stored_headers.append(answ_header) - - # Request the list of headers through the service - try: - robot_id = du.get_robot_id_from_name(robot_configs, "charon") - answ = self.select_db(robot_id, None, None) - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - returned_headers = du.deserialize_headers(answ.headers) - - # Sort list before comparing - stored_headers.sort() - returned_headers.sort() - self.assertEqual(stored_headers, returned_headers) - - def test_insert_storm(self): - # Bomb the database with insert petitions from different robots, - # check the number of headers afterwards - NUM_PROCESSES = 100 - LOOPS_PER_PROCESS = 10 - - # Spin a number of processes to write into the database - def recorder_thread(): - # Get a random ros time - tid = du.get_topic_id_from_name(robot_configs, topic_configs, - robot_name, "/pose") - sample_msg = PointStamped() - sample_msg.header.frame_id = "world" - sample_msg.point.x = random.random() - sample_msg.point.y = random.random() - sample_msg.point.z = random.random() - sample_msg.header.stamp = rospy.Time.now() - # Serialize and send through service - serialized_msg = du.serialize_ros_msg(sample_msg) - - for i in range(LOOPS_PER_PROCESS): - timestamp = rospy.Time(random.randint(1, 10000), - random.randint(0, 1000)*1000000) - try: - _ = self.add_update_db(tid, - timestamp, - serialized_msg) - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - - process_list = [] - for i in range(NUM_PROCESSES): - x = multiprocessing.Process(target=recorder_thread) - process_list.append(x) - - for p in process_list: - p.start() - - for p in process_list: - p.join() - - # Get the list of hashes from the DB and count them - try: - robot_id = du.get_robot_id_from_name(robot_configs, "charon") - answ = self.select_db(robot_id, None, None) - except rospy.ServiceException as exc: - print("Service did not process request: " + str(exc)) - self.assertTrue(False) - returned_headers = du.deserialize_headers(answ.headers) - - self.assertEqual(len(returned_headers), - NUM_PROCESSES*LOOPS_PER_PROCESS) - - 
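For reference, the persistent rospy.ServiceProxy pattern this deleted test relied on maps onto rclpy service clients roughly as follows; a sketch using the new /mocha service names, where the node name and topic id are illustrative:

import rclpy
from rclpy.node import Node
from geometry_msgs.msg import PointStamped
import mocha_core.database_utils as du
from mocha_core.srv import AddUpdateDB

rclpy.init()
node = Node("test_add_update_client")  # illustrative node name
cli = node.create_client(AddUpdateDB, "/mocha/add_update_db")
cli.wait_for_service()

req = AddUpdateDB.Request()
req.topic_id = 0  # assumed to be a valid topic id for this robot
req.timestamp = node.get_clock().now().to_msg()
req.msg_content = du.serialize_ros_msg(PointStamped())

future = cli.call_async(req)
rclpy.spin_until_future_complete(node, future)
new_header = future.result().new_header

This is essentially what the new translator.py does from its subscription callback, except asynchronously via add_done_callback.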
-if __name__ == '__main__': - rospy.init_node('test_database_server', anonymous=False) - - # Get the directory path and import all the required modules to test - rospack = rospkg.RosPack() - ddb_path = rospack.get_path('mocha_core') - scripts_path = os.path.join(ddb_path, "scripts/core") - sys.path.append(scripts_path) - import database_utils as du - import database_server as ds - - # Get the default path from the ddb_path - robot_configs_default = os.path.join(ddb_path, - "config/testConfigs/robot_configs.yaml") - # Get the path to the robot config file from the ros parameter robot_configs - robot_configs = rospy.get_param("robot_configs", - robot_configs_default) - - # Get the yaml dictionary objects - with open(robot_configs, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - - # Get the default path from the ddb_path - topic_configs_default = os.path.join(ddb_path, - "config/testConfigs/topic_configs.yaml") - # Get the path to the robot config file from the ros parameter topic_configs - topic_configs = rospy.get_param("topic_configs", - topic_configs_default) - - # Get the yaml dictionary objects - with open(topic_configs, "r") as f: - topic_configs = yaml.load(f, Loader=yaml.FullLoader) - - msg_types = du.msg_types(robot_configs, topic_configs) - - # Set the ~robot_name param to charon - robot_name = "charon" - rospy.set_param("~robot_name", robot_name) - - # Run test cases - unittest.main() diff --git a/mocha_core/scripts/core/test/test_database_utils.py b/mocha_core/scripts/core/test/test_database_utils.py deleted file mode 100755 index f110af0..0000000 --- a/mocha_core/scripts/core/test/test_database_utils.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -import unittest -import sys -import os -from pprint import pprint -import uuid -import geometry_msgs.msg -import rospkg -import pdb -import rospy -from colorama import Fore, Back, Style -import yaml -import random - -ROBOT0_TOPIC2_PRIO0 = b'\x00\x00\x00u\x00\xde' -ROBOT1_TOPIC1_PRIO4 = b'\x01\x00\x01O\x00\xdc' -ROBOT1_TOPIC2_PRIO2 = b'\x01\x00\x01O\x00\xc7' - - -class test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - super().setUp() - - def tearDown(self): - rospy.sleep(1) - super().tearDown() - - def test_serialize_deserialize(self): - for _ in range(10): - header_list = [] - for _ in range(20): - header_list.append(du.generate_random_header()) - serialized = du.serialize_headers(header_list) - deserialized = du.deserialize_headers(serialized) - self.assertEqual(len(header_list), len(deserialized)) - self.assertEqual(deserialized, header_list) - - def test_deserialize_wrong(self): - header_list = [] - for _ in range(5): - header_list.append(du.generate_random_header()) - serialized = du.serialize_headers(header_list) - with self.assertRaises(Exception): - du.deserialize_headers(serialized[1:]) - - def test_pack_unpack_data_topic(self): - dbl = sample_db.get_sample_dbl() - dbm = dbl.find_header(ROBOT1_TOPIC1_PRIO4) - packed = du.pack_data(dbm) - u_dbm = du.unpack_data(ROBOT1_TOPIC1_PRIO4, packed) - self.assertEqual(dbm, u_dbm) - dbm = dbl.find_header(ROBOT1_TOPIC2_PRIO2) - packed = du.pack_data(dbm) - u_dbm = du.unpack_data(ROBOT1_TOPIC2_PRIO2, packed) - self.assertEqual(dbm, u_dbm) - - def test_topic_id(self): - for i in range(50): - # Pick a random robot - robot = random.choice(list(robot_configs.keys())) - # Pick a random topic - topic_list = topic_configs[robot_configs[robot]["node-type"]] - topic = 
random.choice(topic_list) - id = du.get_topic_id_from_name(robot_configs, topic_configs, - robot, topic["msg_topic"]) - topic_find = du.get_topic_name_from_id(robot_configs, - topic_configs, robot, id) - self.assertEqual(topic["msg_topic"], topic_find) - - def test_robot_id(self): - for robot in robot_configs: - number = du.get_robot_id_from_name(robot_configs, robot) - robot_name = du.get_robot_name_from_id(robot_configs, number) - self.assertEqual(robot, robot_name) - -if __name__ == '__main__': - # Get the directory path and import all the required modules to test - rospack = rospkg.RosPack() - ddb_path = rospack.get_path('mocha_core') - scripts_path = os.path.join(ddb_path, "scripts/core") - sys.path.append(scripts_path) - import database_utils as du - import sample_db - import hash_comm as hc - - # Set the node name - rospy.init_node('test_synchronize_utils', anonymous=False) - - # Get the default path from the ddb_path - robot_configs_default = os.path.join(ddb_path, - "config/testConfigs/robot_configs.yaml") - # Get the path to the robot config file from the ros parameter robot_configs - robot_configs = rospy.get_param("robot_configs", - robot_configs_default) - - # Get the yaml dictionary objects - with open(robot_configs, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - - # Get the default path from the ddb_path - topic_configs_default = os.path.join(ddb_path, - "config/testConfigs/topic_configs.yaml") - # Get the path to the robot config file from the ros - # parameter topic_configs - topic_configs = rospy.get_param("topic_configs", - topic_configs_default) - - # Get the yaml dictionary objects - with open(topic_configs, "r") as f: - topic_configs = yaml.load(f, Loader=yaml.FullLoader) - - msg_types = du.msg_types(robot_configs, topic_configs) - - # Run test cases! 
- unittest.main() diff --git a/mocha_core/scripts/core/test/test_delay_2robots.py b/mocha_core/scripts/core/test/test_delay_2robots.py deleted file mode 100755 index 15fc9f7..0000000 --- a/mocha_core/scripts/core/test/test_delay_2robots.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python3 - -import unittest -import sys -import uuid -import time -from pprint import pprint -import multiprocessing -sys.path.append('..') -import get_sample_db as sdb -import synchronize_channel as sync -import synchronize_utils as su -import database_server_utils as du -from colorama import Fore, Style - -CONFIG_FILE = "testConfigs/robotConfigs.yml" - -class test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - - def test_delay_run(self): - self.maxDiff = None - db_gs = sdb.get_sample_dbl() - db_r1 = sdb.get_sample_dbl() - db_r2 = sdb.get_sample_dbl() - - node_gs_r1 = sync.Channel(db_gs, 'groundstation', 'charon', CONFIG_FILE) - node_gs_r2 = sync.Channel(db_gs, 'groundstation', 'styx', CONFIG_FILE) - - node_r1_gs = sync.Channel(db_r1, 'charon', 'groundstation', CONFIG_FILE) - node_r2_gs = sync.Channel(db_r2, 'styx', 'groundstation', CONFIG_FILE) - - node_r1_r2 = sync.Channel(db_r1, 'charon', 'styx', CONFIG_FILE) - node_r2_r1 = sync.Channel(db_r2, 'styx', 'charon', CONFIG_FILE) - - pprint(db_gs.db) - pprint(db_r1.db) - pprint(db_r2.db) - - node_gs_r1.run() - node_gs_r2.run() - - dbm = su.DBMessage(1, 'Node_1', 1, 2, time.time(), bytes('r1_data', 'utf-8'), False) - du.add_modify_data_dbl(db_r1, dbm) - - node_gs_r1.trigger_sync() - time.sleep(4) - - node_r1_gs.run() - node_r2_gs.run() - - dbm = su.DBMessage(2, 'Node_2', 2, 1, time.time(), bytes('r2_data', 'utf-8'), False) - du.add_modify_data_dbl(db_r2, dbm) - - node_r1_r2.run() - - node_r1_r2.trigger_sync() - time.sleep(4) - - node_gs_r2.trigger_sync() - time.sleep(2) - - node_r2_r1.run() - - node_gs_r1.stop() - node_r1_gs.stop() - - node_r1_r2.stop() - node_r2_r1.stop() - - node_r2_gs.stop() - node_gs_r2.stop() - - pprint(db_gs.db) - pprint(db_r1.db) - pprint(db_r2.db) - time.sleep(2) - - def test_multiple_trigger_sync(self): - - self.maxDiff = None - db_gs = sdb.get_sample_dbl() - db_r1 = sdb.get_sample_dbl() - db_r2 = sdb.get_sample_dbl() - - node_gs_r1 = sync.Channel(db_gs, 'groundstation', 'charon', CONFIG_FILE) - node_gs_r2 = sync.Channel(db_gs, 'groundstation', 'styx', CONFIG_FILE) - - node_r1_gs = sync.Channel(db_r1, 'charon', 'groundstation', CONFIG_FILE) - node_r2_gs = sync.Channel(db_r2, 'styx', 'groundstation', CONFIG_FILE) - - node_r1_r2 = sync.Channel(db_r1, 'charon', 'styx', CONFIG_FILE) - node_r2_r1 = sync.Channel(db_r2, 'styx', 'charon', CONFIG_FILE) - -# pprint(db_gs) -# pprint(db_r1) -# pprint(db_r2) - -# node_r1_r2.run() - node_r2_r1.run() - - dbm = su.DBMessage(1, 'Node_1', 1, 2, time.time(), bytes('R1_data', 'utf-8'), False) - du.add_modify_data_dbl(db_r1, dbm) - - node_r2_r1.trigger_sync() - time.sleep(5) - -# node_r2_r1.trigger_sync() -# time.sleep(5) - -# node_r1_r2.stop() - node_r2_r1.stop() - time.sleep(2) - -if __name__ == '__main__': - unittest.main() diff --git a/mocha_core/scripts/core/test/test_multiple_robots_sync.py b/mocha_core/scripts/core/test/test_multiple_robots_sync.py deleted file mode 100755 index 4f9a892..0000000 --- a/mocha_core/scripts/core/test/test_multiple_robots_sync.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python3 -import unittest -import sys -import uuid -import time -from pprint import pprint -import 
multiprocessing -sys.path.append('..') -import get_sample_db as sdb -import synchronize_channel as sync -import synchronize_utils as su -import database_server_utils as du -from colorama import Fore, Back, Style - -CONFIG_FILE = "testConfigs/robotConfigs.yml" - -class test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - - def test_multi_robot_sync(self): - self.maxDiff=None - - db_gs = sdb.get_sample_dbl() - db_r1 = sdb.get_sample_dbl() - - # Set up all channels - node_gs_r1 = sync.Channel(db_gs, 'groundstation', - 'charon', CONFIG_FILE) - node_r1_gs = sync.Channel(db_r1, 'charon', - 'groundstation', CONFIG_FILE) - - pprint(db_gs.db) - pprint(db_r1.db) - - node_r1_gs.run() - node_gs_r1.run() - - nodes_1 = 3 - robot_prefix = 'R1_' - feature_prefix = 'Node_' - for i in range(nodes_1): - feature = robot_prefix + feature_prefix + str(i) - dbm = su.DBMessage(1, feature, 0, 1, time.time(), - bytes('r1_data', 'utf-8'), False) - du.add_modify_data_dbl(db_r1, dbm) - - node_gs_r1.trigger_sync() - time.sleep(2) - node_r1_gs.trigger_sync() - time.sleep(2) - - dbm = su.DBMessage(0, 'Node_0', 1, 0, time.time(), - bytes('r1_data', 'utf-8'), False) - du.add_modify_data_dbl(db_gs, dbm) - - node_r1_gs.trigger_sync() - time.sleep(2) - node_gs_r1.trigger_sync() - time.sleep(2) - - node_r1_gs.stop() - node_gs_r1.stop() - - pprint(db_gs.db) - pprint(db_r1.db) - - self.assertDictEqual(db_gs.db,db_r1.db) - time.sleep(2) - -if __name__ == '__main__': - unittest.main() diff --git a/mocha_core/scripts/core/test/test_synchronize_channel.py b/mocha_core/scripts/core/test/test_synchronize_channel.py deleted file mode 100755 index e18351b..0000000 --- a/mocha_core/scripts/core/test/test_synchronize_channel.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python3 -import os -import unittest -import sys -import uuid -import pdb -import time -import yaml -import rospkg -import rospy -import multiprocessing -from pprint import pprint -from colorama import Fore, Style -import pprint - - -class Test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - super().setUp() - - def tearDown(self): - rospy.sleep(1) - super().tearDown() - - def test_onehop_oneway_sync(self): - dbl1 = sample_db.get_sample_dbl() - dbl2 = sample_db.get_sample_dbl() - dbm = db.DBMessage(1, 1, 2, 1, - rospy.Time(123.456), bytes('New data', 'utf-8')) - dbl2.add_modify_data(dbm) - node_1 = sync.Channel(dbl1, 'basestation', - 'charon', robot_configs, 2) - node_2 = sync.Channel(dbl2, 'charon', - 'basestation', robot_configs, 2) - node_1.run() - node_2.run() - node_1.trigger_sync() - # Wait for comms propagation - time.sleep(2) - node_1.stop() - node_2.stop() - self.assertDictEqual(dbl1.db, dbl2.db) - # This sleep avoid getting an error because the socket is - # already in use - time.sleep(4) - - def test_convoluted_onehop_oneway_sync(self): - self.maxDiff=None - dbl1 = sample_db.get_sample_dbl() - dbl2 = sample_db.get_sample_dbl() - # print(id(db1), id(db2)) - # Modify one of the features in the db - dbm = db.DBMessage(1, 2, 2, 1, - rospy.Time(123.456), bytes('New data', 'utf-8')) - dbl2.add_modify_data(dbm) - node_1 = sync.Channel(dbl1, 'basestation', - 'charon', robot_configs, 2) - node_1.run() - node_1.trigger_sync() - time.sleep(8) - - node_2 = sync.Channel(dbl2, 'charon', 'basestation', - robot_configs, 2) - # Start the comm channel - node_2.run() - - # Trigger a 
synchronization - for i in range(10): - node_2.trigger_sync() - node_1.trigger_sync() - - # Wait for comms propagation - time.sleep(2) - self.assertDictEqual(dbl1.db, dbl2.db) - - # Stop the server and wait unti all is killed - node_1.stop() - node_2.stop() - # This sleep avoid getting an error because the socker is - # already in use - time.sleep(4) - - def test_convoluted_onehop_twoway_sync(self): - self.maxDiff=None - dbl1 = sample_db.get_sample_dbl() - dbl2 = sample_db.get_sample_dbl() - # Modify one of the features in the db - dbm = db.DBMessage(1, 2, 2, 1, - rospy.Time(123.456), bytes('New data', 'utf-8')) - dbl1.add_modify_data(dbm) - dbm = db.DBMessage(1, 3, 2, 1, - rospy.Time(123.456), bytes('New data', 'utf-8')) - dbl2.add_modify_data(dbm) - node_1 = sync.Channel(dbl1, 'basestation', - 'charon', robot_configs, 2) - node_1.run() - node_1.trigger_sync() - time.sleep(8) - - node_2 = sync.Channel(dbl2, 'charon', 'basestation', - robot_configs, 2) - # Start the comm channel - node_2.run() - - # Trigger a synchronization - for i in range(10): - node_2.trigger_sync() - node_1.trigger_sync() - - # Wait for comms propagation - time.sleep(2) - self.assertDictEqual(dbl1.db, dbl2.db) - - # Stop the server and wait unti all is killed - node_1.stop() - node_2.stop() - # This sleep avoid getting an error because the socker is - # already in use - time.sleep(4) - - def test_twohop_oneway_sync(self): - dbl_robot1 = sample_db.get_sample_dbl() - dbl_groundstation = sample_db.get_sample_dbl() - dbl_robot2 = sample_db.get_sample_dbl() - # Modify one of the features in the db - dbm = db.DBMessage(1, 2, 2, 1, - rospy.Time(123.456), bytes('New data', 'utf-8')) - dbl_robot1.add_modify_data(dbm) - node_1 = sync.Channel(dbl_robot1, 'charon', - 'basestation', robot_configs, 2) - node_2 = sync.Channel(dbl_groundstation, - 'basestation', - 'charon', robot_configs, 2) - node_3 = sync.Channel(dbl_groundstation, - 'basestation', - 'styx', robot_configs, 2) - node_4 = sync.Channel(dbl_robot2, - 'styx', 'basestation', - robot_configs, 2) - - node_1.run() - node_2.run() - node_3.run() - node_4.run() - - # Trigger first sync - node_2.trigger_sync() - - # Wait for comms propagation - time.sleep(2) - self.assertDictEqual(dbl_groundstation.db, dbl_robot1.db) - - # Trigger second sync - node_4.trigger_sync() - time.sleep(2) - self.assertDictEqual(dbl_robot2.db, dbl_groundstation.db) - self.assertDictEqual(dbl_robot2.db, dbl_robot1.db) - - # Wait until all the servers are killed - node_1.stop() - node_2.stop() - node_3.stop() - node_4.stop() - # This sleep avoid getting an error because the socker is - # already in use - time.sleep(4) - -if __name__ == '__main__': - rospy.init_node('test_synchronize_channel', anonymous=False, - log_level=rospy.DEBUG) - - # Get the directory path and import all the required modules to test - rospack = rospkg.RosPack() - ddb_path = rospack.get_path('mocha_core') - scripts_path = os.path.join(ddb_path, "scripts/core") - sys.path.append(scripts_path) - import sample_db - import synchronize_channel as sync - import database_utils as du - import database as db - - # Get the default path from the ddb_path - robot_configs_default = os.path.join(ddb_path, - "config/testConfigs/robot_configs.yaml") - # Get the path to the robot config file from the ros parameter robot_configs - robot_configs = rospy.get_param("robot_configs", - robot_configs_default) - - # Get the yaml dictionary objects - with open(robot_configs, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - - # Run test 
cases - unittest.main() diff --git a/mocha_core/scripts/core/test/test_zmq_comm_node.py b/mocha_core/scripts/core/test/test_zmq_comm_node.py deleted file mode 100755 index 28c985c..0000000 --- a/mocha_core/scripts/core/test/test_zmq_comm_node.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python3 - -""" test_zmq_comm_node.py -This file tests the basic functionality between two communication nodes. - - Two nodes are created: basestation and charon. The configuration for these are - defined in the configuration file. The client of each node is set to the other one. - - We generate a random message that groundstation sends to charon. Upon reception, - cb_charon is called. The return value of this function is transmitted to the groundstation. - - The groundstation receives the message -""" - -import os -import random -import string -import sys -import unittest -import yaml -import rospkg -import pdb -import rospy -from colorama import Fore, Style - - -class Test(unittest.TestCase): - def setUp(self): - test_name = self._testMethodName - print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) - super().setUp() - - def tearDown(self): - rospy.sleep(1) - super().tearDown() - - def test_simple_connection(self): - self.answer = None - - def cb_groundstation(value): - rospy.logdebug("cb_groundstation") - self.answer = value - - def cb_charon(value): - # This function is called upon reception of a message by charon. - # The return value is transmitted as answer to the original - # message. - rospy.logdebug(f"cb_charon: {value}") - return value - - # Create the two robots - node_groundstation = zmq_comm_node.Comm_node( - "basestation", "charon", robot_configs, - cb_groundstation, cb_groundstation, 2 - ) - node_charon = zmq_comm_node.Comm_node( - "charon", "basestation", robot_configs, - cb_charon, cb_charon, 2 - ) - - # Generate random message - letters = string.ascii_letters - random_str = "".join(random.choice(letters) for i in range(10)) - random_msg = random_str + str(random.randint(0, 1024)) - random_msg = random_msg.encode() - - # Send message from node_groundstation to robot 2 - node_groundstation.connect_send_message(random_msg) - - # node_charon.connect_send_message(random_msg) - - # Terminate robots and test assertion - node_groundstation.terminate() - node_charon.terminate() - self.assertEqual(random_msg, self.answer, "Sent %s" % random_msg) - - -if __name__ == "__main__": - # Get the directory path and import all the required modules to test - rospack = rospkg.RosPack() - ddb_path = rospack.get_path("mocha_core") - scripts_path = os.path.join(ddb_path, "scripts/core") - sys.path.append(scripts_path) - import zmq_comm_node - - # Create a ROS node using during the test - rospy.init_node("test_zmq_comm_node", - log_level=rospy.DEBUG, anonymous=False) - - # Get the default path from the ddb_path - robot_configs_default = os.path.join(ddb_path, - "config/testConfigs/robot_configs.yaml") - # Get the path to the robot config file from the ros parameter robot_configs - robot_configs = rospy.get_param("robot_configs", - robot_configs_default) - - # Get the yaml dictionary objects - with open(robot_configs, "r") as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - - # Run test cases! 
- unittest.main() diff --git a/mocha_core/scripts/publishers/topic_publisher.py b/mocha_core/scripts/publishers/topic_publisher.py deleted file mode 100755 index 32d7e39..0000000 --- a/mocha_core/scripts/publishers/topic_publisher.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python3 -import os -import pdb -import sys - -import rospkg -import rospy -import yaml -import re -import mocha_core.msg - -import mocha_core.srv - - -class TopicPublisher(): - def __init__(self, this_robot, target, msg_history="WHOLE_HISTORY"): - - self.this_robot = this_robot - - - # Service configuration - self.__select_service = "integrate_database/SelectDB" - self.__get_header_service = "integrate_database/GetDataHeaderDB" - - try: - rospy.wait_for_service(self.__select_service) - rospy.wait_for_service(self.__get_header_service) - except rospy.ROSInterruptException as exc: - rospy.logdebug("Service did not process request: " + - str(exc)) - rospy.signal_shutdown("Service did not process request") - rospy.spin() - self.__select_db = rospy.ServiceProxy( - self.__select_service, mocha_core.srv.SelectDB - ) - - self.__get_header_db = rospy.ServiceProxy( - self.__get_header_service, mocha_core.srv.GetDataHeaderDB - ) - - # List of robots to pull - self.__robot_list = [] - - # Publisher creation - self.publishers = {} - self.header_pub = rospy.Publisher("ddb/topic_publisher/headers", - mocha_core.msg.Header_pub, - queue_size=10) - for t in target: - robot, robot_id, topic, topic_id, obj = t - if robot not in self.__robot_list: - self.__robot_list.append(robot_id) - self.publishers[(robot_id, topic_id)] = {"pub": rospy.Publisher(f"/{robot}{topic}", - obj, - queue_size=10), - "ts": rospy.Time(1, 0)} - - - def run(self): - rospy.loginfo(f"{self.this_robot} - Topic Publisher - Started") - rate = rospy.Rate(10) - headers = set() - while not rospy.is_shutdown(): - for robot_id in self.__robot_list: - headers_to_get = [] - - try: - answ = self.__select_db(robot_id, None, None) - except rospy.ServiceException as exc: - rospy.logdebug(f"Service did not process request {exc}") - continue - returned_headers = du.deserialize_headers(answ.headers) - if len(returned_headers) == 0: - rate.sleep() - continue - - for header_ in returned_headers: - if header_ not in headers: - headers_to_get.append(header_) - - for get_header in headers_to_get: - rospy.logdebug(f"Getting headers {get_header}") - try: - answ = self.__get_header_db(get_header) - except rospy.ServiceException as exc: - rospy.logerr("Service did not process request: " + - str(exc)) - continue - headers.add(get_header) - - ans_robot_id, ans_topic_id, ans_ts, ans_data = du.parse_answer(answ, - msg_types) - for t in self.publishers.keys(): - if t == (ans_robot_id, ans_topic_id): - assert isinstance(ans_data, - self.publishers[t]['pub'].data_class) - # FIXME: remove this line once we have proper time - # filtering implemented - if ans_ts > self.publishers[t]["ts"]: - self.publishers[t]["ts"] = ans_ts - self.publishers[t]["pub"].publish(ans_data) - self.header_pub.publish(get_header) - rospy.logdebug(f"Publishing robot_id: {ans_robot_id}" + - f" - topic: {ans_topic_id}") - else: - rospy.logdebug(f"Skipping robot_id: {ans_robot_id}" + - f" - topic: {ans_topic_id} as there is an old ts") - rate.sleep() - - -if __name__ == "__main__": - rospy.init_node("mocha_core_publisher", anonymous=False) - - # Get the mocha_core path - rospack = rospkg.RosPack() - ddb_path = rospack.get_path('mocha_core') - scripts_path = os.path.join(ddb_path, "scripts/core") - 
sys.path.append(scripts_path) - import database_utils as du - - # Get robot from the parameters - this_robot = rospy.get_param("~robot_name") - - # Get the robot_config path and generate the robot_configs object - robot_configs_default = os.path.join(ddb_path, - "config/testConfigs/robot_configs.yaml") - robot_configs_path = rospy.get_param("~robot_configs", robot_configs_default) - with open(robot_configs_path, 'r') as f: - robot_configs = yaml.load(f, Loader=yaml.FullLoader) - if this_robot not in robot_configs.keys(): - rospy.logerr(f"Robot {this_robot} not in robot_configs") - rospy.signal_shutdown("Robot not in robot_configs") - rospy.spin() - - - # Get the topic_configs path and generate the topic_configs object - topic_configs_default = os.path.join(ddb_path, - "config/testConfigs/topic_configs.yaml") - topic_configs_path = rospy.get_param("~topic_configs", topic_configs_default) - with open(topic_configs_path, 'r') as f: - topic_configs = yaml.load(f, Loader=yaml.FullLoader) - - # Get msg_types dict used to publish the topics - msg_types = du.msg_types(robot_configs, topic_configs) - - # Flip the dict so that we have name: obj - msg_objects = {} - # for msg in msg_types: - # msg_objects[msg_types[msg]["name"]] = msg_types[msg]["obj"] - - list_of_topics = set() - - # Iterate for each robot in robot_configs, and generate the topics - for robot in robot_configs.keys(): - robot_id = du.get_robot_id_from_name(robot_configs, robot) - robot_type = robot_configs[robot]["node-type"] - topic_list = topic_configs[robot_type] - for topic_id, topic in enumerate(topic_list): - msg_topic = topic["msg_topic"] - obj = msg_types[robot_id][topic_id]["obj"] - robot_tuple = (robot, robot_id, topic["msg_topic"], topic_id, obj) - list_of_topics.add(robot_tuple) - - pub = TopicPublisher(this_robot, list_of_topics) - pub.run() diff --git a/mocha_core/scripts/translators/translator.py b/mocha_core/scripts/translators/translator.py deleted file mode 100755 index 5b3509b..0000000 --- a/mocha_core/scripts/translators/translator.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import rospy -import rospkg -import mocha_core.srv -import yaml -import pdb - -class Translator: - def __init__(self, this_robot, this_robot_id, - topic_name, topic_id, msg_type): - self.__du = du - - self.__counter = 0 - self.__topic_name = topic_name - self.__topic_id = topic_id - self.__this_robot = this_robot - self.__this_robot_id = this_robot_id - self.__service_name = "integrate_database/AddUpdateDB" - self.__add_update_db = rospy.ServiceProxy( - self.__service_name, mocha_core.srv.AddUpdateDB - ) - - rospy.Subscriber( - self.__topic_name, msg_type, self.translator_cb - ) - - def translator_cb(self, data): - msg = data - - rospy.wait_for_service(self.__service_name) - serialized_msg = self.__du.serialize_ros_msg(msg) - try: - ts = rospy.get_rostime() - answ = self.__add_update_db(self.__topic_id, - ts, - serialized_msg) - answ_header = answ.new_header - rospy.logdebug(f"{self.__this_robot} - Header insert " + - f"- {self.__topic_name} - {answ_header}") - self.__counter += 1 - except rospy.ServiceException as exc: - rospy.logerr(f"Service did not process request: {exc}") - - -if __name__ == "__main__": - rospy.init_node("topic_translator", anonymous=False) - - # Get the mocha_core path - rospack = rospkg.RosPack() - ddb_path = rospack.get_path('mocha_core') - scripts_path = os.path.join(ddb_path, "scripts/core") - sys.path.append(scripts_path) - import database_utils as du - - # Get robot from the 
parameters
-    this_robot = rospy.get_param("~robot_name")
-
-    # Get the robot_config path and generate the robot_configs object
-    robot_configs_default = os.path.join(ddb_path,
-                                         "config/testConfigs/robot_configs.yaml")
-    robot_configs_path = rospy.get_param("~robot_configs", robot_configs_default)
-    with open(robot_configs_path, 'r') as f:
-        robot_configs = yaml.load(f, Loader=yaml.FullLoader)
-    if this_robot not in robot_configs.keys():
-        rospy.logerr(f"Robot {this_robot} not in robot_configs")
-        rospy.signal_shutdown("Robot not in robot_configs")
-        rospy.spin()
-
-    # Get the topic_configs path and generate the topic_configs object
-    topic_configs_default = os.path.join(ddb_path,
-                                         "config/testConfigs/topic_configs.yaml")
-    topic_configs_path = rospy.get_param("~topic_configs",
-                                         topic_configs_default)
-    with open(topic_configs_path, 'r') as f:
-        topic_configs = yaml.load(f, Loader=yaml.FullLoader)
-    this_robot_topics = topic_configs[robot_configs[this_robot]["node-type"]]
-
-    # Get msg_types dict used to publish the topics
-    msg_types = du.msg_types(robot_configs, topic_configs)
-
-    for topic_id, topic in enumerate(this_robot_topics):
-        # Get robot id
-        robot_id = du.get_robot_id_from_name(robot_configs, this_robot)
-        obj = msg_types[robot_id][topic_id]["obj"]
-        Translator(this_robot,
-                   robot_id,
-                   topic["msg_topic"],
-                   topic_id,
-                   obj)
-    rospy.spin()
diff --git a/mocha_core/setup.py b/mocha_core/setup.py
new file mode 100644
index 0000000..961ae47
--- /dev/null
+++ b/mocha_core/setup.py
@@ -0,0 +1,30 @@
+from setuptools import setup
+
+package_name = "mocha_core"
+
+setup(
+    name=package_name,
+    version="0.0.1",
+    packages=[package_name],
+    data_files=[
+        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
+        ("share/" + package_name, ["package.xml"]),
+    ],
+    install_requires=["setuptools"],
+    zip_safe=True,
+    maintainer="developer",
+    maintainer_email="fclad@seas.upenn.edu",
+    description="DCIST Distributed Database package",
+    license="BSD 3-Clause License",
+    tests_require=["pytest"],
+    entry_points={
+        "console_scripts": [
+            "mocha = mocha_core.mocha:main",
+            "database_server = mocha_core.database_server:main",
+            "synchronize_channel = mocha_core.synchronize_channel:main",
+            "zmq_comm_node = mocha_core.zmq_comm_node:main",
+            "topic_publisher = mocha_core.topic_publisher:main",
+            "translator = mocha_core.translator:main",
+        ],
+    },
+)
diff --git a/mocha_core/srv/AddUpdateDB.srv b/mocha_core/srv/AddUpdateDB.srv
index f8f7f55..03b0c75 100644
--- a/mocha_core/srv/AddUpdateDB.srv
+++ b/mocha_core/srv/AddUpdateDB.srv
@@ -1,5 +1,5 @@
 uint8 topic_id
-time timestamp
+builtin_interfaces/Time timestamp
 uint8[] msg_content
 ---
 uint8[] new_header
diff --git a/mocha_core/srv/GetDataHeaderDB.srv b/mocha_core/srv/GetDataHeaderDB.srv
index 134b097..8885b85 100644
--- a/mocha_core/srv/GetDataHeaderDB.srv
+++ b/mocha_core/srv/GetDataHeaderDB.srv
@@ -2,5 +2,5 @@ uint8[] msg_header
 ---
 uint8 robot_id
 uint8 topic_id
-time timestamp
+builtin_interfaces/Time timestamp
 uint8[] msg_content
diff --git a/mocha_core/srv/SelectDB.srv b/mocha_core/srv/SelectDB.srv
index e8404f6..e7ddbaa 100644
--- a/mocha_core/srv/SelectDB.srv
+++ b/mocha_core/srv/SelectDB.srv
@@ -1,5 +1,5 @@
 uint8 robot_id
 uint8 topic_id
-time ts_limit
+builtin_interfaces/Time ts_limit
 ---
 uint8[] headers
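These three service definitions swap the ROS 1 time primitive for builtin_interfaces/Time. On the rclpy side the conversion is a to_msg()/from_msg() round trip; a minimal sketch:

import rclpy.time
from rclpy.clock import Clock

now = Clock().now()                          # rclpy.time.Time
stamp = now.to_msg()                         # builtin_interfaces/Time, what the .srv fields now carry
roundtrip = rclpy.time.Time.from_msg(stamp)  # back to rclpy.time.Time

This mirrors what translator.py and zmq_comm_node.py now do internally with get_clock().now().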
5aeceb0..682da23 100644 --- a/mocha_core/scripts/core/test/sample_db.py +++ b/mocha_core/test/sample_db.py @@ -2,12 +2,13 @@ import copy from pprint import pprint import sys -sys.path.append('..') -import hash_comm -import database as db -import database_utils as du +import os + +import mocha_core.hash_comm as hash_comm +import mocha_core.database as db +import mocha_core.database_utils as du import pdb -import rospy +import rclpy.time DB_TEMPLATE = { 0: { @@ -69,7 +70,7 @@ def get_sample_dbl(): DB[robot_id][topic_id] = {} for msg in DB_TEMPLATE[robot_id][topic_id]: ts = DB_TEMPLATE[robot_id][topic_id][msg]['ts'] - ts = rospy.Time.from_sec(ts) + ts = rclpy.time.Time(seconds=int(ts), nanoseconds=int((ts % 1) * 1e9)) data = DB_TEMPLATE[robot_id][topic_id][msg]['data'] dtype = DB_TEMPLATE[robot_id][topic_id][msg]['dtype'] prio = DB_TEMPLATE[robot_id][topic_id][msg]['priority'] diff --git a/mocha_core/scripts/core/test/test_database.py b/mocha_core/test/test_database.py similarity index 68% rename from mocha_core/scripts/core/test/test_database.py rename to mocha_core/test/test_database.py index 1ede2dc..df24f33 100755 --- a/mocha_core/scripts/core/test/test_database.py +++ b/mocha_core/test/test_database.py @@ -2,13 +2,14 @@ import unittest import sys import os -import uuid -import geometry_msgs.msg -import rospkg import pdb -import rospy from colorama import Fore, Back, Style -import yaml +import mocha_core.database as database +import sample_db +import mocha_core.hash_comm as hc +import mocha_core.database_utils as du +import copy + import pprint ROBOT0_TOPIC0_PRIO0 = b'\x00\x00\x00u\x00\xde' @@ -25,7 +26,8 @@ def setUp(self): super().setUp() def tearDown(self): - rospy.sleep(1) + import time + time.sleep(1) super().tearDown() def test_get_header_list(self): @@ -75,16 +77,53 @@ def test_get_header_list(self): def test_headers_not_in_local(self): dbl = sample_db.get_sample_dbl() header_list = dbl.get_header_list() + # Test with all headers extra_header_1 = du.generate_random_header() extra_header_2 = du.generate_random_header() header_list.append(extra_header_2) header_list.append(extra_header_1) new_headers = [extra_header_1, extra_header_2] new_headers.sort() + # Test without newer discover_extra_header = dbl.headers_not_in_local(header_list) discover_extra_header.sort() self.assertListEqual(discover_extra_header, new_headers) + # Test with older header, create a copy of one of the existing headers in the + # db. 
Pick one with the msec field that can accommodate both addition and
+        # subtraction
+        test_header = None
+        for header in header_list:
+            h = hc.TsHeader.from_header(header)
+            if h.msecs > 0 and h.msecs < 999:
+                test_header = header
+                break
+        self.assertTrue(test_header is not None)
+        # Modify the timestamp to make it before the current msg
+        h = hc.TsHeader.from_header(test_header)
+        h.msecs -= 1
+        extra_header_3 = h.bindigest()
+        header_list.append(extra_header_3)
+        discover_extra_header_latest = dbl.headers_not_in_local(header_list, newer=True)
+        discover_extra_header_latest.sort()
+        discover_extra_header_all = dbl.headers_not_in_local(header_list)
+        discover_extra_header_all.sort()
+        self.assertListEqual(discover_extra_header_latest, new_headers)
+        diff_header = set(discover_extra_header_all) - set(discover_extra_header)
+        self.assertEqual(diff_header.pop(), extra_header_3)
+        # Modify the timestamp to make it after the current msg
+        h = hc.TsHeader.from_header(test_header)
+        h.msecs += 1
+        extra_header_4 = h.bindigest()
+        header_list.append(extra_header_4)
+        discover_extra_header_latest = dbl.headers_not_in_local(header_list, newer=True)
+        new_headers.append(extra_header_4)  # The new header should show in the list
+        new_headers.sort()
+        discover_extra_header_latest.sort()
+        self.assertListEqual(new_headers, discover_extra_header_latest)
+        # Finally, getting all the headers should not filter them out
+        discover_extra_header = dbl.headers_not_in_local(header_list)
+        self.assertTrue(len(discover_extra_header) == 4)

     def test_find_header(self):
         dbl = sample_db.get_sample_dbl()
@@ -103,33 +142,6 @@ def test_find_header(self):
         self.assertAlmostEqual(dbm.ts, ts)
         self.assertEqual(dbm.data, data)
-
 if __name__ == '__main__':
-    # Get the directory path and import all the required modules to test
-    rospack = rospkg.RosPack()
-    ddb_path = rospack.get_path('mocha_core')
-    scripts_path = os.path.join(ddb_path, "scripts/core")
-    sys.path.append(scripts_path)
-    import database
-    import sample_db
-    import hash_comm as hc
-    import database_utils as du
-
-    dbl = sample_db.get_sample_dbl()
-
-    # Set the node name
-    rospy.init_node('test_synchronize_utils', anonymous=False)
-
-    # Get the default path from the ddb_path
-    robot_configs_default = os.path.join(ddb_path,
-                                         "config/testConfigs/robot_configs.yaml")
-    # Get the path to the robot config file from the ros parameter robot_configs
-    robot_configs = rospy.get_param("robot_configs",
-                                    robot_configs_default)
-
-    # Get the yaml dictionary objects
-    with open(robot_configs, "r") as f:
-        robot_configs = yaml.load(f, Loader=yaml.FullLoader)
-    # Run test cases!
    unittest.main()
diff --git a/mocha_core/test/test_database_server.py b/mocha_core/test/test_database_server.py
new file mode 100755
index 0000000..feb8cbe
--- /dev/null
+++ b/mocha_core/test/test_database_server.py
@@ -0,0 +1,327 @@
+#!/usr/bin/env python3
+
+import multiprocessing
+import os
+import pdb
+import random
+import sys
+import time
+import unittest
+import warnings
+from pprint import pprint
+import threading
+from rclpy.logging import LoggingSeverity
+from rclpy.executors import SingleThreadedExecutor, MultiThreadedExecutor
+from rclpy.node import Node
+import rclpy
+import rclpy.time
+import rclpy.clock
+import yaml
+from colorama import Fore, Style
+from geometry_msgs.msg import PointStamped
+import mocha_core.database_server as ds
+import mocha_core.database_utils as du
+from mocha_core.srv import AddUpdateDB, GetDataHeaderDB, SelectDB
+from mocha_core.msg import DatabaseCB
+
+
+class Database_server_test(Node):
+    def __init__(self):
+        super().__init__("test_database_server")
+
+
+class Database_client_test(Node):
+    def __init__(self):
+        self.name_rnd = random.randint(0, 2**32)
+        self.node_name = f"test_database_client_{self.name_rnd}"
+        super().__init__(self.node_name)
+        logger = self.get_logger()
+        logger.set_level(LoggingSeverity.DEBUG)
+        self.add_update_db_cli = self.create_client(AddUpdateDB,
+                                                    "/test_database_server/add_update_db",
+                                                    qos_profile=ds.DatabaseServer.QOS_PROFILE)
+        while not self.add_update_db_cli.wait_for_service(timeout_sec=1.0):
+            logger.debug("Waiting for add_update_db")
+        self.get_data_header_db_cli = self.create_client(GetDataHeaderDB,
+                                                         "/test_database_server/get_data_header_db",
+                                                         qos_profile=ds.DatabaseServer.QOS_PROFILE)
+        while not self.get_data_header_db_cli.wait_for_service(timeout_sec=1.0):
+            logger.debug("Waiting for get_data_header_db")
+        self.select_db_cli = self.create_client(SelectDB,
+                                                "/test_database_server/select_db",
+                                                qos_profile=ds.DatabaseServer.QOS_PROFILE)
+        while not self.select_db_cli.wait_for_service(timeout_sec=1.0):
+            logger.debug("Waiting for select_db")
+
+        # Subscribe to the answers from the database
+        def callback(data):
+            self.data_answer = data
+        self.database_cb_sub = self.create_subscription(DatabaseCB,
+                                                        "/test_database_server/database_cb",
+                                                        callback, 10)
+
+    def recorder_async(self, loops_per_thread, robot_configs, topic_configs, robot_name):
+        # Get the topic id for the /pose topic
+        tid = du.get_topic_id_from_name(
+            robot_configs, topic_configs, robot_name,
+            "/pose",
+            self
+        )
+        sample_msg = PointStamped()
+        sample_msg.header.frame_id = "world"
+        sample_msg.point.x = random.random()
+        sample_msg.point.y = random.random()
+        sample_msg.point.z = random.random()
+        sample_msg.header.stamp = rclpy.clock.Clock().now().to_msg()
+        # Serialize and send through service
+        serialized_msg = du.serialize_ros_msg(sample_msg)
+
+        all_futures = []
+
+        # Fire all requests as fast as possible
+        for i in range(loops_per_thread):
+            timestamp = rclpy.time.Time(
+                seconds=random.randint(0, 65535),
+                nanoseconds=random.randint(0, 1000) * 1000000,
+            ).to_msg()
+            try:
+                # Create request and call service method directly
+                req = AddUpdateDB.Request()
+                req.topic_id = tid
+                req.timestamp = timestamp
+                req.msg_content = serialized_msg
+                future = self.add_update_db_cli.call_async(req)
+                all_futures.append((self, future))  # Store node and future
+            except Exception as exc:
+                print(f"Service did not process request: {exc}")
+
+        return all_futures  # Return futures to be waited on later
+
+
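+# Note on the pattern used throughout these tests: every request is built as a
+# Request object, sent with call_async(), and resolved by spinning until the
+# returned Future completes. A minimal sketch, using the same names that
+# appear in this file:
+#
+#     req = AddUpdateDB.Request()
+#     req.topic_id = tid
+#     req.timestamp = rclpy.clock.Clock().now().to_msg()
+#     req.msg_content = serialized_msg
+#     future = client_node.add_update_db_cli.call_async(req)
+#     rclpy.spin_until_future_complete(client_node, future)
+#     answ = future.result()
+class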
test(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Load configurations at the class level + current_dir = os.path.dirname(os.path.abspath(__file__)) + ddb_path = os.path.join(current_dir, "..") + + # Load robot configs + robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml") + with open(robot_configs_path, "r") as f: + cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + + # Load topic configs + topic_configs_path = os.path.join(ddb_path, "config/testConfigs/topic_configs.yaml") + with open(topic_configs_path, "r") as f: + cls.topic_configs = yaml.load(f, Loader=yaml.FullLoader) + cls.robot_name = "charon" + + + def setUp(self): + test_name = self._testMethodName + print("\n", Fore.RED, 20 * "=", test_name, 20 * "=", Style.RESET_ALL) + + rclpy.init() + self.test_server_node = Database_server_test() + self.executor = MultiThreadedExecutor(num_threads=4) + self.executor.add_node(self.test_server_node) + executor_thread = threading.Thread(target=self.executor.spin, daemon=True) + executor_thread.start() + # Create a database server object with the node (this will create the services) + self.dbs = ds.DatabaseServer(self.robot_configs, self.topic_configs, + self.robot_name, self.test_server_node) + + # Create an empty list for the client nodes + self.test_client_nodes = set() + + super().setUp() + + def setUpClient(self): + test_client_node = Database_client_test() + executor = SingleThreadedExecutor() + executor.add_node(test_client_node) + logger = test_client_node.get_logger() + executor_thread = threading.Thread(target=executor.spin, daemon=True) + executor_thread.start() + self.test_client_nodes.add((test_client_node, executor, logger)) + return test_client_node, logger, executor_thread + + def tearDown(self): + self.executor.shutdown() + self.test_server_node.destroy_node() + for node, executor, _ in self.test_client_nodes: + executor.shutdown() + node.destroy_node() + rclpy.shutdown() + time.sleep(1) + super().tearDown() + + def test_add_retrieve_single_msg(self): + # Create a single client + client_node, _, _ = self.setUpClient() + + # Simulate sending a "/pose" message from Charon + sample_msg = PointStamped() + sample_msg.header.frame_id = "world" + sample_msg.point.x = random.random() + sample_msg.point.y = random.random() + sample_msg.point.z = random.random() + sample_msg.header.stamp = rclpy.clock.Clock().now().to_msg() + + tid = du.get_topic_id_from_name( + self.robot_configs, self.topic_configs, + self.robot_name, "/pose", + client_node + ) + # Serialize and send through service + serialized_msg = du.serialize_ros_msg(sample_msg) + + try: + # Create request and call service method directly + req = AddUpdateDB.Request() + req.topic_id = tid + req.timestamp = rclpy.clock.Clock().now().to_msg() + req.msg_content = serialized_msg + future = client_node.add_update_db_cli.call_async(req) + rclpy.spin_until_future_complete(client_node, future) + answ = future.result() + answ_header = answ.new_header + except Exception as exc: + print("Service did not process request: " + str(exc)) + self.assertTrue(False) + # print("Got hash:", answ_header) + # The data is now stored in the database! 
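+        # The header returned by AddUpdateDB is the database key: the same
+        # bytes are used below to fetch the message back. For reference, an
+        # equivalent manual query could be issued from the command line
+        # (hypothetical header bytes shown):
+        #   ros2 service call /test_database_server/get_data_header_db \
+        #       mocha_core/srv/GetDataHeaderDB "{msg_header: [0, 0, 0, 117, 0, 222]}"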
+
+        # Request the same hash through service
+        try:
+            # Create request and call service method directly
+            req = GetDataHeaderDB.Request()
+            req.msg_header = answ_header
+            future = client_node.get_data_header_db_cli.call_async(req)
+            rclpy.spin_until_future_complete(client_node, future)
+            answ = future.result()
+        except Exception as exc:
+            print("Service did not process request: " + str(exc))
+            self.assertTrue(False)
+
+        # Parse answer and compare results
+        self.msg_types = du.msg_types(self.robot_configs, self.topic_configs,
+                                      client_node)
+        _, ans_topic_id, _, ans_data = du.parse_answer(answ, self.msg_types)
+
+        self.assertEqual(tid, ans_topic_id)
+        self.assertEqual(ans_data, sample_msg)
+
+        # We will now test the callback from the database when a new message is
+        # published
+        self.assertTrue(client_node.data_answer is not None)
+        self.assertEqual(client_node.data_answer.topic_id, tid)
+        self.assertEqual(client_node.data_answer.header.frame_id, self.robot_name)
+        self.assertEqual(client_node.data_answer.msg_header, answ_header)
+
+    def test_add_select_robot(self):
+        # Create a single client
+        client_node, _, _ = self.setUpClient()
+
+        stored_headers = []
+        for i in range(3):
+            # Sleep is very important to have distinct messages
+            time.sleep(0.1)
+            # Create a dummy random PointStamped message
+            sample_msg = PointStamped()
+            sample_msg.header.frame_id = "world"
+            sample_msg.point.x = random.random()
+            sample_msg.point.y = random.random()
+            sample_msg.point.z = random.random()
+            sample_msg.header.stamp = rclpy.clock.Clock().now().to_msg()
+
+            tid = du.get_topic_id_from_name(
+                self.robot_configs, self.topic_configs, self.robot_name,
+                "/pose",
+                client_node
+            )
+
+            # Serialize and send through service
+            serialized_msg = du.serialize_ros_msg(sample_msg)
+            try:
+                # Create request and call service method directly
+                req = AddUpdateDB.Request()
+                req.topic_id = tid
+                req.timestamp = rclpy.clock.Clock().now().to_msg()
+                req.msg_content = serialized_msg
+                future = client_node.add_update_db_cli.call_async(req)
+                rclpy.spin_until_future_complete(client_node, future)
+                answ = future.result()
+                answ_header = answ.new_header
+            except Exception as exc:
+                print("Service did not process request: " + str(exc))
+                self.assertTrue(False)
+            stored_headers.append(answ_header)
+
+        # Request the list of headers through the service
+        try:
+            robot_id = du.get_robot_id_from_name(self.robot_configs, self.robot_name)
+            # Create request and call service method directly
+            req = SelectDB.Request()
+            req.robot_id = robot_id
+            req.topic_id = 0  # Changed from None to 0 since it's uint8
+            req.ts_limit = rclpy.clock.Clock().now().to_msg()
+            future = client_node.select_db_cli.call_async(req)
+            rclpy.spin_until_future_complete(client_node, future)
+            answ = future.result()
+        except Exception as exc:
+            print("Service did not process request: " + str(exc))
+            self.assertTrue(False)
+        returned_headers = du.deserialize_headers(answ.headers)
+
+        # Sort list before comparing
+        stored_headers.sort()
+        returned_headers.sort()
+        self.assertEqual(stored_headers, returned_headers)
+
+    def test_insert_storm(self):
+        # Bomb the database with insert requests from different robots,
+        # check the number of headers afterwards
+        # Note: Using threading instead of multiprocessing to test concurrent access
+        # since ROS2 objects don't serialize well across processes
+        NUM_THREADS = 100
+        LOOPS_PER_THREAD = 10
+
+        all_futures = []
+        # Fire all service calls from all threads
+        for i in range(NUM_THREADS):
+            client_node, _, executor_thread =
self.setUpClient() + futures = client_node.recorder_async(LOOPS_PER_THREAD, + self.robot_configs, + self.topic_configs, + self.robot_name) + all_futures.extend(futures) + random.shuffle(all_futures) + + # Wait for all the futures to complete + for node, future in all_futures: + rclpy.spin_until_future_complete(node, future) + + # Get the list of hashes from the DB and count them + # Use the last client_node for the final query + try: + robot_id = du.get_robot_id_from_name(self.robot_configs, self.robot_name) + # Create request and call service method directly + req = SelectDB.Request() + req.robot_id = robot_id + req.topic_id = 0 # Changed from None to 0 since it's uint8 + req.ts_limit = rclpy.clock.Clock().now().to_msg() + future = client_node.select_db_cli.call_async(req) + rclpy.spin_until_future_complete(client_node, future) + answ = future.result() + except Exception as exc: + print("Service did not process request: " + str(exc)) + self.assertTrue(False) + returned_headers = du.deserialize_headers(answ.headers) + + self.assertEqual(len(returned_headers), NUM_THREADS * LOOPS_PER_THREAD) + + +if __name__ == "__main__": + unittest.main(failfast=True) diff --git a/mocha_core/test/test_database_utils.py b/mocha_core/test/test_database_utils.py new file mode 100755 index 0000000..f972aeb --- /dev/null +++ b/mocha_core/test/test_database_utils.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +import unittest +import sys +import os +from pprint import pprint +import time +import rclpy +from colorama import Fore, Back, Style +import threading +import yaml +import random +from rclpy.logging import LoggingSeverity +import mocha_core.database_utils as du +import sample_db +import mocha_core.hash_comm as hc +from rclpy.node import Node +from rclpy.executors import SingleThreadedExecutor + +ROBOT0_TOPIC2_PRIO0 = b'\x00\x00\x00u\x00\xde' +ROBOT1_TOPIC1_PRIO4 = b'\x01\x00\x01O\x00\xdc' +ROBOT1_TOPIC2_PRIO2 = b'\x01\x00\x01O\x00\xc7' + +class Database_utils_test(Node): + def __init__(self): + super().__init__("test_database_utils") + + +class test(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Load configurations at the class level + current_dir = os.path.dirname(os.path.abspath(__file__)) + ddb_path = os.path.join(current_dir, "..") + + # Load robot configs + robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml") + with open(robot_configs_path, "r") as f: + cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + + # Load topic configs + topic_configs_path = os.path.join(ddb_path, "config/testConfigs/topic_configs.yaml") + with open(topic_configs_path, "r") as f: + cls.topic_configs = yaml.load(f, Loader=yaml.FullLoader) + + + def setUp(self): + test_name = self._testMethodName + print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) + rclpy.init() + self.test_ros_node = Database_utils_test() + self.executor = SingleThreadedExecutor() + self.executor.add_node(self.test_ros_node) + self.logger = self.test_ros_node.get_logger() + self.logger.set_level(LoggingSeverity.DEBUG) + self.msg_types = du.msg_types(self.robot_configs, self.topic_configs, + self.test_ros_node) + executor_thread = threading.Thread(target=self.executor.spin, daemon=True) + executor_thread.start() + super().setUp() + + def tearDown(self): + time.sleep(1) + self.executor.shutdown() + self.test_ros_node.destroy_node() + rclpy.shutdown() + super().tearDown() + + def test_serialize_deserialize(self): + for _ in range(10): + header_list = [] + for _ in range(20): + 
header_list.append(du.generate_random_header()) + serialized = du.serialize_headers(header_list) + deserialized = du.deserialize_headers(serialized) + self.assertEqual(len(header_list), len(deserialized)) + self.assertEqual(deserialized, header_list) + + def test_deserialize_wrong(self): + header_list = [] + for _ in range(5): + header_list.append(du.generate_random_header()) + serialized = du.serialize_headers(header_list) + with self.assertRaises(Exception): + du.deserialize_headers(serialized[1:]) + + def test_pack_unpack_data_topic(self): + dbl = sample_db.get_sample_dbl() + dbm = dbl.find_header(ROBOT1_TOPIC1_PRIO4) + self.assertIsNotNone(dbm) + packed = du.pack_data(dbm) + u_dbm = du.unpack_data(ROBOT1_TOPIC1_PRIO4, packed) + self.assertEqual(dbm, u_dbm) + dbm = dbl.find_header(ROBOT1_TOPIC2_PRIO2) + packed = du.pack_data(dbm) + u_dbm = du.unpack_data(ROBOT1_TOPIC2_PRIO2, packed) + self.assertEqual(dbm, u_dbm) + + def test_topic_id(self): + for i in range(50): + # Pick a random robot + robot = random.choice(list(self.robot_configs.keys())) + # Pick a random topic + topic_list = self.topic_configs[self.robot_configs[robot]["node-type"]] + topic = random.choice(topic_list) + id = du.get_topic_id_from_name(self.robot_configs, self.topic_configs, + robot, topic["msg_topic"], + self.test_ros_node) + topic_find = du.get_topic_name_from_id(self.robot_configs, + self.topic_configs, robot, + id, + self.test_ros_node) + self.assertEqual(topic["msg_topic"], topic_find) + + def test_robot_id(self): + for robot in self.robot_configs: + number = du.get_robot_id_from_name(self.robot_configs, robot) + robot_name = du.get_robot_name_from_id(self.robot_configs, number) + self.assertEqual(robot, robot_name) + +if __name__ == '__main__': + # Run test cases! + unittest.main() diff --git a/mocha_core/test/test_synchronize_channel.py b/mocha_core/test/test_synchronize_channel.py new file mode 100755 index 0000000..8b84601 --- /dev/null +++ b/mocha_core/test/test_synchronize_channel.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python3 +import os +import unittest +import pdb +import time +import yaml +import rclpy +import rclpy.time +import threading +from rclpy.logging import LoggingSeverity +from rclpy.executors import SingleThreadedExecutor +from rclpy.node import Node +from colorama import Fore, Style +import sample_db +import mocha_core.synchronize_channel as sync +import mocha_core.database_utils as du +import mocha_core.database as db + +class Synchronize_channel_test(Node): + def __init__(self): + super().__init__("test_synchronize_channel") + +class test(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Load configurations at the class level + current_dir = os.path.dirname(os.path.abspath(__file__)) + ddb_path = os.path.join(current_dir, "..") + + # Load robot configs + robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml") + with open(robot_configs_path, "r") as f: + cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + cls.comm_sync_delay = 1 + + def setUp(self): + test_name = self._testMethodName + print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) + rclpy.init() + self.test_ros_node = Synchronize_channel_test() + self.executor = SingleThreadedExecutor() + self.executor.add_node(self.test_ros_node) + self.logger = self.test_ros_node.get_logger() + executor_thread = threading.Thread(target=self.executor.spin, daemon=True) + executor_thread.start() + super().setUp() + + def tearDown(self): + self.executor.shutdown() + 
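+        # Shut the executor down before destroying the node so the background
+        # spin thread is no longer using the node when it goes away.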
self.test_ros_node.destroy_node() + rclpy.shutdown() + super().tearDown() + + def test_onehop_oneway_sync(self): + """ Start both DBs, insert a message in DB2, trigger_sync DB1, and check that both DBs are the same """ + dbl1 = sample_db.get_sample_dbl() + dbl2 = sample_db.get_sample_dbl() + dbm = db.DBMessage(1, 1, 2, 1, + rclpy.time.Time(seconds=123, nanoseconds=456000000), bytes('New data', 'utf-8')) + dbl2.add_modify_data(dbm) + sc_1 = sync.Channel(dbl1, 'basestation', 'charon', + self.robot_configs, 2, self.test_ros_node) + sc_2 = sync.Channel(dbl2, 'charon', 'basestation', + self.robot_configs, 2, self.test_ros_node) + sc_1.run() + sc_2.run() + sc_1.trigger_sync() + # Wait for comms propagation and check results + time.sleep(self.comm_sync_delay) + self.assertDictEqual(dbl1.db, dbl2.db) + + sc_1.stop() + sc_2.stop() + + def test_convoluted_onehop_oneway_sync(self): + """ Start one DB1, insert a message in DB2 but don't start, trigger_sync + DB1 (should be busy), start DB2, trigger_sync DB1, DBs should be the same """ + self.maxDiff=None + dbl1 = sample_db.get_sample_dbl() + dbl2 = sample_db.get_sample_dbl() + dbm = db.DBMessage(1, 2, 2, 1, + rclpy.time.Time(seconds=123, nanoseconds=456000000), bytes('New data', 'utf-8')) + dbl2.add_modify_data(dbm) + sc_1 = sync.Channel(dbl1, 'basestation', 'charon', self.robot_configs, + 2, self.test_ros_node) + sc_1.run() + sc_1.trigger_sync() + time.sleep(8) + + sc_2 = sync.Channel(dbl2, 'charon', 'basestation', self.robot_configs, + 2, self.test_ros_node) + sc_2.run() + + # Trigger a synchronization in a repeated fashion. This should print + # busy messages. + for i in range(10): + sc_2.trigger_sync() + sc_1.trigger_sync() + + # Wait for comms propagation + time.sleep(self.comm_sync_delay) + self.assertDictEqual(dbl1.db, dbl2.db) + + sc_1.stop() + sc_2.stop() + + def test_convoluted_onehop_twoway_sync(self): + """ Insert a message in DB1 and DB2, start DB1, insert a message in DB2 + but don't start, trigger_sync DB1 (should be busy), start DB2, + trigger_sync DB1, DBs should be the same """ + self.maxDiff=None + dbl1 = sample_db.get_sample_dbl() + dbl2 = sample_db.get_sample_dbl() + # Modify one of the features in the db + dbm = db.DBMessage(1, 2, 2, 1, + rclpy.time.Time(seconds=123, nanoseconds=456000000), bytes('New data', 'utf-8')) + dbl1.add_modify_data(dbm) + dbm = db.DBMessage(1, 3, 2, 1, + rclpy.time.Time(seconds=123, nanoseconds=456000000), bytes('New data', 'utf-8')) + dbl2.add_modify_data(dbm) + sc_1 = sync.Channel(dbl1, 'basestation', + 'charon', self.robot_configs, 2, self.test_ros_node) + sc_1.run() + sc_1.trigger_sync() + time.sleep(8) + + sc_2 = sync.Channel(dbl2, 'charon', 'basestation', + self.robot_configs, 2, self.test_ros_node) + sc_2.run() + + sc_2.trigger_sync() + sc_1.trigger_sync() + + time.sleep(self.comm_sync_delay) + self.assertDictEqual(dbl1.db, dbl2.db) + + sc_1.stop() + sc_2.stop() + + def test_twohop_oneway_sync(self): + """ Insert one message in DB_robot1, propagate to DB_basestation, + propagate to DB_robot2. 
We should have the same database in all the + robots """ + dbl_robot1 = sample_db.get_sample_dbl() + dbl_groundstation = sample_db.get_sample_dbl() + dbl_robot2 = sample_db.get_sample_dbl() + # Modify one of the features in the db + dbm = db.DBMessage(1, 2, 2, 1, + rclpy.time.Time(seconds=123, nanoseconds=456000000), bytes('New data', 'utf-8')) + dbl_robot1.add_modify_data(dbm) + sc_1 = sync.Channel(dbl_robot1, 'charon', + 'basestation', self.robot_configs, 2, self.test_ros_node) + sc_2 = sync.Channel(dbl_groundstation, + 'basestation', + 'charon', self.robot_configs, 2, self.test_ros_node) + sc_3 = sync.Channel(dbl_groundstation, + 'basestation', + 'styx', self.robot_configs, 2, self.test_ros_node) + sc_4 = sync.Channel(dbl_robot2, + 'styx', 'basestation', + self.robot_configs, 2, self.test_ros_node) + + sc_1.run() + sc_2.run() + sc_3.run() + sc_4.run() + + # Trigger first sync + sc_2.trigger_sync() + time.sleep(self.comm_sync_delay) + self.assertDictEqual(dbl_groundstation.db, dbl_robot1.db) + + # Trigger second sync + sc_4.trigger_sync() + time.sleep(self.comm_sync_delay) + self.assertDictEqual(dbl_robot2.db, dbl_groundstation.db) + self.assertDictEqual(dbl_robot2.db, dbl_robot1.db) + + sc_1.stop() + sc_2.stop() + sc_3.stop() + sc_4.stop() + + + def test_delay_run(self): + self.maxDiff = None + db_gs = sample_db.get_sample_dbl() + db_r1 = sample_db.get_sample_dbl() + db_r2 = sample_db.get_sample_dbl() + + sc_gs_r1 = sync.Channel(db_gs, 'basestation', 'charon', self.robot_configs, 2, self.test_ros_node) + sc_gs_r2 = sync.Channel(db_gs, 'basestation', 'styx', self.robot_configs, 2, self.test_ros_node) + + sc_r1_gs = sync.Channel(db_r1, 'charon', 'basestation', self.robot_configs, 2, self.test_ros_node) + sc_r2_gs = sync.Channel(db_r2, 'styx', 'basestation', self.robot_configs, 2, self.test_ros_node) + + sc_r1_r2 = sync.Channel(db_r1, 'charon', 'styx', self.robot_configs, 2, self.test_ros_node) + sc_r2_r1 = sync.Channel(db_r2, 'styx', 'charon', self.robot_configs, 2, self.test_ros_node) + + sc_gs_r1.run() + sc_gs_r2.run() + + dbm = db.DBMessage(1, 1, 2, 1, rclpy.time.Time(seconds=int(time.time())), bytes('r1_data', 'utf-8')) + db_r1.add_modify_data(dbm) + + sc_gs_r1.trigger_sync() + time.sleep(4) + + sc_r1_gs.run() + sc_r2_gs.run() + + dbm = db.DBMessage(2, 2, 1, 2, rclpy.time.Time(seconds=int(time.time())), bytes('r2_data', 'utf-8')) + db_r2.add_modify_data(dbm) + + sc_r1_r2.run() + + sc_r1_r2.trigger_sync() + time.sleep(4) + + sc_gs_r2.trigger_sync() + time.sleep(self.comm_sync_delay) + + sc_r2_r1.run() + + sc_gs_r1.stop() + sc_r1_gs.stop() + + sc_r1_r2.stop() + sc_r2_r1.stop() + + sc_r2_gs.stop() + sc_gs_r2.stop() + + def test_multi_robot_sync(self): + self.maxDiff=None + + db_gs = sample_db.get_sample_dbl() + db_r1 = sample_db.get_sample_dbl() + + # Set up all channels + sc_gs_r1 = sync.Channel(db_gs, 'basestation', + 'charon', self.robot_configs, 2, self.test_ros_node) + sc_r1_gs = sync.Channel(db_r1, 'charon', + 'basestation', self.robot_configs, 2, + self.test_ros_node) + + sc_r1_gs.run() + sc_gs_r1.run() + + current_time = self.test_ros_node.get_clock().now() + # Insert 5 messages with different topic IDs + for i in range(5): + dbm = db.DBMessage(1, i, 1, 1, current_time, bytes('r1_data', 'utf-8')) + db_r1.add_modify_data(dbm) + + sc_gs_r1.trigger_sync() + sc_r1_gs.trigger_sync() + time.sleep(self.comm_sync_delay) + + current_time = self.test_ros_node.get_clock().now() + dbm = db.DBMessage(0, 1, 0, 0, current_time, bytes('r1_data', 'utf-8')) + db_gs.add_modify_data(dbm) + + 
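+        # Trigger the sync in both directions so the basestation's new message
+        # reaches the robot and the robot's earlier inserts reach the basestation.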
sc_r1_gs.trigger_sync()
+        sc_gs_r1.trigger_sync()
+        time.sleep(self.comm_sync_delay)
+
+        sc_r1_gs.stop()
+        sc_gs_r1.stop()
+
+        self.assertDictEqual(db_gs.db, db_r1.db)
+
+if __name__ == '__main__':
+    unittest.main(failfast=True)
diff --git a/mocha_core/test/test_topic_publisher.py b/mocha_core/test/test_topic_publisher.py
new file mode 100644
index 0000000..6f50fb7
--- /dev/null
+++ b/mocha_core/test/test_topic_publisher.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+
+import os
+import pdb
+import time
+import unittest
+import threading
+
+import rclpy
+import rclpy.time
+from rclpy.node import Node
+import yaml
+from colorama import Fore, Style
+import mocha_core.database_server as ds
+import mocha_core.topic_publisher as tp
+from mocha_core.database import DBMessage
+from rclpy.executors import MultiThreadedExecutor
+import geometry_msgs.msg
+
+class Database_server_test(Node):
+    def __init__(self):
+        super().__init__("mocha")
+
+
+class test_topic_publisher(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        # Load configurations at the class level
+        current_dir = os.path.dirname(os.path.abspath(__file__))
+        ddb_path = os.path.join(current_dir, "..")
+
+        # Load robot configs
+        cls.robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml")
+        with open(cls.robot_configs_path, "r") as f:
+            cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader)
+
+        # Load topic configs
+        cls.topic_configs_path = os.path.join(ddb_path, "config/testConfigs/topic_configs.yaml")
+        with open(cls.topic_configs_path, "r") as f:
+            cls.topic_configs = yaml.load(f, Loader=yaml.FullLoader)
+        cls.robot_name = "charon"
+
+    def setUp(self):
+        test_name = self._testMethodName
+        print("\n", Fore.RED, 20 * "=", test_name, 20 * "=", Style.RESET_ALL)
+        rclpy.init()
+        self.test_server_node = Database_server_test()
+        self.executor_db = MultiThreadedExecutor(num_threads=4)
+        self.executor_db.add_node(self.test_server_node)
+        executor_thread_db = threading.Thread(target=self.executor_db.spin, daemon=True)
+        executor_thread_db.start()
+
+        # Create a database server object with the node (this will create the services)
+        self.dbs = ds.DatabaseServer(self.robot_configs, self.topic_configs,
+                                     self.robot_name, self.test_server_node)
+
+        # Create the topic_publisher node
+        self.test_topic_publisher_node = tp.TopicPublisherNode(self.robot_name,
+                                                               self.robot_configs_path,
+                                                               self.topic_configs_path)
+        self.executor_tp = MultiThreadedExecutor(num_threads=4)
+        self.executor_tp.add_node(self.test_topic_publisher_node)
+        self.logger = self.test_topic_publisher_node.get_logger()
+        executor_thread_tp = threading.Thread(target=self.executor_tp.spin, daemon=True)
+        executor_thread_tp.start()
+        time.sleep(1)  # Wait for nodes to be set up
+        super().setUp()
+
+    def tearDown(self):
+        self.test_topic_publisher_node.shutdown()
+        self.executor_db.shutdown()
+        self.test_server_node.destroy_node()
+        self.executor_tp.shutdown()
+        self.test_topic_publisher_node.destroy_node()
+        rclpy.shutdown()
+        time.sleep(1)  # Wait for final teardown
+        super().tearDown()
+
+    def test_topic_publisher_creates_topics(self):
+        """ Verify that all the topics created are there with the corresponding
+        namespace """
+
+        # Get all topics that have publishers
+        topic_names_and_types = self.test_topic_publisher_node.get_topic_names_and_types()
+
+        expected_names_and_types = {'/basestation/target_goals':
+                                    'geometry_msgs/msg/PoseArray',
+                                    '/charon/odometry':
+                                    'nav_msgs/msg/Odometry',
+                                    '/charon/pose':
+                                    'geometry_msgs/msg/PointStamped',
+                                    '/mocha/database_cb':
'mocha_core/msg/DatabaseCB',
+                                    '/quad1/image':
+                                    'sensor_msgs/msg/Image',
+                                    '/quad1/pose':
+                                    'geometry_msgs/msg/PointStamped',
+                                    '/styx/odometry':
+                                    'nav_msgs/msg/Odometry',
+                                    '/styx/pose':
+                                    'geometry_msgs/msg/PointStamped'}
+
+        # Verify that the topics are there
+        for name, topictype in topic_names_and_types:
+            # Skip generic topics
+            if name == "/parameter_events" or name == "/rosout":
+                continue
+            self.assertIn(name, expected_names_and_types.keys())
+            self.assertEqual(topictype[0], expected_names_and_types[name])
+
+    def test_publisher_publish_msg(self):
+        """ Verify that when a message is added to the database we will get this
+        message published by the publisher """
+
+        # Create a listener node that subscribes to the publisher
+        class ListenerNode(Node):
+            def __init__(self):
+                super().__init__("listener_node")
+                self.subscription = self.create_subscription(
+                    geometry_msgs.msg.PointStamped,
+                    '/charon/pose',
+                    self.topic_cb,
+                    qos_profile=tp.QOS_PROFILE
+                )
+                self.result = None
+                self.counter = 0
+
+            def topic_cb(self, data):
+                self.result = data
+                self.counter += 1
+
+        test_listener_node = ListenerNode()
+        executor = MultiThreadedExecutor(num_threads=4)
+        executor.add_node(test_listener_node)
+        executor_thread = threading.Thread(target=executor.spin, daemon=True)
+        executor_thread.start()
+        time.sleep(1)
+        # Insert a message known to be from charon, pose
+        bin_data = b'\x04"M\x18h@,\x00\x00\x00\x00\x00\x00\x00<\x12\x00\x00\x005\x00\x01\x00\x01\x00\x07\x0b\x00\x0c\x02\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        msg_count = 10
+        for i in range(msg_count):
+            msg = DBMessage(1, 1, 0, 0,
+                            rclpy.time.Time(nanoseconds=1083902000000 + i*1000000),
+                            bin_data)
+            # Insert the message in the database in a crude way
+            self.dbs.dbl.add_modify_data(msg)
+        # Let the publishers publish
+        time.sleep(3)
+        executor.shutdown()
+        test_listener_node.destroy_node()
+        time.sleep(1)
+        # We should have msg_count messages on the reader
+        self.assertEqual(msg_count, test_listener_node.counter)
+        self.assertIsInstance(test_listener_node.result,
+                              geometry_msgs.msg._point_stamped.PointStamped)
+
+
+if __name__ == "__main__":
+    unittest.main(failfast=True)
diff --git a/mocha_core/test/test_translator.py b/mocha_core/test/test_translator.py
new file mode 100644
index 0000000..99f0a77
--- /dev/null
+++ b/mocha_core/test/test_translator.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+
+import os
+import pdb
+import random
+import sys
+import time
+import unittest
+import threading
+
+import rclpy
+import rclpy.time
+import rclpy.clock
+from rclpy.executors import MultiThreadedExecutor
+from rclpy.node import Node
+import yaml
+from colorama import Fore, Style
+from geometry_msgs.msg import PointStamped
+from nav_msgs.msg import Odometry
+
+import mocha_core.database_server as ds
+import mocha_core.database_utils as du
+import mocha_core.translator as tr
+from mocha_core.srv import SelectDB
+
+
+class Database_server_test(Node):
+    def __init__(self):
+        # Important to match the topic that the translator expects
+        super().__init__("mocha")
+
+
+class test_translator(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        # Load configurations at the class level
+        current_dir = os.path.dirname(os.path.abspath(__file__))
+        ddb_path = os.path.join(current_dir, "..")
+
+        # Load robot configs
+        cls.robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml")
+        with open(cls.robot_configs_path, "r") as f:
+            cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader)
+
+        # Load topic configs
cls.topic_configs_path = os.path.join(ddb_path, "config/testConfigs/topic_configs.yaml")
+        with open(cls.topic_configs_path, "r") as f:
+            cls.topic_configs = yaml.load(f, Loader=yaml.FullLoader)
+        cls.robot_name = "charon"
+
+    def setUp(self):
+        test_name = self._testMethodName
+        print("\n", Fore.RED, 20 * "=", test_name, 20 * "=", Style.RESET_ALL)
+
+        rclpy.init()
+        self.test_server_node = Database_server_test()
+        self.executor_server = MultiThreadedExecutor(num_threads=4)
+        self.executor_server.add_node(self.test_server_node)
+        executor_server_thread = threading.Thread(target=self.executor_server.spin, daemon=True)
+        executor_server_thread.start()
+
+        # Create a database server object with the node (this will create the services)
+        self.dbs = ds.DatabaseServer(self.robot_configs, self.topic_configs,
+                                     self.robot_name, self.test_server_node)
+
+        # Use the actual TranslatorNode from translator.py with class configs
+        self.test_translator_node = tr.TranslatorNode(
+            this_robot=self.robot_name,
+            robot_configs=self.robot_configs_path,
+            topic_configs=self.topic_configs_path
+        )
+        self.executor_translator = MultiThreadedExecutor(num_threads=4)
+        self.executor_translator.add_node(self.test_translator_node)
+        executor_translator_thread = threading.Thread(target=self.executor_translator.spin, daemon=True)
+        executor_translator_thread.start()
+        time.sleep(1)  # Let things talk to each other
+        super().setUp()
+
+
+    def tearDown(self):
+        self.executor_server.shutdown()
+        self.test_server_node.destroy_node()
+        self.executor_translator.shutdown()
+        self.test_translator_node.destroy_node()
+        rclpy.shutdown()
+        time.sleep(1)
+        super().tearDown()
+
+    def test_translator_subscriptions_created(self):
+        """Test that translator creates subscriptions for robot topics"""
+        # Get the expected topics for charon robot
+        this_robot_topics = self.topic_configs[self.robot_configs[self.robot_name]["node-type"]]
+
+        # Check that translator has exactly one subscription for each topic
+        for topic in this_robot_topics:
+            topic_name = topic["msg_topic"]
+            subscriptions_info = self.test_translator_node.get_subscriptions_info_by_topic(topic_name)
+            self.assertEqual(len(subscriptions_info), 1,
+                             f"Expected exactly 1 subscription for {topic_name} topic, got {len(subscriptions_info)}")
+
+    def test_translator_storm(self):
+        """Storm test: publish many messages to translator topics and verify they're all stored"""
+        MSGS_PER_TOPIC = 10
+
+        # Get available topics for this robot
+        this_robot_topics = self.topic_configs[self.robot_configs[self.robot_name]["node-type"]]
+        msg_types = du.msg_types(self.robot_configs, self.topic_configs,
+                                 self.test_translator_node)
+        robot_id = du.get_robot_id_from_name(self.robot_configs, self.robot_name)
+
+        # Create a node that acts as a publisher. We will instantiate this
+        # node in multiple different threads.
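+        # Rough sequence of the storm below (as implemented, one FakePublisher
+        # node per robot topic): build the publisher, publish MSGS_PER_TOPIC
+        # random messages on the topic, and let the translator subscriptions
+        # forward them to the database, where the total count is checked.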
+
+        class FakePublisher(Node):
+            def __init__(self, topic_id, topic_info, msg_types):
+                random_name = f"fake_publisher_{random.randint(0, 2**32)}"
+                super().__init__(random_name)
+
+                topic_name = topic_info["msg_topic"]
+
+                # Get message type for this topic
+                obj = msg_types[robot_id][topic_id]["obj"]
+                self.pub = self.create_publisher(obj, topic_name, tr.QOS_PROFILE)
+
+                # Block until we are ready to go
+                self.mx = threading.Lock()
+
+                # Wait for publisher to connect
+                time.sleep(1)
+
+                self.mx.acquire()
+                for i in range(MSGS_PER_TOPIC):
+                    # Create random message based on topic type
+                    if topic_name == "/pose":
+                        msg = PointStamped()
+                        msg.header.frame_id = "world"
+                        msg.point.x = random.random()
+                        msg.point.y = random.random()
+                        msg.point.z = random.random()
+                        msg.header.stamp = self.get_clock().now().to_msg()
+                    elif topic_name == "/odometry":
+                        msg = Odometry()
+                        msg.header.frame_id = "world"
+                        msg.pose.pose.position.x = random.random()
+                        msg.pose.pose.position.y = random.random()
+                        msg.pose.pose.position.z = random.random()
+                        msg.header.stamp = self.get_clock().now().to_msg()
+                    self.pub.publish(msg)
+                    time.sleep(.2)
+
+
+        # Launch one thread per topic
+        executors = []
+        nodes = []
+        for topic_id, topic in enumerate(this_robot_topics):
+            pub_node = FakePublisher(topic_id, topic, msg_types)
+            nodes.append(pub_node)
+            executor = MultiThreadedExecutor(num_threads=4)
+            executor.add_node(pub_node)
+            executors.append(executor)
+            thread = threading.Thread(target=executor.spin)
+            thread.start()
+        # Wait until things are stable
+        time.sleep(1)
+
+        # Go!
+        for node in nodes:
+            node.mx.release()
+        time.sleep(1)
+
+        # Wait for all publishing threads to complete
+        for executor in executors:
+            executor.shutdown()
+
+        for node in nodes:
+            node.destroy_node()
+
+        # Query the database to count stored messages
+        try:
+            robot_id = du.get_robot_id_from_name(self.robot_configs, self.robot_name)
+            req = SelectDB.Request()
+            req.robot_id = robot_id
+            req.topic_id = 0  # This is not used as filtering is not implemented
+            req.ts_limit = self.test_translator_node.get_clock().now().to_msg()  # Not used
+
+            select_client = self.test_translator_node.create_client(
+                SelectDB,
+                "/mocha/select_db",
+                qos_profile=ds.DatabaseServer.QOS_PROFILE
+            )
+            while not select_client.wait_for_service(timeout_sec=1.0):
+                pass
+
+            future = select_client.call_async(req)
+            rclpy.spin_until_future_complete(self.test_translator_node, future)
+            answ = future.result()
+            returned_headers = du.deserialize_headers(answ.headers)
+
+            # Calculate expected total messages
+            # Each topic gets MSGS_PER_TOPIC messages
+            # charon robot has 2 topics: /pose and /odometry
+            expected_total = MSGS_PER_TOPIC * len(this_robot_topics)
+
+            # Verify all messages were stored
+            self.assertEqual(len(returned_headers), expected_total,
+                             f"Expected {expected_total} messages in database, got {len(returned_headers)}")
+
+        except Exception as exc:
+            print(f"Database query failed: {exc}")
+            self.assertTrue(False)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/mocha_core/test/test_zmq_comm_node.py b/mocha_core/test/test_zmq_comm_node.py
new file mode 100755
index 0000000..d09e3b8
--- /dev/null
+++ b/mocha_core/test/test_zmq_comm_node.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+""" test_zmq_comm_node.py
+This file tests the basic functionality between two communication nodes.
+ - Two nodes are created: basestation and charon. The configuration for these is
The client of each node is set to the other one. + - We generate a random message that groundstation sends to charon. Upon reception, + cb_charon is called. The return value of this function is transmitted to the groundstation. + - The groundstation receives the message +""" + +import os +import random +import string +import sys +import time +import unittest +import yaml +import pdb +import rclpy +import rclpy.logging +from rclpy.logging import LoggingSeverity +from rclpy.executors import SingleThreadedExecutor +from rclpy.node import Node +import threading +from colorama import Fore, Style +import mocha_core.zmq_comm_node as zmq_comm_node + +class Comm_node_test(Node): + def __init__(self): + super().__init__("comm_node_test") + + +class Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Load configurations at the class level + current_dir = os.path.dirname(os.path.abspath(__file__)) + ddb_path = os.path.join(current_dir, "..") + + # Load robot configs + robot_configs_path = os.path.join(ddb_path, "config/testConfigs/robot_configs.yaml") + with open(robot_configs_path, "r") as f: + cls.robot_configs = yaml.load(f, Loader=yaml.FullLoader) + + + def setUp(self): + test_name = self._testMethodName + print("\n", Fore.RED, 20*"=", test_name, 20*"=", Style.RESET_ALL) + # Create a mock node to run the tests + rclpy.init() + self.comm_node_test = Comm_node_test() + self.executor = SingleThreadedExecutor() + self.executor.add_node(self.comm_node_test) + executor_thread = threading.Thread(target=self.executor.spin, daemon=True) + executor_thread.start() + super().setUp() + + def tearDown(self): + time.sleep(1) + self.executor.shutdown() + self.comm_node_test.destroy_node() + rclpy.shutdown() + super().tearDown() + + def test_simple_connection(self): + self.answer_cb_gs_client = None + self.answer_cb_ch_client = None + logger = self.comm_node_test.get_logger() + logger.set_level(LoggingSeverity.DEBUG) + + def cb_groundstation_client(value): + logger.debug(f"cb groundstation client") + self.answer_cb_gs_client = value + + def cb_groundstation_server(value): + # This function is called upon reception of a message by the base + # station + logger.debug("cb groundstation server") + to_append = b"GSSERVER" + return value + to_append + + def cb_charon_client(value): + logger.debug(f"cb charon client") + self.answer_cb_ch_client = value + + def cb_charon_server(value): + # This function is called upon reception of a message by charon. 
+            # The return value is transmitted as the answer to the original
+            # message + b"CHARONSERVER"
+            logger.debug(f"cb charon server")
+            to_append = b"CHARONSERVER"
+            return value + to_append
+
+        # Create the two robots
+        node_groundstation = zmq_comm_node.Comm_node(
+            "basestation", "charon", self.robot_configs,
+            cb_groundstation_client, cb_groundstation_server,
+            2, self.comm_node_test
+        )
+        node_charon = zmq_comm_node.Comm_node(
+            "charon", "basestation", self.robot_configs,
+            cb_charon_client, cb_charon_server,
+            2, self.comm_node_test
+        )
+
+        # Generate random message
+        letters = string.ascii_letters
+        random_str = "".join(random.choice(letters) for i in range(10))
+        random_msg = random_str + str(random.randint(0, 1024))
+        random_msg = random_msg.encode()
+
+        # Send a message from each node to its peer
+        node_groundstation.connect_send_message(random_msg)
+        node_charon.connect_send_message(random_msg)
+
+        # Terminate robots and test assertion
+        node_groundstation.terminate()
+        node_charon.terminate()
+        self.assertEqual(random_msg + b"CHARONSERVER", self.answer_cb_gs_client,
+                         "Sent %s" % random_msg)
+        self.assertEqual(random_msg + b"GSSERVER", self.answer_cb_ch_client,
+                         "Sent %s" % random_msg)
+
+if __name__ == "__main__":
+    # Run test cases!
+    unittest.main()
diff --git a/mocha_launch/CMakeLists.txt b/mocha_launch/CMakeLists.txt
index b7c3a75..2762304 100644
--- a/mocha_launch/CMakeLists.txt
+++ b/mocha_launch/CMakeLists.txt
@@ -1,198 +1,31 @@
-cmake_minimum_required(VERSION 2.8.3)
+cmake_minimum_required(VERSION 3.8)
 project(mocha_launch)
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+  add_compile_options(-Wall -Wextra -Wpedantic)
+endif()
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-find_package(catkin REQUIRED COMPONENTS
-  roscpp
-  rospy
-  std_msgs
-)
-
-## System dependencies are found with CMake's conventions
-# find_package(Boost REQUIRED COMPONENTS system)
-
-
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-# catkin_python_setup()
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-
-## To declare and build messages, services or actions from within this
-## package, follow these steps:
-## * Let MSG_DEP_SET be the set of packages whose message types you use in
-##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
-## * In the file package.xml:
-##   * add a build_depend tag for "message_generation"
-##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
-##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
-##     but can be declared for certainty nonetheless:
-##     * add a exec_depend tag for "message_runtime"
-## * In this file (CMakeLists.txt):
-##   * add "message_generation" and every package in MSG_DEP_SET to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-##   * add "message_runtime" and every package in MSG_DEP_SET to
-##     catkin_package(CATKIN_DEPENDS ...)
-## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES mocha_launch -# CATKIN_DEPENDS roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### +# find dependencies +find_package(ament_cmake REQUIRED) +# uncomment the following section in order to fill in +# further dependencies manually. +# find_package( REQUIRED) -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} +# Install launch files +install(DIRECTORY launch + DESTINATION share/${PROJECT_NAME} ) -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/mocha_launch.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/mocha_launch_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# install(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables and/or libraries for installation -# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -install(DIRECTORY - launch - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_mocha_launch.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + # the following line skips the linter which checks for copyrights + # comment the line when a copyright and license is added to all source files + set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # comment the line when this package is in a git repo and when + # a copyright and license is added to all source files + set(ament_cmake_cpplint_FOUND TRUE) + ament_lint_auto_find_test_dependencies() +endif() + +ament_package() diff --git a/mocha_launch/launch/basestation.launch b/mocha_launch/launch/basestation.launch deleted file mode 100644 index 08509c0..0000000 --- a/mocha_launch/launch/basestation.launch +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/mocha_launch/launch/basestation.launch.py b/mocha_launch/launch/basestation.launch.py new file mode 100644 index 0000000..101e09a --- /dev/null +++ b/mocha_launch/launch/basestation.launch.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import LaunchConfiguration, PathJoinSubstitution +from launch_ros.substitutions import FindPackageShare + + +def generate_launch_description(): + """ + Launch basestation robot with database, translators, publishers, and Rajant interface + """ + + # Declare launch arguments + robot_name_arg = DeclareLaunchArgument( + 'robot_name', + default_value='basestation', + 
description='Name of the robot' + ) + + robot_configs_arg = DeclareLaunchArgument( + 'robot_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'robot_configs.yaml' + ]), + description='Path to robot configuration file' + ) + + topic_configs_arg = DeclareLaunchArgument( + 'topic_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'topic_configs.yaml' + ]), + description='Path to topic configuration file' + ) + + radio_configs_arg = DeclareLaunchArgument( + 'radio_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'radio_configs.yaml' + ]), + description='Path to radio configuration file' + ) + + # Get launch configurations + robot_name = LaunchConfiguration('robot_name') + robot_configs = LaunchConfiguration('robot_configs') + topic_configs = LaunchConfiguration('topic_configs') + radio_configs = LaunchConfiguration('radio_configs') + + # Include database, translators and publishers launch file + database_translators_publishers_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource([ + PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'launch', + 'database_translators_publishers.launch.py' + ]) + ]), + launch_arguments={ + 'robot_name': robot_name, + 'robot_configs': robot_configs, + 'topic_configs': topic_configs, + 'radio_configs': radio_configs + }.items() + ) + + # Include Rajant interface launch file + rajant_interface_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource([ + PathJoinSubstitution([ + FindPackageShare('interface_rajant'), + 'launch', + 'rajant_nodes.launch.py' + ]) + ]), + launch_arguments={ + 'robot_name': robot_name, + 'robot_configs': robot_configs, + 'topic_configs': topic_configs, + 'radio_configs': radio_configs + }.items() + ) + + return LaunchDescription([ + robot_name_arg, + robot_configs_arg, + topic_configs_arg, + radio_configs_arg, + database_translators_publishers_launch, + rajant_interface_launch + ]) \ No newline at end of file diff --git a/mocha_launch/launch/jackal.launch b/mocha_launch/launch/jackal.launch deleted file mode 100644 index 7789597..0000000 --- a/mocha_launch/launch/jackal.launch +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/mocha_launch/launch/jackal.launch.py b/mocha_launch/launch/jackal.launch.py new file mode 100644 index 0000000..b2131ef --- /dev/null +++ b/mocha_launch/launch/jackal.launch.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import LaunchConfiguration, PathJoinSubstitution +from launch_ros.substitutions import FindPackageShare + + +def generate_launch_description(): + """ + Launch jackal robot (callisto) with database, translators, publishers, and Rajant interface + """ + + # Declare launch arguments + robot_name_arg = DeclareLaunchArgument( + 'robot_name', + default_value='callisto', + description='Name of the robot' + ) + + robot_configs_arg = DeclareLaunchArgument( + 'robot_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'robot_configs.yaml' + ]), + description='Path to robot configuration file' + ) + + topic_configs_arg = DeclareLaunchArgument( + 'topic_configs', + default_value=PathJoinSubstitution([ + FindPackageShare('mocha_core'), + 'config', 'topic_configs.yaml' + 
+        ]),
+        description='Path to topic configuration file'
+    )
+
+    radio_configs_arg = DeclareLaunchArgument(
+        'radio_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'radio_configs.yaml'
+        ]),
+        description='Path to radio configuration file'
+    )
+
+    # Get launch configurations
+    robot_name = LaunchConfiguration('robot_name')
+    robot_configs = LaunchConfiguration('robot_configs')
+    topic_configs = LaunchConfiguration('topic_configs')
+    radio_configs = LaunchConfiguration('radio_configs')
+
+    # Include database, translators and publishers launch file
+    database_translators_publishers_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource([
+            PathJoinSubstitution([
+                FindPackageShare('mocha_core'),
+                'launch',
+                'database_translators_publishers.launch.py'
+            ])
+        ]),
+        launch_arguments={
+            'robot_name': robot_name,
+            'robot_configs': robot_configs,
+            'topic_configs': topic_configs,
+            'radio_configs': radio_configs
+        }.items()
+    )
+
+    # Include Rajant interface launch file
+    rajant_interface_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource([
+            PathJoinSubstitution([
+                FindPackageShare('interface_rajant'),
+                'launch',
+                'rajant_nodes.launch.py'
+            ])
+        ]),
+        launch_arguments={
+            'robot_name': robot_name,
+            'robot_configs': robot_configs,
+            'topic_configs': topic_configs,
+            'radio_configs': radio_configs
+        }.items()
+    )
+
+    return LaunchDescription([
+        robot_name_arg,
+        robot_configs_arg,
+        topic_configs_arg,
+        radio_configs_arg,
+        database_translators_publishers_launch,
+        rajant_interface_launch
+    ])
\ No newline at end of file
diff --git a/mocha_launch/launch/titan.launch b/mocha_launch/launch/titan.launch
deleted file mode 100644
index cff87b0..0000000
--- a/mocha_launch/launch/titan.launch
+++ /dev/null
@@ -1,16 +0,0 @@
-<!-- 16 removed lines of ROS 1 XML launch configuration; the tag content was stripped during extraction and is not recoverable -->
diff --git a/mocha_launch/launch/titan.launch.py b/mocha_launch/launch/titan.launch.py
new file mode 100644
index 0000000..f367267
--- /dev/null
+++ b/mocha_launch/launch/titan.launch.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
+from launch_ros.substitutions import FindPackageShare
+
+
+def generate_launch_description():
+    """
+    Launch titan robot with database, translators, publishers, and Rajant interface
+    """
+
+    # Declare launch arguments
+    robot_name_arg = DeclareLaunchArgument(
+        'robot_name',
+        default_value='titan',
+        description='Name of the robot'
+    )
+
+    robot_configs_arg = DeclareLaunchArgument(
+        'robot_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'robot_configs.yaml'
+        ]),
+        description='Path to robot configuration file'
+    )
+
+    topic_configs_arg = DeclareLaunchArgument(
+        'topic_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'topic_configs.yaml'
+        ]),
+        description='Path to topic configuration file'
+    )
+
+    radio_configs_arg = DeclareLaunchArgument(
+        'radio_configs',
+        default_value=PathJoinSubstitution([
+            FindPackageShare('mocha_core'),
+            'config', 'radio_configs.yaml'
+        ]),
+        description='Path to radio configuration file'
+    )
+
+    # Get launch configurations
+    robot_name = LaunchConfiguration('robot_name')
+    robot_configs = LaunchConfiguration('robot_configs')
+    topic_configs = LaunchConfiguration('topic_configs')
+    radio_configs = LaunchConfiguration('radio_configs')
+
+    # Include database, translators and publishers launch file
+    database_translators_publishers_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource([
+            PathJoinSubstitution([
+                FindPackageShare('mocha_core'),
+                'launch',
+                'database_translators_publishers.launch.py'
+            ])
+        ]),
+        launch_arguments={
+            'robot_name': robot_name,
+            'robot_configs': robot_configs,
+            'topic_configs': topic_configs,
+            'radio_configs': radio_configs
+        }.items()
+    )
+
+    # Include Rajant interface launch file
+    rajant_interface_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource([
+            PathJoinSubstitution([
+                FindPackageShare('interface_rajant'),
+                'launch',
+                'rajant_nodes.launch.py'
+            ])
+        ]),
+        launch_arguments={
+            'robot_name': robot_name,
+            'robot_configs': robot_configs,
+            'topic_configs': topic_configs,
+            'radio_configs': radio_configs
+        }.items()
+    )
+
+    return LaunchDescription([
+        robot_name_arg,
+        robot_configs_arg,
+        topic_configs_arg,
+        radio_configs_arg,
+        database_translators_publishers_launch,
+        rajant_interface_launch
+    ])
\ No newline at end of file
diff --git a/mocha_launch/package.xml b/mocha_launch/package.xml
index 93e484e..86ead74 100644
--- a/mocha_launch/package.xml
+++ b/mocha_launch/package.xml
@@ -1,68 +1,18 @@
 <?xml version="1.0"?>
-<package format="2">
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
   <name>mocha_launch</name>
   <version>0.0.0</version>
   <description>The mocha_launch package</description>
-  <!-- removed catkin template comment lines; content stripped during extraction -->
   <maintainer>Fernando Cladera</maintainer>
+  <license>BSD 3-Clause License</license>
+
+  <buildtool_depend>ament_cmake</buildtool_depend>
-  <license>TODO</license>
-  <!-- removed catkin template comment lines; content stripped during extraction -->
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
-  <!-- removed catkin template comment lines; content stripped during extraction -->
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>roscpp</build_depend>
-  <build_depend>rospy</build_depend>
-  <build_depend>std_msgs</build_depend>
-  <build_export_depend>roscpp</build_export_depend>
-  <build_export_depend>rospy</build_export_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <exec_depend>roscpp</exec_depend>
-  <exec_depend>rospy</exec_depend>
-  <exec_depend>std_msgs</exec_depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
 </package>
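
For reference, once the workspace is built and sourced, the converted files are started with the standard `ros2 launch` CLI, and any of the declared arguments can be overridden with the usual `name:=value` syntax (the custom YAML path below is a hypothetical placeholder, not a file from this diff):

    ros2 launch mocha_launch jackal.launch.py
    ros2 launch mocha_launch titan.launch.py robot_configs:=/tmp/my_robot_configs.yaml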
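Note that these launch files locate each other through FindPackageShare, which resolves against the installed share directory, so mocha_launch's own CMakeLists.txt (not shown in this diff) must install the launch/ directory. A minimal sketch, assuming the package uses plain ament_cmake as its package.xml declares:

    cmake_minimum_required(VERSION 3.8)
    project(mocha_launch)

    find_package(ament_cmake REQUIRED)

    # Install the launch files so FindPackageShare('mocha_launch') can
    # locate them under share/mocha_launch/launch at runtime.
    install(DIRECTORY launch
      DESTINATION share/${PROJECT_NAME}
    )

    ament_package()

Without such an install rule, `ros2 launch mocha_launch jackal.launch.py` would fail to find the file even though it exists in the source tree.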
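jackal.launch.py and titan.launch.py are identical apart from the robot_name default and docstring, and the earlier launch file in this diff follows the same template. If more robots are added, a shared helper could remove the triplication; the sketch below is a possible refactor, not part of this diff, and the module name mocha_launch_common is hypothetical:

    # mocha_launch_common.py (hypothetical helper, not in this diff)
    from launch import LaunchDescription
    from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
    from launch.launch_description_sources import PythonLaunchDescriptionSource
    from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
    from launch_ros.substitutions import FindPackageShare


    def robot_launch_description(default_robot_name):
        """Build the common MOCHA robot bringup, parameterized by robot name."""
        # One DeclareLaunchArgument per config file, defaulting into mocha_core/config
        config_args = [
            DeclareLaunchArgument(
                name,
                default_value=PathJoinSubstitution([
                    FindPackageShare('mocha_core'), 'config', f'{name}.yaml'
                ]),
                description=f'Path to {name.split("_")[0]} configuration file'
            )
            for name in ('robot_configs', 'topic_configs', 'radio_configs')
        ]
        # Forward all four arguments to both included launch files
        launch_args = {
            key: LaunchConfiguration(key)
            for key in ('robot_name', 'robot_configs',
                        'topic_configs', 'radio_configs')
        }
        includes = [
            IncludeLaunchDescription(
                PythonLaunchDescriptionSource([PathJoinSubstitution([
                    FindPackageShare(pkg), 'launch', launch_file
                ])]),
                launch_arguments=launch_args.items()
            )
            for pkg, launch_file in (
                ('mocha_core', 'database_translators_publishers.launch.py'),
                ('interface_rajant', 'rajant_nodes.launch.py'),
            )
        ]
        return LaunchDescription(
            [DeclareLaunchArgument('robot_name',
                                   default_value=default_robot_name,
                                   description='Name of the robot')]
            + config_args + includes
        )

Each per-robot launch file would then shrink to a two-line generate_launch_description() that returns robot_launch_description('titan') (or 'callisto'), keeping the CLI interface of the files in this diff unchanged.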